path: root/Objects/complexobject.c
blob: c6e33433cff2a6608d6322552f7ed29b690ec8f2 (plain)

/* Complex object implementation */

/* Borrows heavily from floatobject.c */

/* Submitted by Jim Hugunin */

#include "Python.h"
#include "structmember.h"

#ifndef WITHOUT_COMPLEX

/* Precisions used by repr() and str(), respectively.

   The repr() precision (17 significant decimal digits) is the minimal number
   that is guaranteed to have enough precision so that if the number is read
   back in, the exact same binary value is recreated.  This is true for IEEE
   floating point by design, and also happens to work for all other modern
   hardware.

   The str() precision is chosen so that in most cases, the rounding noise
   created by various operations is suppressed, while giving plenty of
   precision for practical use.
*/

#define PREC_REPR	17
#define PREC_STR	12
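
/* A minimal sketch of the 17- vs 12-digit distinction above -- added purely
   for illustration, kept out of the build with "#if 0", and using a made-up
   function name that is not part of the original source. */
#if 0
static void
prec_repr_roundtrip_demo(void)
{
	char buf[32];
	char *end;
	double x = 1.0 / 3.0;

	/* 17 significant digits reproduce the exact same double on re-read... */
	PyOS_ascii_formatd(buf, sizeof(buf), "%.17g", x);
	assert(PyOS_ascii_strtod(buf, &end) == x);

	/* ...while the 12-digit str() form generally loses the last bits. */
	PyOS_ascii_formatd(buf, sizeof(buf), "%.12g", x);
	assert(PyOS_ascii_strtod(buf, &end) != x);
}
#endif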

/* elementary operations on complex numbers */

static Py_complex c_1 = {1., 0.};

Py_complex
c_sum(Py_complex a, Py_complex b)
{
	Py_complex r;
	r.real = a.real + b.real;
	r.imag = a.imag + b.imag;
	return r;
}

Py_complex
c_diff(Py_complex a, Py_complex b)
{
	Py_complex r;
	r.real = a.real - b.real;
	r.imag = a.imag - b.imag;
	return r;
}

Py_complex
c_neg(Py_complex a)
{
	Py_complex r;
	r.real = -a.real;
	r.imag = -a.imag;
	return r;
}

Py_complex
c_prod(Py_complex a, Py_complex b)
{
	Py_complex r;
	r.real = a.real*b.real - a.imag*b.imag;
	r.imag = a.real*b.imag + a.imag*b.real;
	return r;
}

Py_complex
c_quot(Py_complex a, Py_complex b)
{
	/******************************************************************
	This was the original algorithm.  It's grossly prone to spurious
	overflow and underflow errors.  It also merrily divides by 0 despite
	checking for that(!).  The code still serves a doc purpose here, as
	the algorithm following is a simple by-cases transformation of this
	one:

	Py_complex r;
	double d = b.real*b.real + b.imag*b.imag;
	if (d == 0.)
		errno = EDOM;
	r.real = (a.real*b.real + a.imag*b.imag)/d;
	r.imag = (a.imag*b.real - a.real*b.imag)/d;
	return r;
	******************************************************************/

	/* This algorithm is better, and is pretty obvious:  first divide the
	 * numerators and denominator by whichever of {b.real, b.imag} has
	 * larger magnitude.  The earliest reference I found was to CACM
	 * Algorithm 116 (Complex Division, Robert L. Smith, Stanford
	 * University).  As usual, though, we're still ignoring all IEEE
	 * endcases.
	 */
	 Py_complex r;	/* the result */
 	 const double abs_breal = b.real < 0 ? -b.real : b.real;
	 const double abs_bimag = b.imag < 0 ? -b.imag : b.imag;

	 if (abs_breal >= abs_bimag) {
 		/* divide tops and bottom by b.real */
	 	if (abs_breal == 0.0) {
	 		errno = EDOM;
	 		r.real = r.imag = 0.0;
	 	}
	 	else {
	 		const double ratio = b.imag / b.real;
	 		const double denom = b.real + b.imag * ratio;
	 		r.real = (a.real + a.imag * ratio) / denom;
	 		r.imag = (a.imag - a.real * ratio) / denom;
	 	}
	}
	else {
		/* divide tops and bottom by b.imag */
		const double ratio = b.real / b.imag;
		const double denom = b.real * ratio + b.imag;
		assert(b.imag != 0.0);
		r.real = (a.real * ratio + a.imag) / denom;
		r.imag = (a.imag * ratio - a.real) / denom;
	}
	return r;
}
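
/* A minimal sketch of why the scaling above matters -- added for illustration
   only, kept out of the build with "#if 0"; the demo function name is made up
   and not part of the original source. */
#if 0
static void
c_quot_scaling_demo(void)
{
	/* (1e200+1e200j) / (2e200+2e200j) is exactly 0.5+0j.  The original
	 * formula overflows while computing b.real*b.real + b.imag*b.imag
	 * (~8e400) and ends up with NaNs, whereas Smith's scaled version
	 * keeps every intermediate value within double range. */
	Py_complex a = {1e200, 1e200};
	Py_complex b = {2e200, 2e200};
	Py_complex q = c_quot(a, b);

	assert(q.real == 0.5 && q.imag == 0.0);
}
#endif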

Py_complex
c_pow(Py_complex a, Py_complex b)
{
	Py_complex r;
	double vabs,len,at,phase;
	if (b.real == 0. && b.imag == 0.) {
		r.real = 1.;
		r.imag = 0.;
	}
	else if (a.real == 0. && a.imag == 0.) {
		if (b.imag != 0. || b.real < 0.)
			errno = EDOM;
		r.real = 0.;
		r.imag = 0.;
	}
	else {
		vabs = hypot(a.real,a.imag);
		len = pow(vabs,b.real);
		at = atan2(a.imag, a.real);
		phase = at*b.real;
		if (b.imag != 0.0) {
			len /= exp(at*b.imag);
			phase += b.imag*log(vabs);
		}
		r.real = len*cos(phase);
		r.imag = len*sin(phase);
	}
	return r;
}

static Py_complex
c_powu(Py_complex x, long n)
{
	Py_complex r, p;
	long mask = 1;
	r = c_1;
	p = x;
	while (mask > 0 && n >= mask) {
		if (n & mask)
			r = c_prod(r,p);
		mask <<= 1;
		p = c_prod(p,p);
	}
	return r;
}

static Py_complex
c_powi(Py_complex x, long n)
{
	Py_complex cn;

	if (n > 100 || n < -100) {
		cn.real = (double) n;
		cn.imag = 0.;
		return c_pow(x,cn);
	}
	else if (n > 0)
		return c_powu(x,n);
	else
		return c_quot(c_1,c_powu(x,-n));

}
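
/* A minimal sketch of the square-and-multiply scheme above -- added for
   illustration only, kept out of the build with "#if 0"; the demo function
   name is made up and not part of the original source.  For n == 13 (binary
   1101), c_powu() squares p through x, x**2, x**4, x**8 and folds the powers
   whose bits are set into r, so x**13 == x * x**4 * x**8 takes only a handful
   of complex multiplications instead of twelve.  c_powi() hands |n| > 100 to
   the general c_pow(), presumably to bound the cost and the accumulated
   rounding error of repeated multiplication. */
#if 0
static void
c_powu_demo(void)
{
	Py_complex x = {0.0, 1.0};		/* x == 1j */
	Py_complex r = c_powu(x, 13);

	assert(r.real == 0.0 && r.imag == 1.0);	/* 1j**13 == 1j */
}
#endif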

static PyObject *
complex_subtype_from_c_complex(PyTypeObject *type, Py_complex cval)
{
	PyObject *op;

	op = type->tp_alloc(type, 0);
	if (op != NULL)
		((PyComplexObject *)op)->cval = cval;
	return op;
}

PyObject *
PyComplex_FromCComplex(Py_complex cval)
{
	register PyComplexObject *op;

	/* Inline PyObject_New */
	op = (PyComplexObject *) PyObject_MALLOC(sizeof(PyComplexObject));
	if (op == NULL)
		return PyErr_NoMemory();
	PyObject_INIT(op, &PyComplex_Type);
	op->cval = cval;
	return (PyObject *) op;
}

static PyObject *
complex_subtype_from_doubles(PyTypeObject *type, double real, double imag)
{
	Py_complex c;
	c.real = real;
	c.imag = imag;
	return complex_subtype_from_c_complex(type, c);
}

PyObject *
PyComplex_FromDoubles(double real, double imag)
{
	Py_complex c;
	c.real = real;
	c.imag = imag;
	return PyComplex_FromCComplex(c);
}

double
PyComplex_RealAsDouble(PyObject *op)
{
	if (PyComplex_Check(op)) {
		return ((PyComplexObject *)op)->cval.real;
	}
	else {
		return PyFloat_AsDouble(op);
	}
}

double
PyComplex_ImagAsDouble(PyObject *op)
{
	if (PyComplex_Check(op)) {
		return ((PyComplexObject *)op)->cval.imag;
	}
	else {
		return 0.0;
	}
}

Py_complex
PyComplex_AsCComplex(PyObject *op)
{
	Py_complex cv;
	if (PyComplex_Check(op)) {
		return ((PyComplexObject *)op)->cval;
	}
	else {
		cv.real = PyFloat_AsDouble(op);
		cv.imag = 0.;
		return cv;
	}
}

static void
complex_dealloc(PyObject *op)
{
	op->ob_type->tp_free(op);
}


static void
complex_to_buf(char *buf, int bufsz, PyComplexObject *v, int precision)
{
	char format[32];
	if (v->cval.real == 0.) {
		PyOS_snprintf(format, 32, "%%.%ig", precision);
		PyOS_ascii_formatd(buf, bufsz, format, v->cval.imag);
		strncat(buf, "j", bufsz);
	} else {
		char re[64], im[64];
		/* Format imaginary part with sign, real part without */
		PyOS_snprintf(format, 32, "%%.%ig", precision);
		PyOS_ascii_formatd(re, 64, format, v->cval.real);
		PyOS_snprintf(format, 32, "%%+.%ig", precision);
		PyOS_ascii_formatd(im, 64, format, v->cval.imag);
		PyOS_snprintf(buf, bufsz, "(%s%sj)", re, im);
	}
}

static int
complex_print(PyComplexObject *v, FILE *fp, int flags)
{
	char buf[100];
	complex_to_buf(buf, sizeof(buf), v,
		       (flags & Py_PRINT_RAW) ? PREC_STR : PREC_REPR);
	fputs(buf, fp);
	return 0;
}

static PyObject *
complex_repr(PyComplexObject *v)
{
	char buf[100];
	complex_to_buf(buf, sizeof(buf), v, PREC_REPR);
	return PyString_FromString(buf);
}

static PyObject *
complex_str(PyComplexObject *v)
{
	char buf[100];
	complex_to_buf(buf, sizeof(buf), v, PREC_STR);
	return PyString_FromString(buf);
}

static long
complex_hash(PyComplexObject *v)
{
	long hashreal, hashimag, combined;
	hashreal = _Py_HashDouble(v->cval.real);
	if (hashreal == -1)
		return -1;
	hashimag = _Py_HashDouble(v->cval.imag);
	if (hashimag == -1)
		return -1;
	/* Note:  if the imaginary part is 0, hashimag is 0 now,
	 * so the following returns hashreal unchanged.  This is
	 * important because numbers of different types that
	 * compare equal must have the same hash value, so that
	 * hash(x + 0*j) must equal hash(x).
	 */
	combined = hashreal + 1000003 * hashimag;
	if (combined == -1)
		combined = -2;
	return combined;
}
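
/* A minimal sketch of the hash invariant described above -- added for
   illustration only, kept out of the build with "#if 0"; the demo function
   name is made up and not part of the original source. */
#if 0
static void
complex_hash_demo(void)
{
	/* With a zero imaginary part, hashimag is 0 and the combined hash
	 * collapses to hashreal, so hash(3.5+0j) must equal hash(3.5). */
	PyObject *v = PyComplex_FromDoubles(3.5, 0.0);

	assert(complex_hash((PyComplexObject *)v) == _Py_HashDouble(3.5));
	Py_DECREF(v);
}
#endif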

/* This macro may return! */
#define TO_COMPLEX(obj, c) \
	if (PyComplex_Check(obj)) \
		c = ((PyComplexObject *)(obj))->cval; \
	else if (to_complex(&(obj), &(c)) < 0) \
		return (obj)

static int
to_complex(PyObject **pobj, Py_complex *pc)
{
    PyObject *obj = *pobj;

    pc->real = pc->imag = 0.0;
    if (PyInt_Check(obj)) {
        pc->real = PyInt_AS_LONG(obj);
        return 0;
    }
    if (PyLong_Check(obj)) {
        pc->real = PyLong_AsDouble(obj);
        if (pc->real == -1.0 && PyErr_Occurred()) {
            *pobj = NULL;
            return -1;
        }
        return 0;
    }
    if (PyFloat_Check(obj)) {
        pc->real = PyFloat_AsDouble(obj);
        return 0;
    }
    Py_INCREF(Py_NotImplemented);
    *pobj = Py_NotImplemented;
    return -1;
}
		

static PyObject *
complex_add(PyObject *v, PyObject *w)
{
	Py_complex result;
	Py_complex a, b;
        TO_COMPLEX(v, a);
        TO_COMPLEX(w, b);
	PyFPE_START_PROTECT("complex_add", return 0)
	result = c_sum(a, b);
	PyFPE_END_PROTECT(result)
	return PyComplex_FromCComplex(result);
}

static PyObject *
complex_sub(PyObject *v, PyObject *w)
{
	Py_complex result;
	Py_complex a, b;
        TO_COMPLEX(v, a);
        TO_COMPLEX(w, b);
	PyFPE_START_PROTECT("complex_sub", return 0)
	result = c_diff(a, b);
	PyFPE_END_PROTECT(result)
	return PyComplex_FromCComplex(result);
}

static PyObject *
complex_mul(PyObject *v, PyObject *w)
{
	Py_complex result;
	Py_complex a, b;
        TO_COMPLEX(v, a);
        TO_COMPLEX(w, b);
	PyFPE_START_PROTECT("complex_mul", return 0)
	result = c_prod(a, b);
	PyFPE_END_PROTECT(result)
	return PyComplex_FromCComplex(result);
}

static PyObject *
complex_div(PyObject *v, PyObject *w)
{
	Py_complex quot;
	Py_complex a, b;
        TO_COMPLEX(v, a);
        TO_COMPLEX(w, b);
	PyFPE_START_PROTECT("complex_div", return 0)
	errno = 0;
	quot = c_quot(a, b);
	PyFPE_END_PROTECT(quot)
	if (errno == EDOM) {
		PyErr_SetString(PyExc_ZeroDivisionError, "complex division");
		return NULL;
	}
	return PyComplex_FromCComplex(quot);
}

static PyObject *
complex_remainder(PyObject *v, PyObject *w)
{
        Py_complex div, mod;
	Py_complex a, b;
        TO_COMPLEX(v, a);
        TO_COMPLEX(w, b);

	if (PyErr_Warn(PyExc_DeprecationWarning,
		       "complex divmod(), // and % are deprecated") < 0)
		return NULL;

	errno = 0;
	div = c_quot(a, b); /* The raw quotient. */
	if (errno == EDOM) {
		PyErr_SetString(PyExc_ZeroDivisionError, "complex remainder");
		return NULL;
	}
	div.real = floor(div.real); /* Use the floor of the real part. */
	div.imag = 0.0;
	mod = c_diff(a, c_prod(b, div));

	return PyComplex_FromCComplex(mod);
}


static PyObject *
complex_divmod(PyObject *v, PyObject *w)
{
        Py_complex div, mod;
	PyObject *d, *m, *z;
	Py_complex a, b;
        TO_COMPLEX(v, a);
        TO_COMPLEX(w, b);

	if (PyErr_Warn(PyExc_DeprecationWarning,
		       "complex divmod(), // and % are deprecated") < 0)
		return NULL;

	errno = 0;
	div = c_quot(a, b); /* The raw quotient. */
	if (errno == EDOM) {
		PyErr_SetString(PyExc_ZeroDivisionError, "complex divmod()");
		return NULL;
	}
	div.real = floor(div.real); /* Use the floor of the real part. */
	div.imag = 0.0;
	mod = c_diff(a, c_prod(b, div));
	d = PyComplex_FromCComplex(div);
	m = PyComplex_FromCComplex(mod);
	z = PyTuple_Pack(2, d, m);
	Py_XDECREF(d);
	Py_XDECREF(m);
	return z;
}

static PyObject *
complex_pow(PyObject *v, PyObject *w, PyObject *z)
{
	Py_complex p;
	Py_complex exponent;
	long int_exponent;
	Py_complex a, b;
        TO_COMPLEX(v, a);
        TO_COMPLEX(w, b);

 	if (z != Py_None) {
		PyErr_SetString(PyExc_ValueError, "complex modulo");
		return NULL;
	}
	PyFPE_START_PROTECT("complex_pow", return 0)
	errno = 0;
	exponent = b;
	int_exponent = (long)exponent.real;
	if (exponent.imag == 0. && exponent.real == int_exponent)
		p = c_powi(a, int_exponent);
	else
		p = c_pow(a, exponent);

	PyFPE_END_PROTECT(p)
	Py_ADJUST_ERANGE2(p.real, p.imag);
	if (errno == EDOM) {
		PyErr_SetString(PyExc_ZeroDivisionError,
				"0.0 to a negative or complex power");
		return NULL;
	}
	else if (errno == ERANGE) {
		PyErr_SetString(PyExc_OverflowError,
				"complex exponentiaion");
		return NULL;
	}
	return PyComplex_FromCComplex(p);
}

static PyObject *
complex_int_div(PyObject *v, PyObject *w)
{
	PyObject *t, *r;
	
	t = complex_divmod(v, w);
	if (t != NULL) {
		r = PyTuple_GET_ITEM(t, 0);
		Py_INCREF(r);
		Py_DECREF(t);
		return r;
	}
	return NULL;
}

static PyObject *
complex_neg(PyComplexObject *v)
{
	Py_complex neg;
	neg.real = -v->cval.real;
	neg.imag = -v->cval.imag;
	return PyComplex_FromCComplex(neg);
}

static PyObject *
complex_pos(PyComplexObject *v)
{
	if (PyComplex_CheckExact(v)) {
		Py_INCREF(v);
		return (PyObject *)v;
	}
	else
		return PyComplex_FromCComplex(v->cval);
}

static PyObject *
complex_abs(PyComplexObject *v)
{
	double result;
	PyFPE_START_PROTECT("complex_abs", return 0)
	result = hypot(v->cval.real,v->cval.imag);
	PyFPE_END_PROTECT(result)
	return PyFloat_FromDouble(result);
}

static int
complex_nonzero(PyComplexObject *v)
{
	return v->cval.real != 0.0 || v->cval.imag != 0.0;
}

static int
complex_coerce(PyObject **pv, PyObject **pw)
{
	Py_complex cval;
	cval.imag = 0.;
	if (PyInt_Check(*pw)) {
		cval.real = (double)PyInt_AsLong(*pw);
		*pw = PyComplex_FromCComplex(cval);
		Py_INCREF(*pv);
		return 0;
	}
	else if (PyLong_Check(*pw)) {
		cval.real = PyLong_AsDouble(*pw);
		if (cval.real == -1.0 && PyErr_Occurred())
			return -1;
		*pw = PyComplex_FromCComplex(cval);
		Py_INCREF(*pv);
		return 0;
	}
	else if (PyFloat_Check(*pw)) {
		cval.real = PyFloat_AsDouble(*pw);
		*pw = PyComplex_FromCComplex(cval);
		Py_INCREF(*pv);
		return 0;
	}
	else if (PyComplex_Check(*pw)) {
		Py_INCREF(*pv);
		Py_INCREF(*pw);
		return 0;
	}
	return 1; /* Can't do it */
}

static PyObject *
complex_richcompare(PyObject *v, PyObject *w, int op)
{
	int c;
	Py_complex i, j;
	PyObject *res;

	c = PyNumber_CoerceEx(&v, &w);
	if (c < 0)
		return NULL;
	if (c > 0) {
		Py_INCREF(Py_NotImplemented);
		return Py_NotImplemented;
	}
	/* Make sure both arguments are complex. */
	if (!(PyComplex_Check(v) && PyComplex_Check(w))) {
		Py_DECREF(v);
		Py_DECREF(w);
		Py_INCREF(Py_NotImplemented);
		return Py_NotImplemented;
	}

	i = ((PyComplexObject *)v)->cval;
	j = ((PyComplexObject *)w)->cval;
	Py_DECREF(v);
	Py_DECREF(w);

	if (op != Py_EQ && op != Py_NE) {
		PyErr_SetString(PyExc_TypeError,
			"no ordering relation is defined for complex numbers");
		return NULL;
	}

	if ((i.real == j.real && i.imag == j.imag) == (op == Py_EQ))
		res = Py_True;
	else
		res = Py_False;

	Py_INCREF(res);
	return res;
}

static PyObject *
complex_int(PyObject *v)
{
	PyErr_SetString(PyExc_TypeError,
		   "can't convert complex to int; use int(abs(z))");
	return NULL;
}

static PyObject *
complex_long(PyObject *v)
{
	PyErr_SetString(PyExc_TypeError,
		   "can't convert complex to long; use long(abs(z))");
	return NULL;
}

static PyObject *
complex_float(PyObject *v)
{
	PyErr_SetString(PyExc_TypeError,
		   "can't convert complex to float; use abs(z)");
	return NULL;
}

static PyObject *
complex_conjugate(PyObject *self)
{
	Py_complex c;
	c = ((PyComplexObject *)self)->cval;
	c.imag = -c.imag;
	return PyComplex_FromCComplex(c);
}

static PyObject *
complex_getnewargs(PyComplexObject *v)
{
	return Py_BuildValue("(D)", &v->cval);
}

static PyMethodDef complex_methods[] = {
	{"conjugate",	(PyCFunction)complex_conjugate,	METH_NOARGS},
	{"__getnewargs__",	(PyCFunction)complex_getnewargs,	METH_NOARGS},
	{NULL,		NULL}		/* sentinel */
};

static PyMemberDef complex_members[] = {
	{"real", T_DOUBLE, offsetof(PyComplexObject, cval.real), READONLY,
	 "the real part of a complex number"},
	{"imag", T_DOUBLE, offsetof(PyComplexObject, cval.imag), READONLY,
	 "the imaginary part of a complex number"},
	{0},
};

static PyObject *
complex_subtype_from_string(PyTypeObject *type, PyObject *v)
{
	const char *s, *start;
	char *end;
	double x=0.0, y=0.0, z;
	int got_re=0, got_im=0, done=0;
	int digit_or_dot;
	int sw_error=0;
	int sign;
	char buffer[256]; /* For errors */
#ifdef Py_USING_UNICODE
	char s_buffer[256];
#endif
	Py_ssize_t len;

	if (PyString_Check(v)) {
		s = PyString_AS_STRING(v);
		len = PyString_GET_SIZE(v);
	}
#ifdef Py_USING_UNICODE
	else if (PyUnicode_Check(v)) {
		if (PyUnicode_GET_SIZE(v) >= (Py_ssize_t)sizeof(s_buffer)) {
			PyErr_SetString(PyExc_ValueError,
				 "complex() literal too large to convert");
			return NULL;
		}
		if (PyUnicode_EncodeDecimal(PyUnicode_AS_UNICODE(v),
					    PyUnicode_GET_SIZE(v),
					    s_buffer,
					    NULL))
			return NULL;
		s = s_buffer;
		len = strlen(s);
	}
#endif
	else if (PyObject_AsCharBuffer(v, &s, &len)) {
		PyErr_SetString(PyExc_TypeError,
				"complex() arg is not a string");
		return NULL;
	}

	/* position on first nonblank */
	start = s;
	while (*s && isspace(Py_CHARMASK(*s)))
		s++;
	if (s[0] == '\0') {
		PyErr_SetString(PyExc_ValueError,
				"complex() arg is an empty string");
		return NULL;
	}

	z = -1.0;
	sign = 1;
	do {

		switch (*s) {

		case '\0':
			if (s-start != len) {
				PyErr_SetString(
					PyExc_ValueError,
					"complex() arg contains a null byte");
				return NULL;
			}
			if(!done) sw_error=1;
			break;

		case '-':
			sign = -1;
				/* Fallthrough */
		case '+':
			if (done)  sw_error=1;
			s++;
			if  (  *s=='\0'||*s=='+'||*s=='-'  ||
			       isspace(Py_CHARMASK(*s))  )  sw_error=1;
			break;

		case 'J':
		case 'j':
			if (got_im || done) {
				sw_error = 1;
				break;
			}
			if  (z<0.0) {
				y=sign;
			}
			else{
				y=sign*z;
			}
			got_im=1;
			s++;
			if  (*s!='+' && *s!='-' )
				done=1;
			break;

		default:
			if (isspace(Py_CHARMASK(*s))) {
				while (*s && isspace(Py_CHARMASK(*s)))
					s++;
				if (s[0] != '\0')
					sw_error=1;
				else
					done = 1;
				break;
			}
			digit_or_dot =
				(*s=='.' || isdigit(Py_CHARMASK(*s)));
			if  (done||!digit_or_dot) {
				sw_error=1;
				break;
			}
			errno = 0;
			PyFPE_START_PROTECT("strtod", return 0)
				z = PyOS_ascii_strtod(s, &end) ;
			PyFPE_END_PROTECT(z)
				if (errno != 0) {
					PyOS_snprintf(buffer, sizeof(buffer),
					  "float() out of range: %.150s", s);
					PyErr_SetString(
						PyExc_ValueError,
						buffer);
					return NULL;
				}
			s=end;
			if  (*s=='J' || *s=='j') {

				break;
			}
			if  (got_re) {
				sw_error=1;
				break;
			}

				/* accept a real part */
			x=sign*z;
			got_re=1;
			if  (got_im)  done=1;
			z = -1.0;
			sign = 1;
			break;

		}  /* end of switch  */

	} while (s - start < len && !sw_error);

	if (sw_error) {
		PyErr_SetString(PyExc_ValueError,
				"complex() arg is a malformed string");
		return NULL;
	}

	return complex_subtype_from_doubles(type, x, y);
}

static PyObject *
complex_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
	PyObject *r, *i, *tmp, *f;
	PyNumberMethods *nbr, *nbi = NULL;
	Py_complex cr, ci;
	int own_r = 0;
	static PyObject *complexstr;
	static char *kwlist[] = {"real", "imag", 0};

	r = Py_False;
	i = NULL;
	if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO:complex", kwlist,
					 &r, &i))
		return NULL;

	/* Special-case for single argument that is already complex */
	if (PyComplex_CheckExact(r) && i == NULL &&
	    type == &PyComplex_Type) {
		/* Note that we can't know whether it's safe to return
		   a complex *subclass* instance as-is, hence the restriction
		   to exact complexes here.  */
		Py_INCREF(r);
		return r;
	}
	if (PyString_Check(r) || PyUnicode_Check(r)) {
		if (i != NULL) {
			PyErr_SetString(PyExc_TypeError,
					"complex() can't take second arg"
					" if first is a string");
			return NULL;
                }
		return complex_subtype_from_string(type, r);
	}
	if (i != NULL && (PyString_Check(i) || PyUnicode_Check(i))) {
		PyErr_SetString(PyExc_TypeError,
				"complex() second arg can't be a string");
		return NULL;
	}

	/* XXX Hack to support classes with __complex__ method */
	if (complexstr == NULL) {
		complexstr = PyString_InternFromString("__complex__");
		if (complexstr == NULL)
			return NULL;
	}
	f = PyObject_GetAttr(r, complexstr);
	if (f == NULL)
		PyErr_Clear();
	else {
		PyObject *args = PyTuple_New(0);
		if (args == NULL)
			return NULL;
		r = PyEval_CallObject(f, args);
		Py_DECREF(args);
		Py_DECREF(f);
		if (r == NULL)
			return NULL;
		own_r = 1;
	}
	nbr = r->ob_type->tp_as_number;
	if (i != NULL)
		nbi = i->ob_type->tp_as_number;
	if (nbr == NULL || nbr->nb_float == NULL ||
	    ((i != NULL) && (nbi == NULL || nbi->nb_float == NULL))) {
		PyErr_SetString(PyExc_TypeError,
			   "complex() argument must be a string or a number");
		if (own_r) {
			Py_DECREF(r);
		}
		return NULL;
	}
	if (PyComplex_Check(r)) {
		/* Note that if r is of a complex subtype, we're only
		   retaining its real & imag parts here, and the return
		   value is (properly) of the builtin complex type. */
		cr = ((PyComplexObject*)r)->cval;
		if (own_r) {
			Py_DECREF(r);
		}
	}
	else {
		tmp = PyNumber_Float(r);
		if (own_r) {
			Py_DECREF(r);
		}
		if (tmp == NULL)
			return NULL;
		if (!PyFloat_Check(tmp)) {
			PyErr_SetString(PyExc_TypeError,
					"float(r) didn't return a float");
			Py_DECREF(tmp);
			return NULL;
		}
		cr.real = PyFloat_AsDouble(tmp);
		Py_DECREF(tmp);
		cr.imag = 0.0;
	}
	if (i == NULL) {
		ci.real = 0.0;
		ci.imag = 0.0;
	}
	else if (PyComplex_Check(i))
		ci = ((PyComplexObject*)i)->cval;
	else {
		tmp = (*nbi->nb_float)(i);
		if (tmp == NULL)
			return NULL;
		ci.real = PyFloat_AsDouble(tmp);
		Py_DECREF(tmp);
		ci.imag = 0.;
	}
	cr.real -= ci.imag;
	cr.imag += ci.real;
	return complex_subtype_from_c_complex(type, cr);
}
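
/* Worked example (added for illustration; not in the original source):  the
   two assignments at the end of complex_new() implement real + imag*1j even
   when the "imag" argument is itself complex.  For complex(1+2j, 3+4j):

	cr = (1, 2), ci = (3, 4)
	cr.real -= ci.imag   =>   1 - 4 = -3
	cr.imag += ci.real   =>   2 + 3 =  5

   i.e. (1+2j) + (3+4j)*1j == -3+5j. */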

PyDoc_STRVAR(complex_doc,
"complex(real[, imag]) -> complex number\n"
"\n"
"Create a complex number from a real part and an optional imaginary part.\n"
"This is equivalent to (real + imag*1j) where imag defaults to 0.");

static PyNumberMethods complex_as_number = {
	(binaryfunc)complex_add, 		/* nb_add */
	(binaryfunc)complex_sub, 		/* nb_subtract */
	(binaryfunc)complex_mul, 		/* nb_multiply */
	(binaryfunc)complex_remainder,		/* nb_remainder */
	(binaryfunc)complex_divmod,		/* nb_divmod */
	(ternaryfunc)complex_pow,		/* nb_power */
	(unaryfunc)complex_neg,			/* nb_negative */
	(unaryfunc)complex_pos,			/* nb_positive */
	(unaryfunc)complex_abs,			/* nb_absolute */
	(inquiry)complex_nonzero,		/* nb_nonzero */
	0,					/* nb_invert */
	0,					/* nb_lshift */
	0,					/* nb_rshift */
	0,					/* nb_and */
	0,					/* nb_xor */
	0,					/* nb_or */
	complex_coerce,				/* nb_coerce */
	complex_int,				/* nb_int */
	complex_long,				/* nb_long */
	complex_float,				/* nb_float */
	0,					/* nb_oct */
	0,					/* nb_hex */
	0,					/* nb_inplace_add */
	0,					/* nb_inplace_subtract */
	0,					/* nb_inplace_multiply*/
	0,					/* nb_inplace_remainder */
	0, 					/* nb_inplace_power */
	0,					/* nb_inplace_lshift */
	0,					/* nb_inplace_rshift */
	0,					/* nb_inplace_and */
	0,					/* nb_inplace_xor */
	0,					/* nb_inplace_or */
	(binaryfunc)complex_int_div,		/* nb_floor_divide */
	(binaryfunc)complex_div,		/* nb_true_divide */
	0,					/* nb_inplace_floor_divide */
	0,					/* nb_inplace_true_divide */
};

PyTypeObject PyComplex_Type = {
	PyObject_HEAD_INIT(&PyType_Type)
	0,
	"complex",
	sizeof(PyComplexObject),
	0,
	complex_dealloc,			/* tp_dealloc */
	(printfunc)complex_print,		/* tp_print */
	0,					/* tp_getattr */
	0,					/* tp_setattr */
	0,					/* tp_compare */
	(reprfunc)complex_repr,			/* tp_repr */
	&complex_as_number,    			/* tp_as_number */
	0,					/* tp_as_sequence */
	0,					/* tp_as_mapping */
	(hashfunc)complex_hash, 		/* tp_hash */
	0,					/* tp_call */
	(reprfunc)complex_str,			/* tp_str */
	PyObject_GenericGetAttr,		/* tp_getattro */
	0,					/* tp_setattro */
	0,					/* tp_as_buffer */
	Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
	complex_doc,				/* tp_doc */
	0,					/* tp_traverse */
	0,					/* tp_clear */
	complex_richcompare,			/* tp_richcompare */
	0,					/* tp_weaklistoffset */
	0,					/* tp_iter */
	0,					/* tp_iternext */
	complex_methods,			/* tp_methods */
	complex_members,			/* tp_members */
	0,					/* tp_getset */
	0,					/* tp_base */
	0,					/* tp_dict */
	0,					/* tp_descr_get */
	0,					/* tp_descr_set */
	0,					/* tp_dictoffset */
	0,					/* tp_init */
	PyType_GenericAlloc,			/* tp_alloc */
	complex_new,				/* tp_new */
	PyObject_Del,           		/* tp_free */
};

#endif
in memory with a value USAGE herr_t H5Dfill(fill, fill_type, space, buf, buf_type) const void *fill; IN: Pointer to fill value to use hid_t fill_type_id; IN: Datatype of the fill value void *buf; IN/OUT: Memory buffer to fill selection within hid_t buf_type_id; IN: Datatype of the elements in buffer hid_t space_id; IN: Dataspace describing memory buffer & containing selection to use. RETURNS Non-negative on success/Negative on failure. DESCRIPTION Use the selection in the dataspace to fill elements in a memory buffer. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS If "fill" parameter is NULL, use all zeros as fill value EXAMPLES REVISION LOG --------------------------------------------------------------------------*/ herr_t H5Dfill(const void *fill, hid_t fill_type_id, void *buf, hid_t buf_type_id, hid_t space_id) { H5S_t *space; /* Dataspace */ H5T_t *fill_type; /* Fill-value datatype */ H5T_t *buf_type; /* Buffer datatype */ herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_API(H5Dfill, FAIL) H5TRACE5("e","xixii",fill,fill_type_id,buf,buf_type_id,space_id); /* Check args */ if (buf==NULL) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid buffer") if (NULL == (space=H5I_object_verify(space_id, H5I_DATASPACE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not a dataspace") if (NULL == (fill_type=H5I_object_verify(fill_type_id, H5I_DATATYPE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not a datatype") if (NULL == (buf_type=H5I_object_verify(buf_type_id, H5I_DATATYPE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not a datatype") /* Fill the selection in the memory buffer */ if(H5D_fill(fill,fill_type,buf,buf_type,space, H5AC_dxpl_id)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "filling selection failed") done: FUNC_LEAVE_API(ret_value) } /* H5Dfill() */ /*-------------------------------------------------------------------------- NAME H5D_fill PURPOSE Fill a selection in memory with a value (internal version) USAGE herr_t H5D_fill(fill, fill_type, buf, buf_type, space) const void *fill; IN: Pointer to fill value to use H5T_t *fill_type; IN: Datatype of the fill value void *buf; IN/OUT: Memory buffer to fill selection within H5T_t *buf_type; IN: Datatype of the elements in buffer H5S_t *space; IN: Dataspace describing memory buffer & containing selection to use. RETURNS Non-negative on success/Negative on failure. DESCRIPTION Use the selection in the dataspace to fill elements in a memory buffer. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS If "fill" parameter is NULL, use all zeros as fill value. If "fill_type" parameter is NULL, use "buf_type" for the fill value datatype. 
EXAMPLES REVISION LOG --------------------------------------------------------------------------*/ static herr_t H5D_fill(const void *fill, const H5T_t *fill_type, void *buf, const H5T_t *buf_type, const H5S_t *space, hid_t dxpl_id) { H5T_path_t *tpath = NULL; /* Conversion information*/ uint8_t *tconv_buf = NULL; /* Data type conv buffer */ uint8_t *bkg_buf = NULL; /* Temp conversion buffer */ hid_t src_id = -1, dst_id = -1; /* Temporary type IDs */ size_t src_type_size; /* Size of source type */ size_t dst_type_size; /* Size of destination type*/ size_t buf_size; /* Desired buffer size */ herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_fill) /* Check args */ assert(fill_type); assert(buf); assert(buf_type); assert(space); /* Make sure the dataspace has an extent set (or is NULL) */ if( !(H5S_has_extent(space)) ) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dataspace extent has not been set") /* Get the memory and file datatype sizes */ src_type_size = H5T_get_size(fill_type); dst_type_size = H5T_get_size(buf_type); /* Get the maximum buffer size needed and allocate it */ buf_size=MAX(src_type_size,dst_type_size); if (NULL==(tconv_buf = H5FL_BLK_MALLOC(type_elem,buf_size)) || NULL==(bkg_buf = H5FL_BLK_CALLOC(type_elem,buf_size))) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed") /* Check for actual fill value to replicate */ if(fill==NULL) /* If there's no fill value, just use zeros */ HDmemset(tconv_buf,0,dst_type_size); else { /* Copy the user's data into the buffer for conversion */ HDmemcpy(tconv_buf,fill,src_type_size); /* Set up type conversion function */ if (NULL == (tpath = H5T_path_find(fill_type, buf_type, NULL, NULL, dxpl_id))) HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to convert between src and dest data types") /* Convert memory buffer into disk buffer */ if (!H5T_path_noop(tpath)) { if ((src_id = H5I_register(H5I_DATATYPE, H5T_copy(fill_type, H5T_COPY_ALL)))<0 || (dst_id = H5I_register(H5I_DATATYPE, H5T_copy(buf_type, H5T_COPY_ALL)))<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "unable to register types for conversion") /* Perform data type conversion */ if (H5T_convert(tpath, src_id, dst_id, 1, 0, 0, tconv_buf, bkg_buf, dxpl_id)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "data type conversion failed") } /* end if */ } /* end if */ /* Fill the selection in the memory buffer */ if(H5S_select_fill(tconv_buf, dst_type_size, space, buf)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "filling selection failed") done: if (tconv_buf) H5FL_BLK_FREE(type_elem,tconv_buf); if (bkg_buf) H5FL_BLK_FREE(type_elem,bkg_buf); FUNC_LEAVE_NOAPI(ret_value) } /* H5D_fill() */ /*-------------------------------------------------------------------------- NAME H5D_get_dxpl_cache_real PURPOSE Get all the values for the DXPL cache. USAGE herr_t H5D_get_dxpl_cache_real(dxpl_id, cache) hid_t dxpl_id; IN: DXPL to query H5D_dxpl_cache_t *cache;IN/OUT: DXPL cache to fill with values RETURNS Non-negative on success/Negative on failure. DESCRIPTION Query all the values from a DXPL that are needed by internal routines within the library. 
GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS EXAMPLES REVISION LOG --------------------------------------------------------------------------*/ herr_t H5D_get_dxpl_cache_real(hid_t dxpl_id, H5D_dxpl_cache_t *cache) { H5P_genplist_t *dx_plist; /* Data transfer property list */ herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5D_get_dxpl_cache_real,FAIL) /* Check args */ assert(cache); /* Get the dataset transfer property list */ if (NULL == (dx_plist = H5I_object(dxpl_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list") /* Get maximum temporary buffer size */ if(H5P_get(dx_plist, H5D_XFER_MAX_TEMP_BUF_NAME, &cache->max_temp_buf)<0) HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve maximum temporary buffer size") /* Get temporary buffer pointer */ if(H5P_get(dx_plist, H5D_XFER_TCONV_BUF_NAME, &cache->tconv_buf)<0) HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve temporary buffer pointer") /* Get background buffer pointer */ if(H5P_get(dx_plist, H5D_XFER_BKGR_BUF_NAME, &cache->bkgr_buf)<0) HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve background buffer pointer") /* Get background buffer type */ if(H5P_get(dx_plist, H5D_XFER_BKGR_BUF_TYPE_NAME, &cache->bkgr_buf_type)<0) HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve background buffer type") /* Get B-tree split ratios */ if(H5P_get(dx_plist, H5D_XFER_BTREE_SPLIT_RATIO_NAME, &cache->btree_split_ratio)<0) HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve B-tree split ratios") /* Get I/O vector size */ if(H5P_get(dx_plist, H5D_XFER_HYPER_VECTOR_SIZE_NAME, &cache->vec_size)<0) HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve I/O vector size") #ifdef H5_HAVE_PARALLEL /* Collect Parallel I/O information for possible later use */ if(H5P_get(dx_plist, H5D_XFER_IO_XFER_MODE_NAME, &cache->xfer_mode)<0) HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve parallel transfer method") #endif /*H5_HAVE_PARALLEL*/ /* Get error detection properties */ if(H5P_get(dx_plist, H5D_XFER_EDC_NAME, &cache->err_detect)<0) HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve error detection info") /* Get filter callback function */ if(H5P_get(dx_plist, H5D_XFER_FILTER_CB_NAME, &cache->filter_cb)<0) HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve filter callback function") /* Get the data transform property */ if(H5P_get(dx_plist, H5D_XFER_XFORM_NAME, &cache->data_xform_prop)<0) HGOTO_ERROR (H5E_PLIST, H5E_CANTGET, FAIL, "Can't retrieve data transform info") done: FUNC_LEAVE_NOAPI(ret_value) } /* H5D_get_dxpl_cache_real() */ /*-------------------------------------------------------------------------- NAME H5D_get_dxpl_cache PURPOSE Get all the values for the DXPL cache. USAGE herr_t H5D_get_dxpl_cache(dxpl_id, cache) hid_t dxpl_id; IN: DXPL to query H5D_dxpl_cache_t *cache;IN/OUT: DXPL cache to fill with values RETURNS Non-negative on success/Negative on failure. DESCRIPTION Query all the values from a DXPL that are needed by internal routines within the library. GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS The CACHE pointer should point at already allocated memory to place non-default property list info. If a default property list is used, the CACHE pointer will be changed to point at the default information. 
EXAMPLES REVISION LOG --------------------------------------------------------------------------*/ herr_t H5D_get_dxpl_cache(hid_t dxpl_id, H5D_dxpl_cache_t **cache) { herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(H5D_get_dxpl_cache,FAIL) /* Check args */ assert(cache); /* Check for the default DXPL */ if(dxpl_id==H5P_DATASET_XFER_DEFAULT) *cache=&H5D_def_dxpl_cache; else if(H5D_get_dxpl_cache_real(dxpl_id,*cache)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTGET, FAIL, "Can't retrieve DXPL values") done: FUNC_LEAVE_NOAPI(ret_value) } /* H5D_get_dxpl_cache() */ /*------------------------------------------------------------------------- * Function: H5Dread * * Purpose: Reads (part of) a DSET from the file into application * memory BUF. The part of the dataset to read is defined with * MEM_SPACE_ID and FILE_SPACE_ID. The data points are * converted from their file type to the MEM_TYPE_ID specified. * Additional miscellaneous data transfer properties can be * passed to this function with the PLIST_ID argument. * * The FILE_SPACE_ID can be the constant H5S_ALL which indicates * that the entire file data space is to be referenced. * * The MEM_SPACE_ID can be the constant H5S_ALL in which case * the memory data space is the same as the file data space * defined when the dataset was created. * * The number of elements in the memory data space must match * the number of elements in the file data space. * * The PLIST_ID can be the constant H5P_DEFAULT in which * case the default data transfer properties are used. * * Return: Non-negative on success/Negative on failure * * Errors: * ARGS BADTYPE Not a data space. * ARGS BADTYPE Not a data type. * ARGS BADTYPE Not a dataset. * ARGS BADTYPE Not xfer parms. * ARGS BADVALUE No output buffer. * DATASET READERROR Can't read data. 
* * Programmer: Robb Matzke * Thursday, December 4, 1997 * * Modifications: * *------------------------------------------------------------------------- */ herr_t H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, void *buf/*out*/) { H5D_t *dset = NULL; const H5S_t *mem_space = NULL; const H5S_t *file_space = NULL; herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_API(H5Dread, FAIL) H5TRACE6("e","iiiiix",dset_id,mem_type_id,mem_space_id,file_space_id, plist_id,buf); /* check arguments */ if (NULL == (dset = H5I_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") if (NULL == dset->ent.file) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") if (H5S_ALL != mem_space_id) { if (NULL == (mem_space = H5I_object_verify(mem_space_id, H5I_DATASPACE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space") /* Check for valid selection */ if(H5S_SELECT_VALID(mem_space)!=TRUE) HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent") } if (H5S_ALL != file_space_id) { if (NULL == (file_space = H5I_object_verify(file_space_id, H5I_DATASPACE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space") /* Check for valid selection */ if(H5S_SELECT_VALID(file_space)!=TRUE) HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent") } /* Get the default dataset transfer property list if the user didn't provide one */ if (H5P_DEFAULT == plist_id) plist_id= H5P_DATASET_XFER_DEFAULT; else if (TRUE!=H5P_isa_class(plist_id,H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms") if (!buf && H5S_GET_SELECT_NPOINTS(file_space)!=0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no output buffer") /* read raw data */ if (H5D_read(dset, mem_type_id, mem_space, file_space, plist_id, buf/*out*/) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data") done: FUNC_LEAVE_API(ret_value) } /*------------------------------------------------------------------------- * Function: H5Dwrite * * Purpose: Writes (part of) a DSET from application memory BUF to the * file. The part of the dataset to write is defined with the * MEM_SPACE_ID and FILE_SPACE_ID arguments. The data points * are converted from their current type (MEM_TYPE_ID) to their * file data type. Additional miscellaneous data transfer * properties can be passed to this function with the * PLIST_ID argument. * * The FILE_SPACE_ID can be the constant H5S_ALL which indicates * that the entire file data space is to be referenced. * * The MEM_SPACE_ID can be the constant H5S_ALL in which case * the memory data space is the same as the file data space * defined when the dataset was created. * * The number of elements in the memory data space must match * the number of elements in the file data space. * * The PLIST_ID can be the constant H5P_DEFAULT in which * case the default data transfer properties are used. 
* * Return: Non-negative on success/Negative on failure * * Errors: * * Programmer: Robb Matzke * Thursday, December 4, 1997 * * Modifications: * *------------------------------------------------------------------------- */ herr_t H5Dwrite(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, const void *buf) { H5D_t *dset = NULL; const H5S_t *mem_space = NULL; const H5S_t *file_space = NULL; herr_t ret_value=SUCCEED; /* Return value */ FUNC_ENTER_API(H5Dwrite, FAIL) H5TRACE6("e","iiiiix",dset_id,mem_type_id,mem_space_id,file_space_id, plist_id,buf); /* check arguments */ if (NULL == (dset = H5I_object_verify(dset_id, H5I_DATASET))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") if (NULL == dset->ent.file) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") if (H5S_ALL != mem_space_id) { if (NULL == (mem_space = H5I_object_verify(mem_space_id, H5I_DATASPACE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space") /* Check for valid selection */ if (H5S_SELECT_VALID(mem_space)!=TRUE) HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent") } if (H5S_ALL != file_space_id) { if (NULL == (file_space = H5I_object_verify(file_space_id, H5I_DATASPACE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space") /* Check for valid selection */ if (H5S_SELECT_VALID(file_space)!=TRUE) HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent") } /* Get the default dataset transfer property list if the user didn't provide one */ if (H5P_DEFAULT == plist_id) plist_id= H5P_DATASET_XFER_DEFAULT; else if (TRUE!=H5P_isa_class(plist_id,H5P_DATASET_XFER)) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms") if (!buf && H5S_GET_SELECT_NPOINTS(file_space)!=0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no output buffer") /* write raw data */ if (H5D_write(dset, mem_type_id, mem_space, file_space, plist_id, buf) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data") done: FUNC_LEAVE_API(ret_value) } /*------------------------------------------------------------------------- * Function: H5D_read * * Purpose: Reads (part of) a DATASET into application memory BUF. See * H5Dread() for complete details. * * Return: Non-negative on success/Negative on failure * * Programmer: Robb Matzke * Thursday, December 4, 1997 * * Modifications: * Robb Matzke, 1998-06-09 * The data space is no longer cached in the dataset struct. * * Robb Matzke, 1998-08-11 * Added timing calls around all the data space I/O functions. * * rky, 1998-09-18 * Added must_convert to do non-optimized read when necessary. * * Quincey Koziol, 1999-07-02 * Changed xfer_parms parameter to xfer plist parameter, so it * could be passed to H5T_convert. * * Albert Cheng, 2000-11-21 * Added the code that when it detects it is not safe to process a * COLLECTIVE read request without hanging, it changes it to * INDEPENDENT calls. * * Albert Cheng, 2000-11-27 * Changed to use the optimized MPIO transfer for Collective calls only. * * Raymond Lu, 2001-10-2 * Changed the way to retrieve property for generic property list. * * Raymond Lu, 2002-2-26 * For the new fill value design, data space can either be allocated * or not allocated at this stage. Fill value or data from space is * returned to outgoing buffer. 
* * QAK - 2002/04/02 * Removed the must_convert parameter and move preconditions to * H5S_<foo>_opt_possible() routine * *------------------------------------------------------------------------- */ static herr_t H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, const H5S_t *file_space, hid_t dxpl_id, void *buf/*out*/) { hssize_t snelmts; /*total number of elmts (signed) */ hsize_t nelmts; /*total number of elmts */ H5T_path_t *tpath = NULL; /*type conversion info */ const H5T_t *mem_type = NULL; /* Memory datatype */ H5D_io_info_t io_info; /* Dataset I/O info */ hbool_t use_par_opt_io=FALSE; /* Whether the 'optimized' I/O routines with be parallel */ #ifdef H5_HAVE_PARALLEL hbool_t xfer_mode_changed=FALSE; /* Whether the transfer mode was changed */ #ifdef H5_HAVE_INSTRUMENTED_LIBRARY int prop_value,new_value; htri_t check_prop; #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ #endif /*H5_HAVE_PARALLEL*/ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */ unsigned sconv_flags=0; /* Flags for the space conversion */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_read) /* check args */ assert(dataset && dataset->ent.file); /* Get memory datatype */ if (NULL == (mem_type = H5I_object_verify(mem_type_id, H5I_DATATYPE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type") if (!file_space) file_space = dataset->shared->space; if (!mem_space) mem_space = file_space; if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space))<0) HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection") H5_ASSIGN_OVERFLOW(nelmts,snelmts,hssize_t,hsize_t); /* Fill the DXPL cache values for later use */ if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") #ifdef H5_HAVE_PARALLEL /* Collective access is not permissible without a MPI based VFD */ if (dxpl_cache->xfer_mode==H5FD_MPIO_COLLECTIVE && !IS_H5FD_MPI(dataset->ent.file)) HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "collective access for MPI-based drivers only") /* Set the "parallel I/O possible" flag, for H5S_find(), if we are doing collective I/O */ /* (Don't set the parallel I/O possible flag for the MPI-posix driver, since it doesn't do real collective I/O) */ if (H5S_mpi_opt_types_g && dxpl_cache->xfer_mode==H5FD_MPIO_COLLECTIVE && !IS_H5FD_MPIPOSIX(dataset->ent.file)) sconv_flags |= H5S_CONV_PAR_IO_POSSIBLE; #endif /*H5_HAVE_PARALLEL*/ /* Make certain that the number of elements in each selection is the same */ if (nelmts!=(hsize_t)H5S_GET_SELECT_NPOINTS(file_space)) HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "src and dest data spaces have different sizes") /* Make sure that both selections have their extents set */ if( !(H5S_has_extent(file_space)) ) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file dataspace does not have extent set") if( !(H5S_has_extent(mem_space)) ) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "memory dataspace does not have extent set") /* Retrieve dataset properties */ /* <none needed in the general case> */ /* If space hasn't been allocated and not using external storage, * return fill value to buffer if fill time is upon allocation, or * do nothing if fill time is never. If the dataset is compact and * fill time is NEVER, there is no way to tell whether part of data * has been overwritten. So just proceed in reading. 
*/ if(nelmts > 0 && dataset->shared->efl.nused==0 && ((dataset->shared->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->shared->layout.u.contig.addr)) || (dataset->shared->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))) { H5D_fill_value_t fill_status; /* Whether/How the fill value is defined */ /* Retrieve dataset's fill-value properties */ if(H5P_is_fill_value_defined(&dataset->shared->dcpl_cache.fill, &fill_status)<0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't tell if fill value defined") /* Should be impossible, but check anyway... */ if(fill_status == H5D_FILL_VALUE_UNDEFINED && (dataset->shared->dcpl_cache.fill_time == H5D_FILL_TIME_ALLOC || dataset->shared->dcpl_cache.fill_time == H5D_FILL_TIME_IFSET)) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "read failed: dataset doesn't exist, no data can be read") /* If we're never going to fill this dataset, just leave the junk in the user's buffer */ if(dataset->shared->dcpl_cache.fill_time == H5D_FILL_TIME_NEVER) HGOTO_DONE(SUCCEED) /* Go fill the user's selection with the dataset's fill value */ if(H5D_fill(dataset->shared->dcpl_cache.fill.buf,dataset->shared->type,buf,mem_type,mem_space,dxpl_id)<0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "filling buf failed") else HGOTO_DONE(SUCCEED) } /* end if */ /* * Locate the type conversion function and data space conversion * functions, and set up the element numbering information. If a data * type conversion is necessary then register data type atoms. Data type * conversion is necessary if the user has set the `need_bkg' to a high * enough value in xfer_parms since turning off data type conversion also * turns off background preservation. */ if (NULL==(tpath=H5T_path_find(dataset->shared->type, mem_type, NULL, NULL, dxpl_id))) HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to convert between src and dest data types") /* Set the storage flags for the space conversion check */ switch(dataset->shared->layout.type) { case H5D_COMPACT: sconv_flags |= H5S_CONV_STORAGE_COMPACT; break; case H5D_CONTIGUOUS: sconv_flags |= H5S_CONV_STORAGE_CONTIGUOUS; break; case H5D_CHUNKED: sconv_flags |= H5S_CONV_STORAGE_CHUNKED; break; default: assert(0 && "Unhandled layout type!"); } /* end switch */ /* Set up I/O operation */ if(H5D_ioinfo_init(dataset,dxpl_cache,dxpl_id,mem_space,file_space,sconv_flags,&use_par_opt_io,&io_info)<0) HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to set up I/O operation") #ifdef H5_HAVE_PARALLEL #ifdef H5_HAVE_INSTRUMENTED_LIBRARY /**** Test for collective chunk IO notice the following code should be removed after a more general collective chunk IO algorithm is applied. 
*/ if(dataset->shared->layout.type == H5D_CHUNKED) { /*only check for chunking storage */ check_prop = H5Pexist(dxpl_id,H5D_XFER_COLL_CHUNK_NAME); if(check_prop < 0) HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to check property list"); if(check_prop > 0) { if(H5Pget(dxpl_id,H5D_XFER_COLL_CHUNK_NAME,&prop_value)<0) HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to get property value"); if(!use_par_opt_io) { new_value = 0; if(H5Pset(dxpl_id,H5D_XFER_COLL_CHUNK_NAME,&new_value)<0) HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to set property value"); } } } /* end Test for collective chunk IO */ #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ /* Don't reset the transfer mode if we can't or won't use it */ if(!use_par_opt_io || !H5T_path_noop(tpath)) H5D_io_assist_mpio(dxpl_id, dxpl_cache, &xfer_mode_changed); #endif /*H5_HAVE_PARALLEL*/ /* Determine correct I/O routine to invoke */ if(dataset->shared->layout.type!=H5D_CHUNKED) { if(H5D_contig_read(&io_info, nelmts, mem_type, mem_space, file_space, tpath, dataset->shared->type_id, mem_type_id, buf)<0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data") } /* end if */ else { if(H5D_chunk_read(&io_info, nelmts, mem_type, mem_space, file_space, tpath, dataset->shared->type_id, mem_type_id, buf)<0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data") } /* end else */ done: #ifdef H5_HAVE_PARALLEL /* Restore xfer_mode due to the kludge */ if (xfer_mode_changed) H5D_io_restore_mpio(dxpl_id); #endif /*H5_HAVE_PARALLEL*/ FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_read() */ /*------------------------------------------------------------------------- * Function: H5D_write * * Purpose: Writes (part of) a DATASET to a file from application memory * BUF. See H5Dwrite() for complete details. * * Return: Non-negative on success/Negative on failure * * Programmer: Robb Matzke * Thursday, December 4, 1997 * * Modifications: * Robb Matzke, 9 Jun 1998 * The data space is no longer cached in the dataset struct. * * rky 980918 * Added must_convert to do non-optimized read when necessary. * * Quincey Koziol, 2 July 1999 * Changed xfer_parms parameter to xfer plist parameter, so it could * be passed to H5T_convert * * Albert Cheng, 2000-11-21 * Added the code that when it detects it is not safe to process a * COLLECTIVE write request without hanging, it changes it to * INDEPENDENT calls. * * Albert Cheng, 2000-11-27 * Changed to use the optimized MPIO transfer for Collective calls only. * * Raymond Lu, 2001-10-2 * Changed the way to retrieve property for generic property list. * * Raymond Lu, 2002-2-26 * For the new fill value design, space may not be allocated until * this function is called. Allocate and initialize space if it * hasn't been. 
* * QAK - 2002/04/02 * Removed the must_convert parameter and move preconditions to * H5S_<foo>_opt_possible() routine * * Nat Furrer and James Laird, 2004/6/7 * Added check for filter encode capability *------------------------------------------------------------------------- */ static herr_t H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space, const H5S_t *file_space, hid_t dxpl_id, const void *buf) { hssize_t snelmts; /*total number of elmts (signed) */ hsize_t nelmts; /*total number of elmts */ H5T_path_t *tpath = NULL; /*type conversion info */ const H5T_t *mem_type = NULL; /* Memory datatype */ H5D_io_info_t io_info; /* Dataset I/O info */ hbool_t use_par_opt_io=FALSE; /* Whether the 'optimized' I/O routines with be parallel */ #ifdef H5_HAVE_PARALLEL hbool_t xfer_mode_changed=FALSE; /* Whether the transfer mode was changed */ #ifdef H5_HAVE_INSTRUMENTED_LIBRARY int prop_value,new_value; htri_t check_prop; #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ #endif /*H5_HAVE_PARALLEL*/ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */ unsigned sconv_flags=0; /* Flags for the space conversion */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_write) /* check args */ assert(dataset && dataset->ent.file); /* Get the memory datatype */ if (NULL == (mem_type = H5I_object_verify(mem_type_id, H5I_DATATYPE))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type") /* All filters in the DCPL must have encoding enabled. */ if(! dataset->shared->checked_filters) { if(H5Z_can_apply(dataset->shared->dcpl_id, dataset->shared->type_id) <0) HGOTO_ERROR(H5E_PLINE, H5E_CANAPPLY, FAIL, "can't apply filters") dataset->shared->checked_filters = TRUE; } /* If MPI based VFD is used, no VL datatype support yet. */ /* This is because they use the global heap in the file and we don't */ /* support parallel access of that yet */ if (IS_H5FD_MPI(dataset->ent.file) && H5T_detect_class(mem_type, H5T_VLEN)>0) HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "Parallel IO does not support writing VL datatypes yet") /* If MPI based VFD is used, no VL datatype support yet. */ /* This is because they use the global heap in the file and we don't */ /* support parallel access of that yet */ /* We should really use H5T_detect_class() here, but it will be difficult * to detect the type of the reference if it is nested... 
-QAK */ if (IS_H5FD_MPI(dataset->ent.file) && H5T_get_class(mem_type, TRUE)==H5T_REFERENCE && H5T_get_ref_type(mem_type)==H5R_DATASET_REGION) HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "Parallel IO does not support writing region reference datatypes yet") /* Check if we are allowed to write to this file */ if (0==(H5F_get_intent(dataset->ent.file) & H5F_ACC_RDWR)) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "no write intent on file") /* Fill the DXPL cache values for later use */ if (H5D_get_dxpl_cache(dxpl_id,&dxpl_cache)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") if (!file_space) file_space = dataset->shared->space; if (!mem_space) mem_space = file_space; if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space))<0) HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection") H5_ASSIGN_OVERFLOW(nelmts,snelmts,hssize_t,hsize_t); #ifdef H5_HAVE_PARALLEL /* Collective access is not permissible without a MPI based VFD */ if (dxpl_cache->xfer_mode==H5FD_MPIO_COLLECTIVE && !IS_H5FD_MPI(dataset->ent.file)) HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "collective access for MPI-based driver only") /* Set the "parallel I/O possible" flag, for H5S_find(), if we are doing collective I/O */ /* (Don't set the parallel I/O possible flag for the MPI-posix driver, since it doesn't do real collective I/O) */ if (H5S_mpi_opt_types_g && dxpl_cache->xfer_mode==H5FD_MPIO_COLLECTIVE && !IS_H5FD_MPIPOSIX(dataset->ent.file)) sconv_flags |= H5S_CONV_PAR_IO_POSSIBLE; #endif /*H5_HAVE_PARALLEL*/ /* Make certain that the number of elements in each selection is the same */ if (nelmts!=(hsize_t)H5S_GET_SELECT_NPOINTS(file_space)) HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "src and dest data spaces have different sizes") /* Make sure that both selections have their extents set */ if( !(H5S_has_extent(file_space)) ) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file dataspace does not have extent set") if( !(H5S_has_extent(mem_space)) ) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "memory dataspace does not have extent set") /* Retrieve dataset properties */ /* <none needed currently> */ /* Allocate data space and initialize it if it hasn't been. */ if(nelmts > 0 && dataset->shared->efl.nused==0 && ((dataset->shared->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->shared->layout.u.contig.addr)) || (dataset->shared->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))) { hssize_t file_nelmts; /* Number of elements in file dataset's dataspace */ hbool_t full_overwrite; /* Whether we are over-writing all the elements */ /* Get the number of elements in file dataset's dataspace */ if((file_nelmts=H5S_GET_EXTENT_NPOINTS(file_space))<0) HGOTO_ERROR (H5E_DATASET, H5E_BADVALUE, FAIL, "can't retrieve number of elements in file dataset") /* Always allow fill values to be written if the dataset has a VL datatype */ if(H5T_detect_class(dataset->shared->type, H5T_VLEN)) full_overwrite=FALSE; else full_overwrite=(hsize_t)file_nelmts==nelmts ? TRUE : FALSE; /* Allocate storage */ if(H5D_alloc_storage(dataset->ent.file,dxpl_id,dataset,H5D_ALLOC_WRITE, TRUE, full_overwrite)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage") } /* end if */ /* * Locate the type conversion function and data space conversion * functions, and set up the element numbering information. If a data * type conversion is necessary then register data type atoms. 
Data type * conversion is necessary if the user has set the `need_bkg' to a high * enough value in xfer_parms since turning off data type conversion also * turns off background preservation. */ if (NULL==(tpath=H5T_path_find(mem_type, dataset->shared->type, NULL, NULL, dxpl_id))) HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to convert between src and dest data types") /* Set the storage flags for the space conversion check */ switch(dataset->shared->layout.type) { case H5D_COMPACT: sconv_flags |= H5S_CONV_STORAGE_COMPACT; break; case H5D_CONTIGUOUS: sconv_flags |= H5S_CONV_STORAGE_CONTIGUOUS; break; case H5D_CHUNKED: sconv_flags |= H5S_CONV_STORAGE_CHUNKED; break; default: assert(0 && "Unhandled layout type!"); } /* end switch */ /* Set up I/O operation */ if(H5D_ioinfo_init(dataset,dxpl_cache,dxpl_id,mem_space,file_space,sconv_flags,&use_par_opt_io,&io_info)<0) HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to set up I/O operation") #ifdef H5_HAVE_PARALLEL #ifdef H5_HAVE_INSTRUMENTED_LIBRARY /**** Test for collective chunk IO notice the following code should be removed after a more general collective chunk IO algorithm is applied. */ if(dataset->shared->layout.type == H5D_CHUNKED) { /*only check for chunking storage */ check_prop = H5Pexist(dxpl_id,H5D_XFER_COLL_CHUNK_NAME); if(check_prop < 0) HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to check property list"); if(check_prop > 0) { if(H5Pget(dxpl_id,H5D_XFER_COLL_CHUNK_NAME,&prop_value)<0) HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to get property value"); if(!use_par_opt_io) { new_value = 0; if(H5Pset(dxpl_id,H5D_XFER_COLL_CHUNK_NAME,&new_value)<0) HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to set property value"); } } } #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ /* Don't reset the transfer mode if we can't or won't use it */ if(!use_par_opt_io || !H5T_path_noop(tpath)) H5D_io_assist_mpio(dxpl_id, dxpl_cache, &xfer_mode_changed); #endif /*H5_HAVE_PARALLEL*/ /* Determine correct I/O routine to invoke */ if(dataset->shared->layout.type!=H5D_CHUNKED) { if(H5D_contig_write(&io_info, nelmts, mem_type, mem_space, file_space, tpath, mem_type_id, dataset->shared->type_id, buf)<0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data") } /* end if */ else { if(H5D_chunk_write(&io_info, nelmts, mem_type, mem_space, file_space, tpath, mem_type_id, dataset->shared->type_id, buf)<0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data") } /* end else */ #ifdef OLD_WAY /* * This was taken out because it can be called in a parallel program with * independent access, causing the metadata cache to get corrupted. Its been * disabled for all types of access (serial as well as parallel) to make the * modification time consistent for all programs. -QAK */ /* * Update modification time. We have to do this explicitly because * writing to a dataset doesn't necessarily change the object header. */ if (H5O_touch(&(dataset->ent), FALSE, dxpl_id)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update modification time") #endif /* OLD_WAY */ done: #ifdef H5_HAVE_PARALLEL /* Restore xfer_mode due to the kludge */ if (xfer_mode_changed) H5D_io_restore_mpio(dxpl_id); #endif /*H5_HAVE_PARALLEL*/ FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_write() */ /*------------------------------------------------------------------------- * Function: H5D_contig_read * * Purpose: Read from a contiguous dataset. 
* * Return: Non-negative on success/Negative on failure * * Programmer: Raymond Lu * Thursday, April 10, 2003 * * Modifications: * QAK - 2003/04/17 * Hacked on it a lot. :-) * Leon Arber: 4/20/04 * Added support for data transforms. * *------------------------------------------------------------------------- */ static herr_t H5D_contig_read(H5D_io_info_t *io_info, hsize_t nelmts, const H5T_t *mem_type, const H5S_t *mem_space, const H5S_t *file_space, H5T_path_t *tpath, hid_t src_id, hid_t dst_id, void *buf/*out*/) { H5D_t *dataset=io_info->dset; /* Local pointer to dataset info */ const H5D_dxpl_cache_t *dxpl_cache=io_info->dxpl_cache; /* Local pointer to dataset transfer info */ herr_t status; /*function return status*/ #ifdef H5S_DEBUG H5_timer_t timer; #endif size_t src_type_size; /*size of source type */ size_t dst_type_size; /*size of destination type*/ size_t max_type_size; /* Size of largest source/destination type */ size_t target_size; /*desired buffer size */ size_t request_nelmts; /*requested strip mine */ H5S_sel_iter_t mem_iter; /*memory selection iteration info*/ hbool_t mem_iter_init=0; /*memory selection iteration info has been initialized */ H5S_sel_iter_t bkg_iter; /*background iteration info*/ hbool_t bkg_iter_init=0; /*background iteration info has been initialized */ H5S_sel_iter_t file_iter; /*file selection iteration info*/ hbool_t file_iter_init=0; /*file selection iteration info has been initialized */ H5T_bkg_t need_bkg; /*type of background buf*/ uint8_t *tconv_buf = NULL; /*data type conv buffer */ uint8_t *bkg_buf = NULL; /*background buffer */ hsize_t smine_start; /*strip mine start loc */ size_t n, smine_nelmts; /*elements per strip */ H5D_storage_t store; /*union of storage info for dataset */ herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_contig_read) /* Initialize storage info for this dataset */ if (dataset->shared->efl.nused>0) HDmemcpy(&store.efl,&(dataset->shared->efl),sizeof(H5O_efl_t)); else { store.contig.dset_addr=dataset->shared->layout.u.contig.addr; store.contig.dset_size=dataset->shared->layout.u.contig.size; } /* end if */ /* Set dataset storage for I/O info */ io_info->store=&store; /* * If there is no type conversion then read directly into the * application's buffer. This saves at least one mem-to-mem copy. */ if ( H5Z_xform_noop(dxpl_cache->data_xform_prop) && H5T_path_noop(tpath)) { #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif /* Sanity check dataset, then read it */ assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr)) || (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr))) || dataset->shared->efl.nused>0 || 0 == nelmts || dataset->shared->layout.type==H5D_COMPACT); H5_CHECK_OVERFLOW(nelmts,hsize_t,size_t); status = (io_info->ops.read)(io_info, (size_t)nelmts, H5T_get_size(dataset->shared->type), file_space, mem_space, buf/*out*/); #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[1].read_timer), &timer); io_info->stats->stats[1].read_nbytes += nelmts * H5T_get_size(dataset->shared->type); io_info->stats->stats[1].read_ncalls++; #endif /* Check return value from optimized read */ if (status<0) { HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed") } else /* direct xfer accomplished successfully */ HGOTO_DONE(SUCCEED) } /* end if */ /* * This is the general case (type conversion, usually). 
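 * (Illustrative arithmetic only, not additional behaviour: the strip-mine
 * size below is request_nelmts = target_size / max_type_size.  Assuming the
 * default temporary-buffer size of 1 MiB for H5D_XFER_MAX_TEMP_BUF_DEF and,
 * say, an 8-byte file datatype read into a 4-byte memory datatype,
 * max_type_size is 8, so roughly 1048576/8 = 131072 elements are gathered,
 * converted and scattered per pass of the loop further down.)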
*/ if(nelmts==0) HGOTO_DONE(SUCCEED) /* Compute element sizes and other parameters */ src_type_size = H5T_get_size(dataset->shared->type); dst_type_size = H5T_get_size(mem_type); max_type_size = MAX(src_type_size, dst_type_size); target_size = dxpl_cache->max_temp_buf; /* XXX: This could cause a problem if the user sets their buffer size * to the same size as the default, and then the dataset elements are * too large for the buffer... - QAK */ if(target_size==H5D_XFER_MAX_TEMP_BUF_DEF) { /* If the buffer is too small to hold even one element, make it bigger */ if(target_size<max_type_size) target_size = max_type_size; /* If the buffer is too large to hold all the elements, make it smaller */ else if(target_size>(nelmts*max_type_size)) target_size=(size_t)(nelmts*max_type_size); } /* end if */ request_nelmts = target_size / max_type_size; /* Sanity check elements in temporary buffer */ if (request_nelmts==0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "temporary buffer max size is too small") /* Figure out the strip mine size. */ if (H5S_select_iter_init(&file_iter, file_space, src_type_size)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize file selection information") file_iter_init=1; /*file selection iteration info has been initialized */ if (H5S_select_iter_init(&mem_iter, mem_space, dst_type_size)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information") mem_iter_init=1; /*file selection iteration info has been initialized */ if (H5S_select_iter_init(&bkg_iter, mem_space, dst_type_size)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize background selection information") bkg_iter_init=1; /*file selection iteration info has been initialized */ /* * Get a temporary buffer for type conversion unless the app has already * supplied one through the xfer properties. Instead of allocating a * buffer which is the exact size, we allocate the target size. The * malloc() is usually less resource-intensive if we allocate/free the * same size over and over. */ if (H5T_path_bkg(tpath)) { H5T_bkg_t path_bkg; /* Type conversion's background info */ /* Retrieve the bkgr buffer property */ need_bkg=dxpl_cache->bkgr_buf_type; path_bkg = H5T_path_bkg(tpath); need_bkg = MAX(path_bkg, need_bkg); } else { need_bkg = H5T_BKG_NO; /*never needed even if app says yes*/ } /* end else */ if (NULL==(tconv_buf=dxpl_cache->tconv_buf)) { /* Allocate temporary buffer */ if((tconv_buf=H5FL_BLK_MALLOC(type_conv,target_size))==NULL) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for type conversion") } /* end if */ if (need_bkg && NULL==(bkg_buf=dxpl_cache->bkgr_buf)) { /* Allocate background buffer */ /* (Need calloc()-like call since memory needs to be initialized) */ if((bkg_buf=H5FL_BLK_CALLOC(type_conv,(request_nelmts*dst_type_size)))==NULL) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for background conversion") } /* end if */ /* Start strip mining... */ for (smine_start=0; smine_start<nelmts; smine_start+=smine_nelmts) { /* Go figure out how many elements to read from the file */ assert(H5S_SELECT_ITER_NELMTS(&file_iter)==(nelmts-smine_start)); smine_nelmts = (size_t)MIN(request_nelmts, (nelmts-smine_start)); /* * Gather the data from disk into the data type conversion * buffer. Also gather data from application to background buffer * if necessary. 
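 * (Sketch of the per-pass data flow implemented by the calls below -- purely
 *  descriptive of this routine, nothing new:
 *      H5D_select_fgath()   file selection  -> tconv_buf
 *      H5D_select_mgath()   app buffer      -> bkg_buf     (only when need_bkg)
 *      H5T_convert()        tconv_buf converted in place, file type -> memory type
 *      H5Z_xform_eval()     optional data transform on tconv_buf
 *      H5D_select_mscat()   tconv_buf       -> app buffer)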
*/ #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif /* Sanity check that space is allocated, then read data from it */ assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr)) || (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr))) || dataset->shared->efl.nused>0 || dataset->shared->layout.type==H5D_COMPACT); n = H5D_select_fgath(io_info, file_space, &file_iter, smine_nelmts, tconv_buf/*out*/); #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[1].gath_timer), &timer); io_info->stats->stats[1].gath_nbytes += n * src_type_size; io_info->stats->stats[1].gath_ncalls++; #endif if (n!=smine_nelmts) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "file gather failed") if (H5T_BKG_YES==need_bkg) { #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif n = H5D_select_mgath(buf, mem_space, &bkg_iter, smine_nelmts, dxpl_cache, bkg_buf/*out*/); #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[1].bkg_timer), &timer); io_info->stats->stats[1].bkg_nbytes += n * dst_type_size; io_info->stats->stats[1].bkg_ncalls++; #endif if (n!=smine_nelmts) HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL, "mem gather failed") } /* end if */ /* * Perform data type conversion. */ if (H5T_convert(tpath, src_id, dst_id, smine_nelmts, 0, 0, tconv_buf, bkg_buf, io_info->dxpl_id)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data type conversion failed") /* Do the data transform after the conversion (since we're using type mem_type) */ if(!H5Z_xform_noop(dxpl_cache->data_xform_prop)) if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, mem_type) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Error performing data transform") /* * Scatter the data into memory. */ #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif status = H5D_select_mscat(tconv_buf, mem_space, &mem_iter, smine_nelmts, dxpl_cache, buf/*out*/); #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[1].scat_timer), &timer); io_info->stats->stats[1].scat_nbytes += smine_nelmts * dst_type_size; io_info->stats->stats[1].scat_ncalls++; #endif if (status<0) HGOTO_ERROR (H5E_DATASET, H5E_READERROR, FAIL, "scatter failed") } /* end for */ done: /* Release selection iterators */ if(file_iter_init) { if(H5S_SELECT_ITER_RELEASE(&file_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") } /* end if */ if(mem_iter_init) { if(H5S_SELECT_ITER_RELEASE(&mem_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") } /* end if */ if(bkg_iter_init) { if(H5S_SELECT_ITER_RELEASE(&bkg_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") } /* end if */ if (tconv_buf && NULL==dxpl_cache->tconv_buf) H5FL_BLK_FREE(type_conv,tconv_buf); if (bkg_buf && NULL==dxpl_cache->bkgr_buf) H5FL_BLK_FREE(type_conv,bkg_buf); FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_contig_read() */ /*------------------------------------------------------------------------- * Function: H5D_contig_write * * Purpose: Write to a contiguous dataset. * * Return: Non-negative on success/Negative on failure * * Programmer: Raymond Lu * Thursday, April 10, 2003 * * Modifications: * QAK - 2003/04/17 * Hacked on it a lot. :-) * Leon Arber: 4/20/04 * Added support for data transforms. 
* *------------------------------------------------------------------------- */ static herr_t H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts, const H5T_t *mem_type, const H5S_t *mem_space, const H5S_t *file_space, H5T_path_t *tpath, hid_t src_id, hid_t dst_id, const void *buf) { H5D_t *dataset=io_info->dset; /* Local pointer to dataset info */ const H5D_dxpl_cache_t *dxpl_cache=io_info->dxpl_cache; /* Local pointer to dataset transfer info */ herr_t status; /*function return status*/ #ifdef H5S_DEBUG H5_timer_t timer; #endif size_t src_type_size; /*size of source type */ size_t dst_type_size; /*size of destination type*/ size_t max_type_size; /* Size of largest source/destination type */ size_t target_size; /*desired buffer size */ size_t request_nelmts; /*requested strip mine */ H5S_sel_iter_t mem_iter; /*memory selection iteration info*/ hbool_t mem_iter_init=0; /*memory selection iteration info has been initialized */ H5S_sel_iter_t bkg_iter; /*background iteration info*/ hbool_t bkg_iter_init=0; /*background iteration info has been initialized */ H5S_sel_iter_t file_iter; /*file selection iteration info*/ hbool_t file_iter_init=0; /*file selection iteration info has been initialized */ H5T_bkg_t need_bkg; /*type of background buf*/ uint8_t *tconv_buf = NULL; /*data type conv buffer */ uint8_t *bkg_buf = NULL; /*background buffer */ hsize_t smine_start; /*strip mine start loc */ size_t n, smine_nelmts; /*elements per strip */ H5D_storage_t store; /*union of storage info for dataset */ herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_contig_write) /* Initialize storage info for this dataset */ if (dataset->shared->efl.nused>0) HDmemcpy(&store.efl,&(dataset->shared->efl),sizeof(H5O_efl_t)); else { store.contig.dset_addr=dataset->shared->layout.u.contig.addr; store.contig.dset_size=dataset->shared->layout.u.contig.size; } /* end if */ /* Set dataset storage for I/O info */ io_info->store=&store; /* * If there is no type conversion then write directly from the * application's buffer. This saves at least one mem-to-mem copy. */ if ( H5Z_xform_noop(dxpl_cache->data_xform_prop) && H5T_path_noop(tpath)) { #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif H5_CHECK_OVERFLOW(nelmts,hsize_t,size_t); status = (io_info->ops.write)(io_info, (size_t)nelmts, H5T_get_size(dataset->shared->type), file_space, mem_space, buf); #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[0].write_timer), &timer); io_info->stats->stats[0].write_nbytes += nelmts * H5T_get_size(mem_type); io_info->stats->stats[0].write_ncalls++; #endif /* Check return value from optimized write */ if (status<0) { HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed") } else /* direct xfer accomplished successfully */ HGOTO_DONE(SUCCEED) } /* end if */ /* * This is the general case. */ if(nelmts==0) HGOTO_DONE(SUCCEED) /* Compute element sizes and other parameters */ src_type_size = H5T_get_size(mem_type); dst_type_size = H5T_get_size(dataset->shared->type); max_type_size = MAX(src_type_size, dst_type_size); target_size = dxpl_cache->max_temp_buf; /* XXX: This could cause a problem if the user sets their buffer size * to the same size as the default, and then the dataset elements are * too large for the buffer... 
- QAK */ if(target_size==H5D_XFER_MAX_TEMP_BUF_DEF) { /* If the buffer is too small to hold even one element, make it bigger */ if(target_size<max_type_size) target_size = max_type_size; /* If the buffer is too large to hold all the elements, make it smaller */ else if(target_size>(nelmts*max_type_size)) target_size=(size_t)(nelmts*max_type_size); } /* end if */ request_nelmts = target_size / max_type_size; /* Sanity check elements in temporary buffer */ if (request_nelmts==0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "temporary buffer max size is too small") /* Figure out the strip mine size. */ if (H5S_select_iter_init(&file_iter, file_space, dst_type_size)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize file selection information") file_iter_init=1; /*file selection iteration info has been initialized */ if (H5S_select_iter_init(&mem_iter, mem_space, src_type_size)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information") mem_iter_init=1; /*file selection iteration info has been initialized */ if (H5S_select_iter_init(&bkg_iter, file_space, dst_type_size)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize background selection information") bkg_iter_init=1; /*file selection iteration info has been initialized */ /* * Get a temporary buffer for type conversion unless the app has already * supplied one through the xfer properties. Instead of allocating a * buffer which is the exact size, we allocate the target size. The * malloc() is usually less resource-intensive if we allocate/free the * same size over and over. */ if(H5T_detect_class(dataset->shared->type, H5T_VLEN)) { /* Old data is retrieved into background buffer for VL datatype. The * data is used later for freeing heap objects. */ need_bkg = H5T_BKG_YES; } else if (H5T_path_bkg(tpath)) { H5T_bkg_t path_bkg; /* Type conversion's background info */ /* Retrieve the bkgr buffer property */ need_bkg=dxpl_cache->bkgr_buf_type; path_bkg = H5T_path_bkg(tpath); need_bkg = MAX (path_bkg, need_bkg); } else { need_bkg = H5T_BKG_NO; /*never needed even if app says yes*/ } /* end else */ if (NULL==(tconv_buf=dxpl_cache->tconv_buf)) { /* Allocate temporary buffer */ if((tconv_buf=H5FL_BLK_MALLOC(type_conv,target_size))==NULL) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for type conversion") } /* end if */ if (need_bkg && NULL==(bkg_buf=dxpl_cache->bkgr_buf)) { /* Allocate background buffer */ /* (Don't need calloc()-like call since file data is already initialized) */ if((bkg_buf=H5FL_BLK_MALLOC(type_conv,(request_nelmts*dst_type_size)))==NULL) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for background conversion") } /* end if */ /* Start strip mining... */ for (smine_start=0; smine_start<nelmts; smine_start+=smine_nelmts) { /* Go figure out how many elements to read from the file */ assert(H5S_SELECT_ITER_NELMTS(&file_iter)==(nelmts-smine_start)); smine_nelmts = (size_t)MIN(request_nelmts, (nelmts-smine_start)); /* * Gather data from application buffer into the data type conversion * buffer. Also gather data from the file into the background buffer * if necessary. 
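 * (Mirror of the read path, again only describing the calls that follow:
 *      H5D_select_mgath()   app buffer      -> tconv_buf
 *      H5D_select_fgath()   file selection  -> bkg_buf     (only when need_bkg)
 *      H5T_convert()        tconv_buf converted in place, memory type -> file type
 *      H5Z_xform_eval()     optional data transform on tconv_buf
 *      H5D_select_fscat()   tconv_buf       -> file selection)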
*/ #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif n = H5D_select_mgath(buf, mem_space, &mem_iter, smine_nelmts, dxpl_cache, tconv_buf/*out*/); #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[0].gath_timer), &timer); io_info->stats->stats[0].gath_nbytes += n * src_type_size; io_info->stats->stats[0].gath_ncalls++; #endif if (n!=smine_nelmts) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "mem gather failed") if (H5T_BKG_YES==need_bkg) { #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif n = H5D_select_fgath(io_info, file_space, &bkg_iter, smine_nelmts, bkg_buf/*out*/); #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[0].bkg_timer), &timer); io_info->stats->stats[0].bkg_nbytes += n * dst_type_size; io_info->stats->stats[0].bkg_ncalls++; #endif if (n!=smine_nelmts) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "file gather failed") } /* end if */ /* * Perform data type conversion. */ if (H5T_convert(tpath, src_id, dst_id, smine_nelmts, 0, 0, tconv_buf, bkg_buf, io_info->dxpl_id)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data type conversion failed") /* Do the data transform after the type conversion (since we're using dataset->shared->type). */ if(!H5Z_xform_noop(dxpl_cache->data_xform_prop)) if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, dataset->shared->type) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Error performing data transform") /* * Scatter the data out to the file. */ #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif status = H5D_select_fscat(io_info, file_space, &file_iter, smine_nelmts, tconv_buf); #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[0].scat_timer), &timer); io_info->stats->stats[0].scat_nbytes += smine_nelmts * dst_type_size; io_info->stats->stats[0].scat_ncalls++; #endif if (status<0) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "scatter failed") } /* end for */ done: /* Release selection iterators */ if(file_iter_init) { if(H5S_SELECT_ITER_RELEASE(&file_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") } /* end if */ if(mem_iter_init) { if(H5S_SELECT_ITER_RELEASE(&mem_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") } /* end if */ if(bkg_iter_init) { if(H5S_SELECT_ITER_RELEASE(&bkg_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") } /* end if */ if (tconv_buf && NULL==dxpl_cache->tconv_buf) H5FL_BLK_FREE(type_conv,tconv_buf); if (bkg_buf && NULL==dxpl_cache->bkgr_buf) H5FL_BLK_FREE(type_conv,bkg_buf); FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_contig_write() */ /*------------------------------------------------------------------------- * Function: H5D_chunk_read * * Purpose: Read from a chunked dataset. * * Return: Non-negative on success/Negative on failure * * Programmer: Raymond Lu * Thursday, April 10, 2003 * * Modifications: * QAK - 2003/04/17 * Hacked on it a lot. :-) * Leon Arber: 4/20/04 * Added support for data transforms. 
* *------------------------------------------------------------------------- */ static herr_t H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts, const H5T_t *mem_type, const H5S_t *mem_space, const H5S_t *file_space, H5T_path_t *tpath, hid_t src_id, hid_t dst_id, void *buf/*out*/) { H5D_t *dataset=io_info->dset; /* Local pointer to dataset info */ const H5D_dxpl_cache_t *dxpl_cache=io_info->dxpl_cache; /* Local pointer to dataset transfer info */ fm_map fm; /* File<->memory mapping */ H5SL_node_t *chunk_node; /* Current node in chunk skip list */ herr_t status; /*function return status*/ #ifdef H5S_DEBUG H5_timer_t timer; #endif size_t src_type_size; /*size of source type */ size_t dst_type_size; /*size of destination type*/ size_t max_type_size; /* Size of largest source/destination type */ size_t target_size; /*desired buffer size */ size_t request_nelmts; /*requested strip mine */ hsize_t smine_start; /*strip mine start loc */ size_t n, smine_nelmts; /*elements per strip */ H5S_sel_iter_t mem_iter; /*memory selection iteration info*/ hbool_t mem_iter_init=0; /*memory selection iteration info has been initialized */ H5S_sel_iter_t bkg_iter; /*background iteration info*/ hbool_t bkg_iter_init=0; /*background iteration info has been initialized */ H5S_sel_iter_t file_iter; /*file selection iteration info*/ hbool_t file_iter_init=0; /*file selection iteration info has been initialized */ H5T_bkg_t need_bkg; /*type of background buf*/ uint8_t *tconv_buf = NULL; /*data type conv buffer */ uint8_t *bkg_buf = NULL; /*background buffer */ H5D_storage_t store; /*union of EFL and chunk pointer in file space */ herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_read) /* Map elements between file and memory for each chunk*/ if(H5D_create_chunk_map(dataset, mem_type, file_space, mem_space, &fm)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't build chunk mapping") /* Set dataset storage for I/O info */ io_info->store=&store; /* * If there is no type conversion then read directly into the * application's buffer. This saves at least one mem-to-mem copy. */ if ( H5Z_xform_noop(dxpl_cache->data_xform_prop) && H5T_path_noop(tpath)) { #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif /* Sanity check dataset, then read it */ assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr)) || (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr))) || dataset->shared->efl.nused>0 || 0 == nelmts || dataset->shared->layout.type==H5D_COMPACT); /* Get first node in chunk skip list */ chunk_node=H5SL_first(fm.fsel); /* Iterate through chunks to be operated on */ while(chunk_node) { H5D_chunk_info_t *chunk_info; /* chunk information */ /* Get the actual chunk information from the skip list node */ chunk_info=H5SL_item(chunk_node); /* Pass in chunk's coordinates in a union. 
*/ store.chunk.offset = chunk_info->coords; store.chunk.index = chunk_info->index; /* Perform the actual read operation */ status = (io_info->ops.read)(io_info, chunk_info->chunk_points, H5T_get_size(dataset->shared->type), chunk_info->fspace, chunk_info->mspace, buf); /* Check return value from optimized read */ if (status<0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed") /* Get the next chunk node in the skip list */ chunk_node=H5SL_next(chunk_node); } /* end while */ #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[1].read_timer), &timer); io_info->stats->stats[1].read_nbytes += nelmts * H5T_get_size(dataset->shared->type); io_info->stats->stats[1].read_ncalls++; #endif /* direct xfer accomplished successfully */ HGOTO_DONE(SUCCEED) } /* end if */ /* * This is the general case (type conversion, usually). */ if(nelmts==0) HGOTO_DONE(SUCCEED) /* Compute element sizes and other parameters */ src_type_size = H5T_get_size(dataset->shared->type); dst_type_size = H5T_get_size(mem_type); max_type_size = MAX(src_type_size, dst_type_size); target_size = dxpl_cache->max_temp_buf; /* XXX: This could cause a problem if the user sets their buffer size * to the same size as the default, and then the dataset elements are * too large for the buffer... - QAK */ if(target_size==H5D_XFER_MAX_TEMP_BUF_DEF) { /* If the buffer is too small to hold even one element, make it bigger */ if(target_size<max_type_size) target_size = max_type_size; /* If the buffer is too large to hold all the elements, make it smaller */ else if(target_size>(nelmts*max_type_size)) target_size=(size_t)(nelmts*max_type_size); } /* end if */ request_nelmts = target_size / max_type_size; /* Sanity check elements in temporary buffer */ if (request_nelmts==0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "temporary buffer max size is too small") /* * Get a temporary buffer for type conversion unless the app has already * supplied one through the xfer properties. Instead of allocating a * buffer which is the exact size, we allocate the target size. The * malloc() is usually less resource-intensive if we allocate/free the * same size over and over. 
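 * (For illustration -- a sketch, not part of this routine: an application can
 *  hand both buffers in on the transfer property list, e.g. via the public
 *  H5Pset_buffer() call,
 *      hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
 *      H5Pset_buffer(dxpl, 1024 * 1024, my_tconv_buf, my_bkg_buf);
 *  where my_tconv_buf/my_bkg_buf are the application's own allocations; in
 *  that case dxpl_cache->tconv_buf / dxpl_cache->bkgr_buf are non-NULL below
 *  and no library allocation happens here.)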
*/ if (H5T_path_bkg(tpath)) { H5T_bkg_t path_bkg; /* Type conversion's background info */ /* Retrieve the bkgr buffer property */ need_bkg=dxpl_cache->bkgr_buf_type; path_bkg = H5T_path_bkg(tpath); need_bkg = MAX(path_bkg, need_bkg); } else { need_bkg = H5T_BKG_NO; /*never needed even if app says yes*/ } /* end else */ if (NULL==(tconv_buf=dxpl_cache->tconv_buf)) { /* Allocate temporary buffer */ if((tconv_buf=H5FL_BLK_MALLOC(type_conv,target_size))==NULL) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for type conversion") } /* end if */ if (need_bkg && NULL==(bkg_buf=dxpl_cache->bkgr_buf)) { /* Allocate background buffer */ /* (Need calloc()-like call since memory needs to be initialized) */ if((bkg_buf=H5FL_BLK_CALLOC(type_conv,(request_nelmts*dst_type_size)))==NULL) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for background conversion") } /* end if */ /* Loop over all the chunks, performing I/O on each */ /* Get first node in chunk skip list */ chunk_node=H5SL_first(fm.fsel); /* Iterate through chunks to be operated on */ while(chunk_node) { H5D_chunk_info_t *chunk_info; /* chunk information */ /* Get the actual chunk information from the skip list nodes */ chunk_info=H5SL_item(chunk_node); /* initialize selection iterator */ if (H5S_select_iter_init(&file_iter, chunk_info->fspace, src_type_size)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize file selection information") file_iter_init=1; /*file selection iteration info has been initialized */ if (H5S_select_iter_init(&mem_iter, chunk_info->mspace, dst_type_size)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information") mem_iter_init=1; /*file selection iteration info has been initialized */ if (H5S_select_iter_init(&bkg_iter, chunk_info->mspace, dst_type_size)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize background selection information") bkg_iter_init=1; /*file selection iteration info has been initialized */ /* Pass in chunk's coordinates in a union*/ store.chunk.offset = chunk_info->coords; store.chunk.index = chunk_info->index; for (smine_start=0; smine_start<chunk_info->chunk_points; smine_start+=smine_nelmts) { /* Go figure out how many elements to read from the file */ assert(H5S_SELECT_ITER_NELMTS(&file_iter)==(chunk_info->chunk_points-smine_start)); smine_nelmts = (size_t)MIN(request_nelmts, (chunk_info->chunk_points-smine_start)); /* * Gather the data from disk into the data type conversion * buffer. Also gather data from application to background buffer * if necessary. 
*/ #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif /* Sanity check that space is allocated, then read data from it */ assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr)) || (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr))) || dataset->shared->efl.nused>0 || dataset->shared->layout.type==H5D_COMPACT); n = H5D_select_fgath(io_info, chunk_info->fspace, &file_iter, smine_nelmts, tconv_buf/*out*/); #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[1].gath_timer), &timer); io_info->stats->stats[1].gath_nbytes += n * src_type_size; io_info->stats->stats[1].gath_ncalls++; #endif if (n!=smine_nelmts) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "file gather failed") if (H5T_BKG_YES==need_bkg) { #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif n = H5D_select_mgath(buf, chunk_info->mspace, &bkg_iter, smine_nelmts, dxpl_cache, bkg_buf/*out*/); #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[1].bkg_timer), &timer); io_info->stats->stats[1].bkg_nbytes += n * dst_type_size; io_info->stats->stats[1].bkg_ncalls++; #endif if (n!=smine_nelmts) HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL, "mem gather failed") } /* end if */ /* * Perform data type conversion. */ if (H5T_convert(tpath, src_id, dst_id, smine_nelmts, 0, 0, tconv_buf, bkg_buf, io_info->dxpl_id)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data type conversion failed") /* Do the data transform after the conversion (since we're using type mem_type) */ if(!H5Z_xform_noop(dxpl_cache->data_xform_prop)) if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, mem_type) < 0) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Error performing data transform") /* * Scatter the data into memory. 
*/ #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif status = H5D_select_mscat(tconv_buf, chunk_info->mspace, &mem_iter, smine_nelmts, dxpl_cache, buf/*out*/); #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[1].scat_timer), &timer); io_info->stats->stats[1].scat_nbytes += smine_nelmts * dst_type_size; io_info->stats->stats[1].scat_ncalls++; #endif if (status<0) HGOTO_ERROR (H5E_DATASET, H5E_READERROR, FAIL, "scatter failed") } /* end for */ /* Release selection iterators */ if(file_iter_init) { if(H5S_SELECT_ITER_RELEASE(&file_iter)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") file_iter_init=0; } /* end if */ if(mem_iter_init) { if(H5S_SELECT_ITER_RELEASE(&mem_iter)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") mem_iter_init=0; } /* end if */ if(bkg_iter_init) { if(H5S_SELECT_ITER_RELEASE(&bkg_iter)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") bkg_iter_init=0; } /* end if */ /* Get the next chunk node in the skip list */ chunk_node=H5SL_next(chunk_node); } /* end while */ done: /* Release selection iterators, if necessary */ if(file_iter_init) { if(H5S_SELECT_ITER_RELEASE(&file_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") } /* end if */ if(mem_iter_init) { if(H5S_SELECT_ITER_RELEASE(&mem_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") } /* end if */ if(bkg_iter_init) { if(H5S_SELECT_ITER_RELEASE(&bkg_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") } /* end if */ if (tconv_buf && NULL==dxpl_cache->tconv_buf) H5FL_BLK_FREE(type_conv,tconv_buf); if (bkg_buf && NULL==dxpl_cache->bkgr_buf) H5FL_BLK_FREE(type_conv,bkg_buf); /* Release chunk mapping information */ if(H5D_destroy_chunk_map(&fm) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "can't release chunk mapping") FUNC_LEAVE_NOAPI(ret_value) } /* H5D_chunk_read() */ /*------------------------------------------------------------------------- * Function: H5D_chunk_write * * Purpose: Writes to a chunked dataset. * * Return: Non-negative on success/Negative on failure * * Programmer: Raymond Lu * Thursday, April 10, 2003 * * Modifications: * QAK - 2003/04/17 * Hacked on it a lot. :-) * Leon Arber: 4/20/04 * Added support for data transforms. 
* *------------------------------------------------------------------------- */ static herr_t H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts, const H5T_t *mem_type, const H5S_t *mem_space, const H5S_t *file_space, H5T_path_t *tpath, hid_t src_id, hid_t dst_id, const void *buf) { H5D_t *dataset=io_info->dset; /* Local pointer to dataset info */ const H5D_dxpl_cache_t *dxpl_cache=io_info->dxpl_cache; /* Local pointer to dataset transfer info */ fm_map fm; /* File<->memory mapping */ H5SL_node_t *chunk_node; /* Current node in chunk skip list */ herr_t status; /*function return status*/ #ifdef H5S_DEBUG H5_timer_t timer; #endif size_t src_type_size; /*size of source type */ size_t dst_type_size; /*size of destination type*/ size_t max_type_size; /* Size of largest source/destination type */ size_t target_size; /*desired buffer size */ size_t request_nelmts; /*requested strip mine */ hsize_t smine_start; /*strip mine start loc */ size_t n, smine_nelmts; /*elements per strip */ H5S_sel_iter_t mem_iter; /*memory selection iteration info*/ hbool_t mem_iter_init=0; /*memory selection iteration info has been initialized */ H5S_sel_iter_t bkg_iter; /*background iteration info*/ hbool_t bkg_iter_init=0; /*background iteration info has been initialized */ H5S_sel_iter_t file_iter; /*file selection iteration info*/ hbool_t file_iter_init=0; /*file selection iteration info has been initialized */ H5T_bkg_t need_bkg; /*type of background buf*/ uint8_t *tconv_buf = NULL; /*data type conv buffer */ uint8_t *bkg_buf = NULL; /*background buffer */ H5D_storage_t store; /*union of EFL and chunk pointer in file space */ herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_write) /* Map elements between file and memory for each chunk*/ if(H5D_create_chunk_map(dataset, mem_type, file_space, mem_space, &fm)<0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't build chunk mapping") /* Set dataset storage for I/O info */ io_info->store=&store; /* * If there is no type conversion then write directly from the * application's buffer. This saves at least one mem-to-mem copy. */ if ( H5Z_xform_noop(dxpl_cache->data_xform_prop) && H5T_path_noop(tpath)) { #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif /* Get first node in chunk skip list */ chunk_node=H5SL_first(fm.fsel); /* Iterate through chunks to be operated on */ while(chunk_node) { H5D_chunk_info_t *chunk_info; /* chunk information */ /* Get the actual chunk information from the skip list node */ chunk_info=H5SL_item(chunk_node); /* Pass in chunk's coordinates in a union. */ store.chunk.offset = chunk_info->coords; store.chunk.index = chunk_info->index; /* Perform the actual write operation */ status = (io_info->ops.write)(io_info, chunk_info->chunk_points, H5T_get_size(dataset->shared->type), chunk_info->fspace, chunk_info->mspace, buf); /* Check return value from optimized write */ if (status<0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed") /* Get the next chunk node in the skip list */ chunk_node=H5SL_next(chunk_node); } /* end while */ #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[0].write_timer), &timer); io_info->stats->stats[0].write_nbytes += nelmts * H5T_get_size(mem_type); io_info->stats->stats[0].write_ncalls++; #endif /* direct xfer accomplished successfully */ HGOTO_DONE(SUCCEED) } /* end if */ /* * This is the general case (type conversion, usually). 
*/ if(nelmts==0) HGOTO_DONE(SUCCEED) /* Compute element sizes and other parameters */ src_type_size = H5T_get_size(mem_type); dst_type_size = H5T_get_size(dataset->shared->type); max_type_size = MAX(src_type_size, dst_type_size); target_size = dxpl_cache->max_temp_buf; /* XXX: This could cause a problem if the user sets their buffer size * to the same size as the default, and then the dataset elements are * too large for the buffer... - QAK */ if(target_size==H5D_XFER_MAX_TEMP_BUF_DEF) { /* If the buffer is too small to hold even one element, make it bigger */ if(target_size<max_type_size) target_size = max_type_size; /* If the buffer is too large to hold all the elements, make it smaller */ else if(target_size>(nelmts*max_type_size)) target_size=(size_t)(nelmts*max_type_size); } /* end if */ request_nelmts = target_size / max_type_size; /* Sanity check elements in temporary buffer */ if (request_nelmts==0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "temporary buffer max size is too small") /* * Get a temporary buffer for type conversion unless the app has already * supplied one through the xfer properties. Instead of allocating a * buffer which is the exact size, we allocate the target size. The * malloc() is usually less resource-intensive if we allocate/free the * same size over and over. */ if(H5T_detect_class(dataset->shared->type, H5T_VLEN)) { /* Old data is retrieved into background buffer for VL datatype. The * data is used later for freeing heap objects. */ need_bkg = H5T_BKG_YES; } else if (H5T_path_bkg(tpath)) { H5T_bkg_t path_bkg; /* Type conversion's background info */ /* Retrieve the bkgr buffer property */ need_bkg=dxpl_cache->bkgr_buf_type; path_bkg = H5T_path_bkg(tpath); need_bkg = MAX (path_bkg, need_bkg); } else { need_bkg = H5T_BKG_NO; /*never needed even if app says yes*/ } /* end else */ if (NULL==(tconv_buf=dxpl_cache->tconv_buf)) { /* Allocate temporary buffer */ if((tconv_buf=H5FL_BLK_MALLOC(type_conv,target_size))==NULL) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for type conversion") } /* end if */ if (need_bkg && NULL==(bkg_buf=dxpl_cache->bkgr_buf)) { /* Allocate background buffer */ /* (Don't need calloc()-like call since file data is already initialized) */ if((bkg_buf=H5FL_BLK_MALLOC(type_conv,(request_nelmts*dst_type_size)))==NULL) HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for background conversion") } /* end if */ /* Loop over all the chunks, performing I/O on each */ /* Get first node in chunk skip list */ chunk_node=H5SL_first(fm.fsel); /* Iterate through chunks to be operated on */ while(chunk_node) { H5D_chunk_info_t *chunk_info; /* chunk information */ /* Get the actual chunk information from the skip list node */ chunk_info=H5SL_item(chunk_node); /* initialize selection iterator */ if (H5S_select_iter_init(&file_iter, chunk_info->fspace, dst_type_size)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize file selection information") file_iter_init=1; /*file selection iteration info has been initialized */ if (H5S_select_iter_init(&mem_iter, chunk_info->mspace, src_type_size)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information") mem_iter_init=1; /*file selection iteration info has been initialized */ if (H5S_select_iter_init(&bkg_iter, chunk_info->fspace, dst_type_size)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize background selection information") bkg_iter_init=1; /*file selection iteration info has 
been initialized */

        /* Pass in chunk's coordinates in a union */
        store.chunk.offset = chunk_info->coords;
        store.chunk.index = chunk_info->index;

        for (smine_start=0; smine_start<chunk_info->chunk_points; smine_start+=smine_nelmts) {
            /* Go figure out how many elements to read from the file */
            assert(H5S_SELECT_ITER_NELMTS(&file_iter)==(chunk_info->chunk_points-smine_start));
            smine_nelmts = (size_t)MIN(request_nelmts, (chunk_info->chunk_points-smine_start));

            /*
             * Gather data from the application buffer into the data type conversion
             * buffer. Also gather data from the file into the background buffer
             * if necessary.
             */
#ifdef H5S_DEBUG
            H5_timer_begin(&timer);
#endif
            n = H5D_select_mgath(buf, chunk_info->mspace, &mem_iter,
                                 smine_nelmts, dxpl_cache, tconv_buf/*out*/);
#ifdef H5S_DEBUG
            H5_timer_end(&(io_info->stats->stats[0].gath_timer), &timer);
            io_info->stats->stats[0].gath_nbytes += n * src_type_size;
            io_info->stats->stats[0].gath_ncalls++;
#endif
            if (n!=smine_nelmts)
                HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "mem gather failed")

            if (H5T_BKG_YES==need_bkg) {
#ifdef H5S_DEBUG
                H5_timer_begin(&timer);
#endif
                n = H5D_select_fgath(io_info, chunk_info->fspace, &bkg_iter,
                                     smine_nelmts, bkg_buf/*out*/);
#ifdef H5S_DEBUG
                H5_timer_end(&(io_info->stats->stats[0].bkg_timer), &timer);
                io_info->stats->stats[0].bkg_nbytes += n * dst_type_size;
                io_info->stats->stats[0].bkg_ncalls++;
#endif
                if (n!=smine_nelmts)
                    HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "file gather failed")
            } /* end if */

            /*
             * Perform data type conversion.
             */
            if (H5T_convert(tpath, src_id, dst_id, smine_nelmts, 0, 0, tconv_buf,
                            bkg_buf, io_info->dxpl_id)<0)
                HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data type conversion failed")

            /* Do the data transform after the type conversion (since we're using dataset->shared->type) */
            if(!H5Z_xform_noop(dxpl_cache->data_xform_prop))
                if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, dataset->shared->type) < 0)
                    HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Error performing data transform")

            /*
             * Scatter the data out to the file.
*/ #ifdef H5S_DEBUG H5_timer_begin(&timer); #endif status = H5D_select_fscat(io_info, chunk_info->fspace, &file_iter, smine_nelmts, tconv_buf); #ifdef H5S_DEBUG H5_timer_end(&(io_info->stats->stats[0].scat_timer), &timer); io_info->stats->stats[0].scat_nbytes += n * dst_type_size; io_info->stats->stats[0].scat_ncalls++; #endif if (status<0) HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "scatter failed") } /* end for */ /* Release selection iterators */ if(file_iter_init) { if(H5S_SELECT_ITER_RELEASE(&file_iter)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") file_iter_init=0; } /* end if */ if(mem_iter_init) { if(H5S_SELECT_ITER_RELEASE(&mem_iter)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") mem_iter_init=0; } /* end if */ if(bkg_iter_init) { if(H5S_SELECT_ITER_RELEASE(&bkg_iter)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") bkg_iter_init=0; } /* end if */ /* Get the next chunk node in the skip list */ chunk_node=H5SL_next(chunk_node); } /* end while */ done: /* Release selection iterators, if necessary */ if(file_iter_init) { if(H5S_SELECT_ITER_RELEASE(&file_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") } /* end if */ if(mem_iter_init) { if(H5S_SELECT_ITER_RELEASE(&mem_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") } /* end if */ if(bkg_iter_init) { if(H5S_SELECT_ITER_RELEASE(&bkg_iter)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release selection iterator") } /* end if */ if (tconv_buf && NULL==dxpl_cache->tconv_buf) H5FL_BLK_FREE(type_conv,tconv_buf); if (bkg_buf && NULL==dxpl_cache->bkgr_buf) H5FL_BLK_FREE(type_conv,bkg_buf); /* Release chunk mapping information */ if(H5D_destroy_chunk_map(&fm) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "can't release chunk mapping") FUNC_LEAVE_NOAPI(ret_value) } /* H5D_chunk_write() */ #ifdef H5_HAVE_PARALLEL /*------------------------------------------------------------------------- * Function: H5D_io_assist_mpio * * Purpose: Common logic for determining if the MPI transfer mode should * be changed. * * Return: Non-negative on success/Negative on failure * * Programmer: Raymond Lu * Thursday, April 10, 2003 * * Modifications: * QAK - 2003/04/17 * Hacked on it a lot. :-) * *------------------------------------------------------------------------- */ static herr_t H5D_io_assist_mpio(hid_t dxpl_id, H5D_dxpl_cache_t *dxpl_cache, hbool_t *xfer_mode_changed) { herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_io_assist_mpio) /* The following may not handle a collective call correctly * since it does not ensure all processes can handle the write * request according to the MPI collective specification. * Do the collective request via independent mode. */ if (dxpl_cache->xfer_mode==H5FD_MPIO_COLLECTIVE) { H5P_genplist_t *dx_plist; /* Data transer property list */ /* Get the dataset transfer property list */ if (NULL == (dx_plist = H5I_object(dxpl_id))) HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list") /* Kludge: change the xfer_mode to independent, handle the request, * then xfer_mode before return. * Better way is to get a temporary data_xfer property with * INDEPENDENT xfer_mode and pass it downwards. 
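 * (In other words: when the optimized parallel path can't be used -- for
 *  instance because a datatype conversion is needed -- there is no guarantee
 *  that every process reaches a matching collective MPI call, so the request
 *  is demoted to independent mode here and H5D_io_restore_mpio() switches the
 *  property back to collective once the I/O has been done.)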
 */
        dxpl_cache->xfer_mode = H5FD_MPIO_INDEPENDENT;
        if(H5P_set (dx_plist, H5D_XFER_IO_XFER_MODE_NAME, &dxpl_cache->xfer_mode) < 0)
            HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set transfer mode")
        *xfer_mode_changed=TRUE;        /* restore it before return */
    } /* end if */

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_io_assist_mpio() */

/*-------------------------------------------------------------------------
 * Function:	H5D_io_restore_mpio
 *
 * Purpose:	Common logic for restoring the MPI transfer mode
 *
 * Return:	Non-negative on success/Negative on failure
 *
 * Programmer:	Quincey Koziol
 *		Friday, February 6, 2004
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5D_io_restore_mpio(hid_t dxpl_id)
{
    H5P_genplist_t *dx_plist;           /* Data transfer property list */
    H5FD_mpio_xfer_t xfer_mode;         /* xfer_mode for this request */
    herr_t      ret_value = SUCCEED;    /* return value */

    FUNC_ENTER_NOAPI_NOINIT(H5D_io_restore_mpio)

    /* Get the dataset transfer property list */
    if (NULL == (dx_plist = H5I_object(dxpl_id)))
        HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list")

    /* Kludge: the transfer mode was changed to independent in order to handle
     * the request; switch it back to collective before returning.
     * A better way would be to pass a temporary data_xfer property with
     * INDEPENDENT xfer_mode downwards instead of modifying the caller's list.
     */
    xfer_mode = H5FD_MPIO_COLLECTIVE;
    if(H5P_set (dx_plist, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set transfer mode")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_io_restore_mpio() */
#endif /*H5_HAVE_PARALLEL*/

/*-------------------------------------------------------------------------
 * Function:	H5D_create_chunk_map
 *
 * Purpose:	Creates the mapping between elements selected in each chunk
 *              and the elements in the memory selection.
 *
 * Return:	Non-negative on success/Negative on failure
 *
 * Programmer:	Raymond Lu
 *		Thursday, April 10, 2003
 *
 * Modifications:
 *              QAK - 2003/04/17
 *              Hacked on it a lot.
:-) * *------------------------------------------------------------------------- */ static herr_t H5D_create_chunk_map(const H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_space, const H5S_t *mem_space, fm_map *fm) { H5S_t *tmp_mspace=NULL; /* Temporary memory dataspace */ H5S_t *equiv_mspace=NULL; /* Equivalent memory dataspace */ hbool_t equiv_mspace_init=0;/* Equivalent memory dataspace was created */ hid_t f_tid=(-1); /* Temporary copy of file datatype for iteration */ hbool_t iter_init=0; /* Selection iteration info has been initialized */ unsigned f_ndims; /* The number of dimensions of the file's dataspace */ int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */ H5SL_node_t *curr_node; /* Current node in skip list */ H5S_sel_type fsel_type; /* Selection type on disk */ char bogus; /* "bogus" buffer to pass to selection iterator */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_create_chunk_map) /* Get layout for dataset */ fm->layout = &(dataset->shared->layout); /* Check if the memory space is scalar & make equivalent memory space */ if((sm_ndims = H5S_GET_EXTENT_NDIMS(mem_space))<0) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimension number") if(sm_ndims==0) { hsize_t dims[H5O_LAYOUT_NDIMS]; /* Temporary dimension information */ /* Set up "equivalent" n-dimensional dataspace with size '1' in each dimension */ for(u=0; u<dataset->shared->layout.u.chunk.ndims-1; u++) dims[u]=1; if((equiv_mspace = H5S_create_simple(dataset->shared->layout.u.chunk.ndims-1,dims,NULL))==NULL) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCREATE, FAIL, "unable to create equivalent dataspace for scalar space") /* Indicate that this space needs to be released */ equiv_mspace_init=1; /* Set the number of dimensions for the memory dataspace */ fm->m_ndims=dataset->shared->layout.u.chunk.ndims-1; } /* end else */ else { equiv_mspace=(H5S_t *)mem_space; /* Casting away 'const' OK... 
*/ /* Set the number of dimensions for the memory dataspace */ H5_ASSIGN_OVERFLOW(fm->m_ndims,sm_ndims,int,unsigned); } /* end else */ /* Get dim number and dimensionality for each dataspace */ fm->f_ndims=f_ndims=dataset->shared->layout.u.chunk.ndims-1; if(H5S_get_simple_extent_dims(file_space, fm->f_dims, NULL)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality") /* Decide the number of chunks in each dimension*/ for(u=0; u<f_ndims; u++) { /* Keep the size of the chunk dimensions as hsize_t for various routines */ fm->chunk_dim[u]=fm->layout->u.chunk.dim[u]; /* Round up to the next integer # of chunks, to accomodate partial chunks */ fm->chunks[u] = ((fm->f_dims[u]+dataset->shared->layout.u.chunk.dim[u])-1) / dataset->shared->layout.u.chunk.dim[u]; } /* end for */ /* Compute the "down" size of 'chunks' information */ if(H5V_array_down(f_ndims,fm->chunks,fm->down_chunks)<0) HGOTO_ERROR (H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes") /* Initialize skip list for chunk selections */ if((fm->fsel=H5SL_create(H5SL_TYPE_HSIZE,0.5,H5D_DEFAULT_SKIPLIST_HEIGHT))==NULL) HGOTO_ERROR(H5E_DATASET,H5E_CANTCREATE,FAIL,"can't create skip list for chunk selections") /* Initialize "last chunk" information */ fm->last_index=(hsize_t)-1; fm->last_chunk_info=NULL; /* Point at the dataspaces */ fm->file_space=file_space; fm->mem_space=equiv_mspace; fm->mem_space_copy=equiv_mspace_init; /* Make certain to copy memory dataspace if necessary */ /* Get type of selection on disk & in memory */ if((fsel_type=H5S_GET_SELECT_TYPE(file_space))<H5S_SEL_NONE) HGOTO_ERROR (H5E_DATASET, H5E_BADSELECT, FAIL, "unable to convert from file to memory data space") if((fm->msel_type=H5S_GET_SELECT_TYPE(equiv_mspace))<H5S_SEL_NONE) HGOTO_ERROR (H5E_DATASET, H5E_BADSELECT, FAIL, "unable to convert from file to memory data space") /* Check if file selection is a point selection */ if(fsel_type==H5S_SEL_POINTS) { /* Create temporary datatypes for selection iteration */ if((f_tid = H5I_register(H5I_DATATYPE, H5T_copy(dataset->shared->type, H5T_COPY_ALL)))<0) HGOTO_ERROR (H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register file datatype") /* Spaces aren't the same shape, iterate over the memory selection directly */ if(H5S_select_iterate(&bogus, f_tid, file_space, H5D_chunk_file_cb, fm)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create file chunk selections") /* Reset "last chunk" info */ fm->last_index=(hsize_t)-1; fm->last_chunk_info=NULL; } /* end if */ else { /* Build the file selection for each chunk */ if(H5D_create_chunk_file_map_hyper(fm)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create file chunk selections") /* Clean file chunks' hyperslab span "scratch" information */ curr_node=H5SL_first(fm->fsel); while(curr_node) { H5D_chunk_info_t *chunk_info; /* Pointer chunk information */ /* Get pointer to chunk's information */ chunk_info=H5SL_item(curr_node); assert(chunk_info); /* Clean hyperslab span's "scratch" information */ if(H5S_hyper_reset_scratch(chunk_info->fspace)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "unable to reset span scratch info") /* Get the next chunk node in the skip list */ curr_node=H5SL_next(curr_node); } /* end while */ } /* end else */ /* Build the memory selection for each chunk */ if(fsel_type!=H5S_SEL_POINTS && H5S_select_shape_same(file_space,equiv_mspace)==TRUE) { /* Reset chunk template information */ fm->mchunk_tmpl=NULL; /* If the selections are the same shape, use the file chunk information * to 
generate the memory chunk information quickly. */ if(H5D_create_chunk_mem_map_hyper(fm)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create memory chunk selections") } /* end if */ else { size_t elmt_size; /* Memory datatype size */ /* Make a copy of equivalent memory space */ if((tmp_mspace = H5S_copy(equiv_mspace,TRUE))==NULL) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy memory space") /* De-select the mem space copy */ if(H5S_select_none(tmp_mspace)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to de-select memory space") /* Save chunk template information */ fm->mchunk_tmpl=tmp_mspace; /* Create temporary datatypes for selection iteration */ if(f_tid<0) { if((f_tid = H5I_register(H5I_DATATYPE, H5T_copy(dataset->shared->type, H5T_COPY_ALL)))<0) HGOTO_ERROR (H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register file datatype") } /* end if */ /* Create selection iterator for memory selection */ if((elmt_size=H5T_get_size(mem_type))==0) HGOTO_ERROR(H5E_DATATYPE, H5E_BADSIZE, FAIL, "datatype size invalid") if (H5S_select_iter_init(&(fm->mem_iter), equiv_mspace, elmt_size)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator") iter_init=1; /* Selection iteration info has been initialized */ /* Spaces aren't the same shape, iterate over the memory selection directly */ if(H5S_select_iterate(&bogus, f_tid, file_space, H5D_chunk_mem_cb, fm)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create memory chunk selections") /* Clean up hyperslab stuff, if necessary */ if(fm->msel_type!=H5S_SEL_POINTS) { /* Clean memory chunks' hyperslab span "scratch" information */ curr_node=H5SL_first(fm->fsel); while(curr_node) { H5D_chunk_info_t *chunk_info; /* Pointer chunk information */ /* Get pointer to chunk's information */ chunk_info=H5SL_item(curr_node); assert(chunk_info); /* Clean hyperslab span's "scratch" information */ if(H5S_hyper_reset_scratch(chunk_info->mspace)<0) HGOTO_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "unable to reset span scratch info") /* Get the next chunk node in the skip list */ curr_node=H5SL_next(curr_node); } /* end while */ } /* end if */ } /* end else */ done: /* Release the [potentially partially built] chunk mapping information if an error occurs */ if(ret_value<0) { if(tmp_mspace && !fm->mchunk_tmpl) { if(H5S_close(tmp_mspace)<0) HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "can't release memory chunk dataspace template") } /* end if */ if (H5D_destroy_chunk_map(fm)<0) HDONE_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release chunk mapping") } /* end if */ /* Reset the global dataspace info */ fm->file_space=NULL; fm->mem_space=NULL; if(equiv_mspace_init && equiv_mspace) { if(H5S_close(equiv_mspace)<0) HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "can't release memory chunk dataspace template") } /* end if */ if(iter_init) { if (H5S_SELECT_ITER_RELEASE(&(fm->mem_iter))<0) HDONE_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator") } if(f_tid!=(-1)) { if(H5I_dec_ref(f_tid)<0) HDONE_ERROR (H5E_DATASET, H5E_CANTFREE, FAIL, "Can't decrement temporary datatype ID") } /* end if */ FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_create_chunk_map() */ /*-------------------------------------------------------------------------- NAME H5D_free_chunk_info PURPOSE Internal routine to destroy a chunk info node USAGE void H5D_free_chunk_info(chunk_info) void *chunk_info; IN: Pointer to chunk info to destroy RETURNS No return value DESCRIPTION 
Releases all the memory for a chunk info node. Called by H5SL_iterate GLOBAL VARIABLES COMMENTS, BUGS, ASSUMPTIONS EXAMPLES REVISION LOG --------------------------------------------------------------------------*/ static herr_t H5D_free_chunk_info(void *item, void UNUSED *key, void UNUSED *opdata) { H5D_chunk_info_t *chunk_info=(H5D_chunk_info_t *)item; FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_free_chunk_info) assert(chunk_info); /* Close the chunk's file dataspace */ (void)H5S_close(chunk_info->fspace); /* Close the chunk's memory dataspace, if it's not shared */ if(!chunk_info->mspace_shared) (void)H5S_close(chunk_info->mspace); /* Free the actual chunk info */ H5FL_FREE(H5D_chunk_info_t,chunk_info); FUNC_LEAVE_NOAPI(0); } /* H5D_free_chunk_info() */ /*------------------------------------------------------------------------- * Function: H5D_destroy_chunk_map * * Purpose: Destroy chunk mapping information. * * Return: Non-negative on success/Negative on failure * * Programmer: Quincey Koziol * Saturday, May 17, 2003 * * Modifications: * *------------------------------------------------------------------------- */ static herr_t H5D_destroy_chunk_map(const fm_map *fm) { herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_destroy_chunk_map) /* Free the chunk info skip list */ if(fm->fsel) { if(H5SL_count(fm->fsel)>0) if(H5SL_iterate(fm->fsel,H5D_free_chunk_info,NULL)<0) HGOTO_ERROR(H5E_PLIST,H5E_CANTNEXT,FAIL,"can't iterate over chunks") H5SL_close(fm->fsel); } /* end if */ /* Free the memory chunk dataspace template */ if(fm->mchunk_tmpl) if(H5S_close(fm->mchunk_tmpl)<0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "can't release memory chunk dataspace template") done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_destroy_chunk_map() */ /*------------------------------------------------------------------------- * Function: H5D_create_chunk_file_map_hyper * * Purpose: Create all chunk selections in file. 
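 *
 *              The routine only walks the chunks that can intersect the
 *              selection: it gets the selection's bounding box
 *              (H5S_SELECT_BOUNDS), rounds the lower bound down to a chunk
 *              boundary, and then visits candidate chunks in row-major
 *              order, testing each against the selection with
 *              H5S_hyper_intersect_block and computing its linear index
 *              with H5V_chunk_index.
 *
 *              Illustrative example (not from the code): for a 100x100
 *              dataset with 10x10 chunks, the chunk containing element
 *              (35,72) starts at (30,70); its chunk coordinates are (3,7),
 *              so its row-major chunk index is 3*10 + 7 = 37.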
* * Return: Non-negative on success/Negative on failure * * Programmer: Quincey Koziol * Thursday, May 29, 2003 * * Modifications: * *------------------------------------------------------------------------- */ static herr_t H5D_create_chunk_file_map_hyper(const fm_map *fm) { hssize_t ssel_points; /* Number of elements in file selection */ hsize_t sel_points; /* Number of elements in file selection */ hsize_t sel_start[H5O_LAYOUT_NDIMS]; /* Offset of low bound of file selection */ hsize_t sel_end[H5O_LAYOUT_NDIMS]; /* Offset of high bound of file selection */ hsize_t start_coords[H5O_LAYOUT_NDIMS]; /* Starting coordinates of selection */ hsize_t coords[H5O_LAYOUT_NDIMS]; /* Current coordinates of chunk */ hsize_t end[H5O_LAYOUT_NDIMS]; /* Current coordinates of chunk */ hsize_t chunk_index; /* Index of chunk */ int curr_dim; /* Current dimension to increment */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_create_chunk_file_map_hyper) /* Sanity check */ assert(fm->f_ndims>0); /* Get number of elements selected in file */ if((ssel_points=H5S_GET_SELECT_NPOINTS(fm->file_space))<0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection # of elements") H5_ASSIGN_OVERFLOW(sel_points,ssel_points,hssize_t,hsize_t); /* Get bounding box for selection (to reduce the number of chunks to iterate over) */ if(H5S_SELECT_BOUNDS(fm->file_space, sel_start, sel_end)<0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection bound info") /* Set initial chunk location & hyperslab size */ for(u=0; u<fm->f_ndims; u++) { start_coords[u]=(sel_start[u]/fm->layout->u.chunk.dim[u])*fm->layout->u.chunk.dim[u]; coords[u]=start_coords[u]; end[u]=(coords[u]+fm->chunk_dim[u])-1; } /* end for */ /* Calculate the index of this chunk */ if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") /* Iterate through each chunk in the dataset */ while(sel_points) { /* Check for intersection of temporary chunk and file selection */ /* (Casting away const OK - QAK) */ if(H5S_hyper_intersect_block((H5S_t *)fm->file_space,coords,end)==TRUE) { H5S_t *tmp_fchunk; /* Temporary file dataspace */ H5D_chunk_info_t *new_chunk_info; /* chunk information to insert into skip list */ hssize_t schunk_points; /* Number of elements in chunk selection */ /* Create "temporary" chunk for selection operations (copy file space) */ if((tmp_fchunk = H5S_copy(fm->file_space,TRUE))==NULL) HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy memory space") /* Make certain selections are stored in span tree form (not "optimized hyperslab" or "all") */ if(H5S_hyper_convert(tmp_fchunk)<0) { (void)H5S_close(tmp_fchunk); HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to convert selection to span trees") } /* end if */ /* Normalize hyperslab selections by adjusting them by the offset */ if(H5S_hyper_normalize_offset(tmp_fchunk)<0) { (void)H5S_close(tmp_fchunk); HGOTO_ERROR (H5E_DATASET, H5E_BADSELECT, FAIL, "unable to normalize dataspace by offset") } /* end if */ /* "AND" temporary chunk and current chunk */ if(H5S_select_hyperslab(tmp_fchunk,H5S_SELECT_AND,coords,NULL,fm->chunk_dim,NULL)<0) { (void)H5S_close(tmp_fchunk); HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't create chunk selection") } /* end if */ /* Resize chunk's dataspace dimensions to size of chunk */ if(H5S_set_extent_real(tmp_fchunk,fm->chunk_dim)<0) { 
(void)H5S_close(tmp_fchunk); HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't adjust chunk dimensions") } /* end if */ /* Move selection back to have correct offset in chunk */ if(H5S_hyper_adjust_u(tmp_fchunk,coords)<0) { (void)H5S_close(tmp_fchunk); HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't adjust chunk selection") } /* end if */ /* Add temporary chunk to the list of chunks */ /* Allocate the file & memory chunk information */ if (NULL==(new_chunk_info = H5FL_MALLOC (H5D_chunk_info_t))) { (void)H5S_close(tmp_fchunk); HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate chunk info") } /* end if */ /* Initialize the chunk information */ /* Set the chunk index */ new_chunk_info->index=chunk_index; /* Set the file chunk dataspace */ new_chunk_info->fspace=tmp_fchunk; /* Set the memory chunk dataspace */ new_chunk_info->mspace=NULL; new_chunk_info->mspace_shared=0; /* Copy the chunk's coordinates */ for(u=0; u<fm->f_ndims; u++) new_chunk_info->coords[u]=coords[u]; new_chunk_info->coords[fm->f_ndims]=0; /* Insert the new chunk into the skip list */ if(H5SL_insert(fm->fsel,new_chunk_info,&new_chunk_info->index)<0) { H5D_free_chunk_info(new_chunk_info,NULL,NULL); HGOTO_ERROR(H5E_DATASPACE,H5E_CANTINSERT,FAIL,"can't insert chunk into skip list") } /* end if */ /* Get number of elements selected in chunk */ if((schunk_points=H5S_GET_SELECT_NPOINTS(tmp_fchunk))<0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection # of elements") H5_ASSIGN_OVERFLOW(new_chunk_info->chunk_points,schunk_points,hssize_t,size_t); /* Decrement # of points left in file selection */ sel_points-=(hsize_t)schunk_points; /* Leave if we are done */ if(sel_points==0) HGOTO_DONE(SUCCEED) assert(sel_points>0); } /* end if */ /* Increment chunk index */ chunk_index++; /* Set current increment dimension */ curr_dim=(int)fm->f_ndims-1; /* Increment chunk location in fastest changing dimension */ H5_CHECK_OVERFLOW(fm->chunk_dim[curr_dim],hsize_t,hssize_t); coords[curr_dim]+=fm->chunk_dim[curr_dim]; end[curr_dim]+=fm->chunk_dim[curr_dim]; /* Bring chunk location back into bounds, if necessary */ if(coords[curr_dim]>sel_end[curr_dim]) { do { /* Reset current dimension's location to 0 */ coords[curr_dim]=start_coords[curr_dim]; /*lint !e771 The start_coords will always be initialized */ end[curr_dim]=(coords[curr_dim]+(hssize_t)fm->chunk_dim[curr_dim])-1; /* Decrement current dimension */ curr_dim--; /* Increment chunk location in current dimension */ coords[curr_dim]+=fm->chunk_dim[curr_dim]; end[curr_dim]=(coords[curr_dim]+fm->chunk_dim[curr_dim])-1; } while(coords[curr_dim]>sel_end[curr_dim]); /* Re-Calculate the index of this chunk */ if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") } /* end if */ } /* end while */ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_create_chunk_file_map_hyper() */ /*------------------------------------------------------------------------- * Function: H5D_create_chunk_mem_map_hyper * * Purpose: Create all chunk selections in memory by copying the file * chunk selections and adjusting their offsets to be correct * for the memory. * * Return: Non-negative on success/Negative on failure * * Programmer: Quincey Koziol * Thursday, May 29, 2003 * * Assumptions: That the file and memory selections are the same shape. 
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5D_create_chunk_mem_map_hyper(const fm_map *fm)
{
    H5SL_node_t *curr_node;                     /* Current node in skip list */
    hsize_t file_sel_start[H5O_LAYOUT_NDIMS];   /* Offset of low bound of file selection */
    hsize_t file_sel_end[H5O_LAYOUT_NDIMS];     /* Offset of high bound of file selection */
    hsize_t mem_sel_start[H5O_LAYOUT_NDIMS];    /* Offset of low bound of memory selection */
    hsize_t mem_sel_end[H5O_LAYOUT_NDIMS];      /* Offset of high bound of memory selection */
    hssize_t adjust[H5O_LAYOUT_NDIMS];          /* Adjustment to make to all file chunks */
    hssize_t chunk_adjust[H5O_LAYOUT_NDIMS];    /* Adjustment to make to a particular chunk */
    unsigned u;                                 /* Local index variable */
    herr_t ret_value = SUCCEED;                 /* Return value */

    FUNC_ENTER_NOAPI_NOINIT(H5D_create_chunk_mem_map_hyper)

    /* Sanity check */
    assert(fm->f_ndims>0);

    /* Check for all I/O going to a single chunk */
    if(H5SL_count(fm->fsel)==1) {
        H5D_chunk_info_t *chunk_info;           /* Pointer to chunk information */

        /* Get the node */
        curr_node=H5SL_first(fm->fsel);

        /* Get pointer to chunk's information */
        chunk_info=H5SL_item(curr_node);
        assert(chunk_info);

        /* Check if it's OK to share dataspace */
        if(fm->mem_space_copy) {
            /* Copy the memory dataspace & selection to be the chunk's dataspace & selection */
            if((chunk_info->mspace = H5S_copy(fm->mem_space,FALSE))==NULL)
                HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy memory space")
        } /* end if */
        else {
            /* Just point at the memory dataspace & selection */
            /* (Casting away const OK -QAK) */
            chunk_info->mspace=(H5S_t *)fm->mem_space;

            /* Indicate that the chunk's memory space is shared */
            chunk_info->mspace_shared=1;
        } /* end else */
    } /* end if */
    else {
        /* Get bounding box for file selection */
        if(H5S_SELECT_BOUNDS(fm->file_space, file_sel_start, file_sel_end)<0)
            HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection bound info")

        /* Get bounding box for memory selection */
        if(H5S_SELECT_BOUNDS(fm->mem_space, mem_sel_start, mem_sel_end)<0)
            HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get memory selection bound info")

        /* Calculate the adjustment for memory selection from file selection */
        assert(fm->m_ndims==fm->f_ndims);
        for(u=0; u<fm->f_ndims; u++) {
            H5_CHECK_OVERFLOW(file_sel_start[u],hsize_t,hssize_t);
            H5_CHECK_OVERFLOW(mem_sel_start[u],hsize_t,hssize_t);
            adjust[u]=(hssize_t)file_sel_start[u]-(hssize_t)mem_sel_start[u];
        } /* end for */

        /* Iterate over each chunk in the chunk list */
        curr_node=H5SL_first(fm->fsel);
        while(curr_node) {
            H5D_chunk_info_t *chunk_info;       /* Pointer to chunk information */

            /* Get pointer to chunk's information */
            chunk_info=H5SL_item(curr_node);
            assert(chunk_info);

            /* Copy the information */

            /* Copy the memory dataspace */
            if((chunk_info->mspace = H5S_copy(fm->mem_space,TRUE))==NULL)
                HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy memory space")

            /* Release the current selection */
            if(H5S_SELECT_RELEASE(chunk_info->mspace)<0)
                HGOTO_ERROR (H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection")

            /* Copy the file chunk's selection */
            if(H5S_select_copy(chunk_info->mspace,chunk_info->fspace,FALSE)<0)
                HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy selection")

            /* Compensate for the chunk offset */
            for(u=0; u<fm->f_ndims; u++) {
                H5_CHECK_OVERFLOW(chunk_info->coords[u],hsize_t,hssize_t);
                chunk_adjust[u]=adjust[u]-(hssize_t)chunk_info->coords[u]; /*lint !e771 The adjust array will always be initialized */
            } /* end for */
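
            /* Note (illustrative, 1-D example): adjust[] is the constant
             * offset between the file and memory selections (mem = file -
             * adjust), and the selection copied above is relative to the
             * chunk's origin, so combining the two (chunk_adjust[] = adjust[]
             * minus the chunk origin) lets the call below re-express the
             * selection in memory coordinates.  E.g. with a file selection
             * starting at 10 and a memory selection starting at 2, adjust =
             * 8; for a chunk whose origin is 12, chunk_adjust = -4, and file
             * element 13 (chunk-relative offset 1) ends up at memory offset
             * 5 (= 13 - 8).
             */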
/* Adjust the selection */ if(H5S_hyper_adjust_s(chunk_info->mspace,chunk_adjust)<0) /*lint !e772 The chunk_adjust array will always be initialized */ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't adjust chunk selection") /* Get the next chunk node in the skip list */ curr_node=H5SL_next(curr_node); } /* end while */ } /* end else */ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_create_chunk_mem_map_hyper() */ /*------------------------------------------------------------------------- * Function: H5D_chunk_file_cb * * Purpose: Callback routine for file selection iterator. Used when * creating selections in file for each point selected. * * Return: Non-negative on success/Negative on failure * * Programmer: Quincey Koziol * Wednesday, July 23, 2003 * * Modifications: * *------------------------------------------------------------------------- */ static herr_t H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const hsize_t *coords, void *_fm) { fm_map *fm = (fm_map*)_fm; /* File<->memory chunk mapping info */ H5D_chunk_info_t *chunk_info; /* Chunk information for current chunk */ hsize_t coords_in_chunk[H5O_LAYOUT_NDIMS]; /* Coordinates of element in chunk */ hsize_t chunk_index; /* Chunk index */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_file_cb) /* Calculate the index of this chunk */ if(H5V_chunk_index(ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0) HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index") /* Find correct chunk in file & memory skip list */ if(chunk_index==fm->last_index) { /* If the chunk index is the same as the last chunk index we used, * get the cached info to operate on. */ chunk_info=fm->last_chunk_info; } /* end if */ else { /* If the chunk index is not the same as the last chunk index we used, * find the chunk in the skip list. 
 */

        /* Get the chunk node from the skip list */
        if((chunk_info=H5SL_search(fm->fsel,&chunk_index))==NULL) {
            H5S_t *fspace;                      /* File chunk's dataspace */

            /* Allocate the file & memory chunk information */
            if (NULL==(chunk_info = H5FL_MALLOC (H5D_chunk_info_t)))
                HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate chunk info")

            /* Initialize the chunk information */

            /* Set the chunk index */
            chunk_info->index=chunk_index;

            /* Create a dataspace for the chunk */
            if((fspace = H5S_create_simple(fm->f_ndims,fm->chunk_dim,NULL))==NULL) {
                H5FL_FREE(H5D_chunk_info_t,chunk_info);
                HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCREATE, FAIL, "unable to create dataspace for chunk")
            } /* end if */

            /* De-select the chunk space */
            if(H5S_select_none(fspace)<0) {
                (void)H5S_close(fspace);
                H5FL_FREE(H5D_chunk_info_t,chunk_info);
                HGOTO_ERROR (H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to de-select dataspace")
            } /* end if */

            /* Set the file chunk dataspace */
            chunk_info->fspace=fspace;

            /* Set the memory chunk dataspace */
            chunk_info->mspace=NULL;
            chunk_info->mspace_shared=0;

            /* Set the number of selected elements in chunk to zero */
            chunk_info->chunk_points=0;

            /* Compute the chunk's coordinates */
            for(u=0; u<fm->f_ndims; u++) {
                H5_CHECK_OVERFLOW(fm->layout->u.chunk.dim[u],hsize_t,hssize_t);
                chunk_info->coords[u]=(coords[u]/(hssize_t)fm->layout->u.chunk.dim[u])*(hssize_t)fm->layout->u.chunk.dim[u];
            } /* end for */
            chunk_info->coords[fm->f_ndims]=0;

            /* Insert the new chunk into the skip list */
            if(H5SL_insert(fm->fsel,chunk_info,&chunk_info->index)<0) {
                H5D_free_chunk_info(chunk_info,NULL,NULL);
                HGOTO_ERROR(H5E_DATASPACE,H5E_CANTINSERT,FAIL,"can't insert chunk into skip list")
            } /* end if */
        } /* end if */

        /* Update the "last chunk seen" information */
        fm->last_index=chunk_index;
        fm->last_chunk_info=chunk_info;
    } /* end else */

    /* Get the coordinates of the element in the chunk */
    for(u=0; u<fm->f_ndims; u++)
        coords_in_chunk[u]=coords[u]%fm->layout->u.chunk.dim[u];

    /* Add point to file selection for chunk */
    if(H5S_select_elements(chunk_info->fspace,H5S_SELECT_APPEND,1,(const hsize_t **)coords_in_chunk)<0)
        HGOTO_ERROR (H5E_DATASPACE, H5E_CANTSELECT, FAIL, "unable to select element")

    /* Increment the number of elements selected in chunk */
    chunk_info->chunk_points++;

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_chunk_file_cb() */


/*-------------------------------------------------------------------------
 * Function:    H5D_chunk_mem_cb
 *
 * Purpose:     Callback routine for file selection iterator.  Used when
 *              creating selections in memory for each chunk.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Raymond Lu
 *              Thursday, April 10, 2003
 *
 * Modifications:
 *              QAK - 2003/04/17
 *              Hacked on it a lot.
 *              :-)
 *
 *-------------------------------------------------------------------------
 */
/* ARGSUSED */
static herr_t
H5D_chunk_mem_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const hsize_t *coords, void *_fm)
{
    fm_map      *fm = (fm_map*)_fm;             /* File<->memory chunk mapping info */
    H5D_chunk_info_t *chunk_info;               /* Chunk information for current chunk */
    hsize_t     coords_in_mem[H5O_LAYOUT_NDIMS];/* Coordinates of element in memory */
    hsize_t     chunk_index;                    /* Chunk index */
    herr_t      ret_value = SUCCEED;            /* Return value */

    FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_mem_cb)

    /* Calculate the index of this chunk */
    if(H5V_chunk_index(ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0)
        HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")

    /* Find correct chunk in file & memory skip list */
    if(chunk_index==fm->last_index) {
        /* If the chunk index is the same as the last chunk index we used,
         * get the cached spaces to operate on.
         */
        chunk_info=fm->last_chunk_info;
    } /* end if */
    else {
        /* If the chunk index is not the same as the last chunk index we used,
         * find the chunk in the skip list.
         */

        /* Get the chunk node from the skip list */
        if((chunk_info=H5SL_search(fm->fsel,&chunk_index))==NULL)
            HGOTO_ERROR(H5E_DATASPACE,H5E_NOTFOUND,FAIL,"can't locate chunk in skip list")

        /* Check if the chunk already has a memory space */
        if(chunk_info->mspace==NULL) {
            /* Copy the template memory chunk dataspace */
            if((chunk_info->mspace = H5S_copy(fm->mchunk_tmpl,FALSE))==NULL)
                HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy memory space")
        } /* end if */

        /* Update the "last chunk seen" information */
        fm->last_index=chunk_index;
        fm->last_chunk_info=chunk_info;
    } /* end else */

    /* Get coordinates of selection iterator for memory */
    if(H5S_SELECT_ITER_COORDS(&fm->mem_iter,coords_in_mem)<0)
        HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get iterator coordinates")

    /* Add point to memory selection for chunk */
    if(fm->msel_type==H5S_SEL_POINTS) {
        if(H5S_select_elements(chunk_info->mspace,H5S_SELECT_APPEND,1,(const hsize_t **)coords_in_mem)<0)
            HGOTO_ERROR (H5E_DATASPACE, H5E_CANTSELECT, FAIL, "unable to select element")
    } /* end if */
    else {
        if(H5S_hyper_add_span_element(chunk_info->mspace, fm->m_ndims, coords_in_mem)<0)
            HGOTO_ERROR (H5E_DATASPACE, H5E_CANTSELECT, FAIL, "unable to select element")
    } /* end else */

    /* Move memory selection iterator to next element in selection */
    if(H5S_SELECT_ITER_NEXT(&fm->mem_iter,1)<0)
        HGOTO_ERROR (H5E_DATASPACE, H5E_CANTNEXT, FAIL, "unable to move to next iterator location")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_chunk_mem_cb() */


/*-------------------------------------------------------------------------
 * Function:    H5D_get_collective_io_consensus
 *
 * Purpose:     Compare notes with all other processes involved in this I/O
 *              and see if all are go for collective I/O.
 *
 *              If all are, return TRUE.
 *
 *              If any process can't manage collective I/O, then collective
 *              I/O is impossible, and we return FALSE.
 *
 *              If the flags indicate that collective I/O is impossible,
 *              skip the interprocess communication and just return FALSE.
 *
 *              If any error is detected, return FAIL.
 *
 *              (An illustrative, standalone sketch of this all-reduce
 *              consensus pattern appears after H5D_ioinfo_init(), below.)
 *
 * Return:      Success:    TRUE or FALSE
 *
 *              Failure:    FAIL
 *
 * Programmer:  JRM -- 8/30/04
 *
 * Modifications:
 *
 *              None.
 *
 *-------------------------------------------------------------------------
 */
#ifdef H5_HAVE_PARALLEL
static htri_t
H5D_get_collective_io_consensus(const H5F_t *file,
                                const htri_t local_opinion,
                                const unsigned flags)
{
    htri_t   ret_value = FAIL;  /* will update if successful */
    MPI_Comm comm;
    int      int_local_opinion;
    int      consensus;
    int      mpi_result;

    FUNC_ENTER_NOAPI_NOINIT(H5D_get_collective_io_consensus);

    HDassert ( ( local_opinion == TRUE ) || ( local_opinion == FALSE ) );

    /* Don't do the interprocess communication unless the Parallel I/O
     * conversion flag is set -- there may not be other processes to
     * talk to.
     */
    if ( ! ( flags & H5S_CONV_PAR_IO_POSSIBLE ) ) {
        HGOTO_DONE(FALSE);
    }

    comm = H5F_mpi_get_comm(file);

    if ( comm == MPI_COMM_NULL )
        HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, \
                    "can't retrieve MPI communicator")

    if ( local_opinion == TRUE ) {
        int_local_opinion = 1;
    } else {
        int_local_opinion = 0;
    }

    mpi_result = MPI_Allreduce((void *)(&int_local_opinion),
                               (void *)(&consensus),
                               1,
                               MPI_INT,
                               MPI_LAND,
                               comm);

    if ( mpi_result != MPI_SUCCESS )
        HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_result)

    if ( consensus ) {
        ret_value = TRUE;
    } else {
        ret_value = FALSE;
    }

done:
    FUNC_LEAVE_NOAPI(ret_value);
} /* H5D_get_collective_io_consensus() */
#endif /* H5_HAVE_PARALLEL */


/*-------------------------------------------------------------------------
 * Function:    H5D_ioinfo_init
 *
 * Purpose:     Routine for determining correct I/O operations for
 *              each I/O action.
 *
 * Return:      Non-negative on success/Negative on failure
 *
 * Programmer:  Quincey Koziol
 *              Thursday, September 30, 2004
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5D_ioinfo_init(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
    const H5S_t
#if !(defined H5_HAVE_PARALLEL || defined H5S_DEBUG)
    UNUSED
#endif /* H5_HAVE_PARALLEL */
    *mem_space, const H5S_t
#if !(defined H5_HAVE_PARALLEL || defined H5S_DEBUG)
    UNUSED
#endif /* H5_HAVE_PARALLEL */
    *file_space, unsigned
#ifndef H5_HAVE_PARALLEL
    UNUSED
#endif /* H5_HAVE_PARALLEL */
    flags, hbool_t
#ifndef H5_HAVE_PARALLEL
    UNUSED
#endif /* H5_HAVE_PARALLEL */
    *use_par_opt_io, H5D_io_info_t *io_info)
{
#ifdef H5_HAVE_PARALLEL
    htri_t opt;                 /* Flag whether a selection is optimizable */
#endif /* H5_HAVE_PARALLEL */
    herr_t ret_value = SUCCEED; /* Return value */

#if defined H5_HAVE_PARALLEL || defined H5S_DEBUG
    FUNC_ENTER_NOAPI_NOINIT(H5D_ioinfo_init)
#else /* defined H5_HAVE_PARALLEL || defined H5S_DEBUG */
    FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_ioinfo_init)
#endif /* defined H5_HAVE_PARALLEL || defined H5S_DEBUG */

    /* check args */
    HDassert(dset);
    HDassert(dset->ent.file);
    HDassert(mem_space);
    HDassert(file_space);
    HDassert(use_par_opt_io);
    HDassert(io_info);

    /* Set up "normal" I/O fields */
    io_info->dset=dset;
    io_info->dxpl_cache=dxpl_cache;
    io_info->dxpl_id=dxpl_id;
    io_info->store=NULL;        /* Set later in I/O routine?
*/ /* Set I/O operations to initial values */ io_info->ops=dset->shared->io_ops; #ifdef H5_HAVE_PARALLEL /* * Check if we can set direct MPI-IO read/write functions */ opt=H5D_mpio_opt_possible(dset,mem_space,file_space,flags); if(opt==FAIL) HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "invalid check for direct IO dataspace "); opt = H5D_get_collective_io_consensus(dset->ent.file, opt, flags); if ( opt == FAIL ) HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, \ "check for collective I/O consensus failed."); /* Check if we can use the optimized parallel I/O routines */ if(opt==TRUE) { /* Set the pointers to the MPI-specific routines */ if((H5S_SELECT_IS_REGULAR(file_space) == TRUE) && (H5S_SELECT_IS_REGULAR(mem_space) == TRUE)){ io_info->ops.read = H5D_mpio_spaces_read; io_info->ops.write = H5D_mpio_spaces_write; } #ifdef H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS else { io_info->ops.read = H5D_mpio_spaces_span_read; io_info->ops.write = H5D_mpio_spaces_span_write; } #endif /* Indicate that the I/O will be parallel */ *use_par_opt_io=TRUE; } /* end if */ else { /* Indicate that the I/O will _NOT_ be parallel */ *use_par_opt_io=FALSE; io_info->ops.read = H5D_select_read; io_info->ops.write = H5D_select_write; } /* end else */ #else io_info->ops.read = H5D_select_read; io_info->ops.write = H5D_select_write; #endif /* H5_HAVE_PARALLEL */ #ifdef H5S_DEBUG /* Get the information for the I/O statistics */ if((io_info->stats=H5S_find(mem_space,file_space))==NULL) HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "can't set up selection statistics"); #endif /* H5S_DEBUG */ #if defined H5_HAVE_PARALLEL || defined H5S_DEBUG done: #endif /* H5_HAVE_PARALLEL || H5S_DEBUG */ FUNC_LEAVE_NOAPI(ret_value) } /* end H5D_ioinfo_init() */
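
/* The block below is an illustrative, self-contained sketch (not part of the
 * library, guarded by "#if 0" so it is never compiled here) of the consensus
 * pattern used by H5D_get_collective_io_consensus() above: every rank
 * contributes a yes/no vote and MPI_Allreduce with MPI_LAND yields TRUE only
 * if all ranks agree, so collective I/O is attempted only with unanimous
 * consent.  The "local_opinion" value is a stand-in for the per-rank result
 * of H5D_mpio_opt_possible(); the rank chosen to dissent is arbitrary.
 */
#if 0
#include <stdio.h>
#include <mpi.h>

int
main(int argc, char *argv[])
{
    int rank;               /* This process's rank */
    int local_opinion;      /* 1 = this rank can do collective I/O, 0 = it can't */
    int consensus;          /* Result of the logical-AND reduction */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Pretend (hypothetically) that every rank except rank 1 votes "yes" */
    local_opinion = (rank == 1) ? 0 : 1;

    /* Logical AND across all ranks: consensus is 1 only if every vote was 1 */
    if (MPI_Allreduce(&local_opinion, &consensus, 1, MPI_INT, MPI_LAND,
            MPI_COMM_WORLD) != MPI_SUCCESS)
        MPI_Abort(MPI_COMM_WORLD, 1);

    if (rank == 0)
        printf("collective I/O %s\n", consensus ? "possible" : "not possible");

    MPI_Finalize();
    return 0;
}
#endif /* illustrative consensus sketch */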