/***********************************************************
Copyright 1991-1995 by Stichting Mathematisch Centrum, Amsterdam,
The Netherlands.

                        All Rights Reserved

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the names of Stichting Mathematisch
Centrum or CWI or Corporation for National Research Initiatives or
CNRI not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior
permission.

While CWI is the initial source for this software, a modified version
is made available by the Corporation for National Research Initiatives
(CNRI) at the Internet address ftp://ftp.python.org.

STICHTING MATHEMATISCH CENTRUM AND CNRI DISCLAIM ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH
CENTRUM OR CNRI BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.

******************************************************************/

/* List object implementation */

#include "Python.h"

#ifdef STDC_HEADERS
#include <stddef.h>
#else
#include <sys/types.h>          /* For size_t */
#endif

#define ROUNDUP(n, PyTryBlock) \
        ((((n)+(PyTryBlock)-1)/(PyTryBlock))*(PyTryBlock))

static int
roundupsize(n)
        int n;
{
        if (n < 500)
                return ROUNDUP(n, 10);
        else
                return ROUNDUP(n, 100);
}

#define NRESIZE(var, type, nitems) PyMem_RESIZE(var, type, roundupsize(nitems))

PyObject *
PyList_New(size)
        int size;
{
        int i;
        PyListObject *op;
        size_t nbytes;
        if (size < 0) {
                PyErr_BadInternalCall();
                return NULL;
        }
        nbytes = size * sizeof(PyObject *);
        /* Check for overflow */
        if (nbytes / sizeof(PyObject *) != (size_t)size) {
                return PyErr_NoMemory();
        }
        op = (PyListObject *) malloc(sizeof(PyListObject));
        if (op == NULL) {
                return PyErr_NoMemory();
        }
        if (size <= 0) {
                op->ob_item = NULL;
        }
        else {
                op->ob_item = (PyObject **) malloc(nbytes);
                if (op->ob_item == NULL) {
                        free((ANY *)op);
                        return PyErr_NoMemory();
                }
        }
        op->ob_type = &PyList_Type;
        op->ob_size = size;
        for (i = 0; i < size; i++)
                op->ob_item[i] = NULL;
        _Py_NewReference(op);
        return (PyObject *) op;
}

int
PyList_Size(op)
        PyObject *op;
{
        if (!PyList_Check(op)) {
                PyErr_BadInternalCall();
                return -1;
        }
        else
                return ((PyListObject *)op) -> ob_size;
}

static PyObject *indexerr;

PyObject *
PyList_GetItem(op, i)
        PyObject *op;
        int i;
{
        if (!PyList_Check(op)) {
                PyErr_BadInternalCall();
                return NULL;
        }
        if (i < 0 || i >= ((PyListObject *)op) -> ob_size) {
                if (indexerr == NULL)
                        indexerr = PyString_FromString(
                                "list index out of range");
                PyErr_SetObject(PyExc_IndexError, indexerr);
                return NULL;
        }
        return ((PyListObject *)op) -> ob_item[i];
}

int
PyList_SetItem(op, i, newitem)
        register PyObject *op;
        register int i;
        register PyObject *newitem;
{
        register PyObject *olditem;
        register PyObject **p;
        if (!PyList_Check(op)) {
                Py_XDECREF(newitem);
                PyErr_BadInternalCall();
                return -1;
        }
        if (i < 0 || i >= ((PyListObject *)op) -> ob_size) {
                Py_XDECREF(newitem);
                PyErr_SetString(PyExc_IndexError,
                                "list assignment index out of range");
                return -1;
        }
        p = ((PyListObject *)op) -> ob_item + i;
        olditem = *p;
        *p = newitem;
        Py_XDECREF(olditem);
        return 0;
}
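/*
 * Usage sketch (added commentary, not in the original source):
 * PyList_SetItem() above *steals* a reference to newitem -- note that
 * it DECREFs the old item but never INCREFs the new one -- so a typical
 * C caller hands over its only reference:
 *
 *      PyObject *list = PyList_New(3);
 *      int i;
 *      if (list == NULL)
 *              return NULL;
 *      for (i = 0; i < 3; i++) {
 *              PyObject *num = PyInt_FromLong((long)i);
 *              if (num == NULL || PyList_SetItem(list, i, num) < 0) {
 *                      Py_DECREF(list);
 *                      return NULL;
 *              }
 *      }
 *
 * On the error path PyList_SetItem() has already released num with
 * Py_XDECREF, so the caller cleans up only the list itself.
 */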
static int
ins1(self, where, v)
        PyListObject *self;
        int where;
        PyObject *v;
{
        int i;
        PyObject **items;
        if (v == NULL) {
                PyErr_BadInternalCall();
                return -1;
        }
        items = self->ob_item;
        NRESIZE(items, PyObject *, self->ob_size+1);
        if (items == NULL) {
                PyErr_NoMemory();
                return -1;
        }
        if (where < 0)
                where = 0;
        if (where > self->ob_size)
                where = self->ob_size;
        for (i = self->ob_size; --i >= where; )
                items[i+1] = items[i];
        Py_INCREF(v);
        items[where] = v;
        self->ob_item = items;
        self->ob_size++;
        return 0;
}

int
PyList_Insert(op, where, newitem)
        PyObject *op;
        int where;
        PyObject *newitem;
{
        if (!PyList_Check(op)) {
                PyErr_BadInternalCall();
                return -1;
        }
        return ins1((PyListObject *)op, where, newitem);
}

int
PyList_Append(op, newitem)
        PyObject *op;
        PyObject *newitem;
{
        if (!PyList_Check(op)) {
                PyErr_BadInternalCall();
                return -1;
        }
        return ins1((PyListObject *)op,
                    (int) ((PyListObject *)op)->ob_size, newitem);
}

/* Methods */

static void
list_dealloc(op)
        PyListObject *op;
{
        int i;
        if (op->ob_item != NULL) {
                /* Do it backwards, for Christian Tismer.
                   There's a simple test case where somehow this reduces
                   thrashing when a *very* large list is created and
                   immediately deleted. */
                i = op->ob_size;
                while (--i >= 0) {
                        Py_XDECREF(op->ob_item[i]);
                }
                free((ANY *)op->ob_item);
        }
        free((ANY *)op);
}

static int
list_print(op, fp, flags)
        PyListObject *op;
        FILE *fp;
        int flags;
{
        int i;
        i = Py_ReprEnter((PyObject*)op);
        if (i != 0) {
                if (i < 0)
                        return i;
                fprintf(fp, "[...]");
                return 0;
        }
        fprintf(fp, "[");
        for (i = 0; i < op->ob_size; i++) {
                if (i > 0)
                        fprintf(fp, ", ");
                if (PyObject_Print(op->ob_item[i], fp, 0) != 0) {
                        Py_ReprLeave((PyObject *)op);
                        return -1;
                }
        }
        fprintf(fp, "]");
        Py_ReprLeave((PyObject *)op);
        return 0;
}

static PyObject *
list_repr(v)
        PyListObject *v;
{
        PyObject *s, *comma;
        int i;
        i = Py_ReprEnter((PyObject*)v);
        if (i != 0) {
                if (i > 0)
                        return PyString_FromString("[...]");
                return NULL;
        }
        s = PyString_FromString("[");
        comma = PyString_FromString(", ");
        for (i = 0; i < v->ob_size && s != NULL; i++) {
                if (i > 0)
                        PyString_Concat(&s, comma);
                PyString_ConcatAndDel(&s, PyObject_Repr(v->ob_item[i]));
        }
        Py_XDECREF(comma);
        PyString_ConcatAndDel(&s, PyString_FromString("]"));
        Py_ReprLeave((PyObject *)v);
        return s;
}

static int
list_compare(v, w)
        PyListObject *v, *w;
{
        int i;
        for (i = 0; i < v->ob_size && i < w->ob_size; i++) {
                int cmp = PyObject_Compare(v->ob_item[i], w->ob_item[i]);
                if (cmp != 0)
                        return cmp;
        }
        return v->ob_size - w->ob_size;
}

static int
list_length(a)
        PyListObject *a;
{
        return a->ob_size;
}

static PyObject *
list_item(a, i)
        PyListObject *a;
        int i;
{
        if (i < 0 || i >= a->ob_size) {
                if (indexerr == NULL)
                        indexerr = PyString_FromString(
                                "list index out of range");
                PyErr_SetObject(PyExc_IndexError, indexerr);
                return NULL;
        }
        Py_INCREF(a->ob_item[i]);
        return a->ob_item[i];
}

static PyObject *
list_slice(a, ilow, ihigh)
        PyListObject *a;
        int ilow, ihigh;
{
        PyListObject *np;
        int i;
        if (ilow < 0)
                ilow = 0;
        else if (ilow > a->ob_size)
                ilow = a->ob_size;
        if (ihigh < ilow)
                ihigh = ilow;
        else if (ihigh > a->ob_size)
                ihigh = a->ob_size;
        np = (PyListObject *) PyList_New(ihigh - ilow);
        if (np == NULL)
                return NULL;
        for (i = ilow; i < ihigh; i++) {
                PyObject *v = a->ob_item[i];
                Py_INCREF(v);
                np->ob_item[i - ilow] = v;
        }
        return (PyObject *)np;
}

PyObject *
PyList_GetSlice(a, ilow, ihigh)
        PyObject *a;
        int ilow, ihigh;
{
        if (!PyList_Check(a)) {
                PyErr_BadInternalCall();
                return NULL;
        }
        return list_slice((PyListObject *)a, ilow, ihigh);
}
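/*
 * Added note (not in the original source): the NRESIZE idiom used by
 * ins1() above over-allocates through roundupsize().  Working the
 * ROUNDUP arithmetic:
 *
 *      roundupsize(7)   == ((7   +  9) / 10)  * 10  == 10
 *      roundupsize(500) == ((500 + 99) / 100) * 100 == 500
 *      roundupsize(501) == ((501 + 99) / 100) * 100 == 600
 *
 * So although PyMem_RESIZE calls realloc on every insert, the requested
 * size only changes when ob_size crosses a multiple of 10 (or of 100,
 * for lists of 500 items or more), and a reasonable allocator satisfies
 * the unchanged requests in place.
 */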
static PyObject *
list_concat(a, bb)
        PyListObject *a;
        PyObject *bb;
{
        int size;
        int i;
        PyListObject *np;
        if (!PyList_Check(bb)) {
                PyErr_BadArgument();
                return NULL;
        }
#define b ((PyListObject *)bb)
        size = a->ob_size + b->ob_size;
        np = (PyListObject *) PyList_New(size);
        if (np == NULL) {
                return NULL;
        }
        for (i = 0; i < a->ob_size; i++) {
                PyObject *v = a->ob_item[i];
                Py_INCREF(v);
                np->ob_item[i] = v;
        }
        for (i = 0; i < b->ob_size; i++) {
                PyObject *v = b->ob_item[i];
                Py_INCREF(v);
                np->ob_item[i + a->ob_size] = v;
        }
        return (PyObject *)np;
#undef b
}

static PyObject *
list_repeat(a, n)
        PyListObject *a;
        int n;
{
        int i, j;
        int size;
        PyListObject *np;
        PyObject **p;
        if (n < 0)
                n = 0;
        size = a->ob_size * n;
        np = (PyListObject *) PyList_New(size);
        if (np == NULL)
                return NULL;
        p = np->ob_item;
        for (i = 0; i < n; i++) {
                for (j = 0; j < a->ob_size; j++) {
                        *p = a->ob_item[j];
                        Py_INCREF(*p);
                        p++;
                }
        }
        return (PyObject *) np;
}

static int
list_ass_slice(a, ilow, ihigh, v)
        PyListObject *a;
        int ilow, ihigh;
        PyObject *v;
{
        /* Because [X]DECREF can recursively invoke list operations on
           this list, we must postpone all [X]DECREF activity until
           after the list is back in its canonical shape.  Therefore
           we must allocate an additional array, 'recycle', into which
           we temporarily copy the items that are deleted from the
           list. :-( */
        PyObject **recycle, **p;
        PyObject **item;
        int n;  /* Size of replacement list */
        int d;  /* Change in size */
        int k;  /* Loop index */
#define b ((PyListObject *)v)
        if (v == NULL)
                n = 0;
        else if (PyList_Check(v)) {
                n = b->ob_size;
                if (a == b) {
                        /* Special case "a[i:j] = a" -- copy b first */
                        int ret;
                        v = list_slice(b, 0, n);
                        ret = list_ass_slice(a, ilow, ihigh, v);
                        Py_DECREF(v);
                        return ret;
                }
        }
        else {
                PyErr_BadArgument();
                return -1;
        }
        if (ilow < 0)
                ilow = 0;
        else if (ilow > a->ob_size)
                ilow = a->ob_size;
        if (ihigh < ilow)
                ihigh = ilow;
        else if (ihigh > a->ob_size)
                ihigh = a->ob_size;
        item = a->ob_item;
        d = n - (ihigh-ilow);
        if (ihigh > ilow)
                p = recycle = PyMem_NEW(PyObject *, (ihigh-ilow));
        else
                p = recycle = NULL;
        if (d <= 0) { /* Delete -d items; recycle ihigh-ilow items */
                for (k = ilow; k < ihigh; k++)
                        *p++ = item[k];
                if (d < 0) {
                        for (/*k = ihigh*/; k < a->ob_size; k++)
                                item[k+d] = item[k];
                        a->ob_size += d;
                        NRESIZE(item, PyObject *, a->ob_size); /* Can't fail */
                        a->ob_item = item;
                }
        }
        else { /* Insert d items; recycle ihigh-ilow items */
                NRESIZE(item, PyObject *, a->ob_size + d);
                if (item == NULL) {
                        PyMem_XDEL(recycle);
                        PyErr_NoMemory();
                        return -1;
                }
                for (k = a->ob_size; --k >= ihigh; )
                        item[k+d] = item[k];
                for (/*k = ihigh-1*/; k >= ilow; --k)
                        *p++ = item[k];
                a->ob_item = item;
                a->ob_size += d;
        }
        for (k = 0; k < n; k++, ilow++) {
                PyObject *w = b->ob_item[k];
                Py_XINCREF(w);
                item[ilow] = w;
        }
        if (recycle) {
                while (--p >= recycle)
                        Py_XDECREF(*p);
                PyMem_DEL(recycle);
        }
        return 0;
#undef b
}

int
PyList_SetSlice(a, ilow, ihigh, v)
        PyObject *a;
        int ilow, ihigh;
        PyObject *v;
{
        if (!PyList_Check(a)) {
                PyErr_BadInternalCall();
                return -1;
        }
        return list_ass_slice((PyListObject *)a, ilow, ihigh, v);
}

static int
list_ass_item(a, i, v)
        PyListObject *a;
        int i;
        PyObject *v;
{
        PyObject *old_value;
        if (i < 0 || i >= a->ob_size) {
                PyErr_SetString(PyExc_IndexError,
                                "list assignment index out of range");
                return -1;
        }
        if (v == NULL)
                return list_ass_slice(a, i, i+1, v);
        Py_INCREF(v);
        old_value = a->ob_item[i];
        a->ob_item[i] = v;
        Py_DECREF(old_value);
        return 0;
}

static PyObject *
ins(self, where, v)
        PyListObject *self;
        int where;
        PyObject *v;
{
        if (ins1(self, where, v) != 0)
                return NULL;
        Py_INCREF(Py_None);
        return Py_None;
}
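/*
 * Added note (not in the original source): the 'recycle' buffer in
 * list_ass_slice() above exists because Py_DECREF can run arbitrary
 * Python code -- a __del__ method may re-enter this very list.  The
 * safe pattern is "unlink first, decref later":
 *
 *      PyObject *victim = item[k];
 *      item[k] = new_value;            (list is consistent again)
 *      Py_XDECREF(victim);             (only now may Python code run)
 *
 * list_ass_slice() generalizes this to a whole slice by parking every
 * deleted pointer in 'recycle' until the array is back in canonical
 * shape, and only then releasing them all.
 */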
static PyObject *
listinsert(self, args)
        PyListObject *self;
        PyObject *args;
{
        int i;
        PyObject *v;
        if (!PyArg_Parse(args, "(iO)", &i, &v))
                return NULL;
        return ins(self, i, v);
}

static PyObject *
listappend(self, args)
        PyListObject *self;
        PyObject *args;
{
        PyObject *v;
        if (!PyArg_Parse(args, "O", &v))
                return NULL;
        return ins(self, (int) self->ob_size, v);
}

static PyObject *
listextend(self, args)
        PyListObject *self;
        PyObject *args;
{
        PyObject *b = NULL, *res = NULL;
        PyObject **items;
        int selflen = PyList_GET_SIZE(self);
        int blen;
        register int i;

        if (!PyArg_ParseTuple(args, "O", &b))
                return NULL;

        if (!PyList_Check(b)) {
                PyErr_SetString(PyExc_TypeError,
                                "list.extend() argument must be a list");
                return NULL;
        }
        if (PyList_GET_SIZE(b) == 0) {
                /* short circuit when b is empty */
                Py_INCREF(Py_None);
                return Py_None;
        }
        if (self == (PyListObject*)b) {
                /* as in list_ass_slice() we must special case the
                 * situation: a.extend(a)
                 *
                 * XXX: I think this way ought to be faster than using
                 * list_slice() the way list_ass_slice() does.
                 */
                b = PyList_New(selflen);
                if (!b)
                        return NULL;
                for (i = 0; i < selflen; i++) {
                        PyObject *o = PyList_GET_ITEM(self, i);
                        Py_INCREF(o);
                        PyList_SET_ITEM(b, i, o);
                }
        }
        else
                /* we want b to have the same refcount semantics for the
                 * Py_XDECREF() in the finally clause regardless of which
                 * branch in the above conditional we took.
                 */
                Py_INCREF(b);

        blen = PyList_GET_SIZE(b);
        /* resize a using idiom */
        items = self->ob_item;
        NRESIZE(items, PyObject*, selflen + blen);
        if (items == NULL) {
                PyErr_NoMemory();
                goto finally;
        }
        self->ob_item = items;

        /* populate the end of self with b's items */
        for (i = 0; i < blen; i++) {
                PyObject *o = PyList_GET_ITEM(b, i);
                Py_INCREF(o);
                PyList_SET_ITEM(self, self->ob_size++, o);
        }
        res = Py_None;
        Py_INCREF(res);
  finally:
        Py_XDECREF(b);
        return res;
}

static PyObject *
listpop(self, args)
        PyListObject *self;
        PyObject *args;
{
        int i = -1;
        PyObject *v;
        if (!PyArg_ParseTuple(args, "|i", &i))
                return NULL;
        if (self->ob_size == 0) {
                /* Special-case most common failure cause */
                PyErr_SetString(PyExc_IndexError, "pop from empty list");
                return NULL;
        }
        if (i < 0)
                i += self->ob_size;
        if (i < 0 || i >= self->ob_size) {
                PyErr_SetString(PyExc_IndexError, "pop index out of range");
                return NULL;
        }
        v = self->ob_item[i];
        Py_INCREF(v);
        if (list_ass_slice(self, i, i+1, (PyObject *)NULL) != 0) {
                Py_DECREF(v);
                return NULL;
        }
        return v;
}

/* New quicksort implementation for arrays of object pointers.
   Thanks to discussions with Tim Peters. */

/* CMPERROR is returned by our comparison function when an error
   occurred.  This is the most negative integer that fits in an int
   (0x80000000 on a 32-bit system). */
#define CMPERROR ( (int) ((unsigned int)1 << (8*sizeof(int) - 1)) )
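/*
 * Added note (not in the original source): spelling out the CMPERROR
 * bit arithmetic for a 32-bit int,
 *
 *      (unsigned int)1 << 31  ==  0x80000000
 *
 * and casting that back to int on a two's-complement machine yields
 * INT_MIN == -2147483648.  No sane comparison outcome is that negative,
 * so the value can safely double as an in-band error code.
 */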
/* Comparison function.  Takes care of calling a user-supplied
   comparison function (any callable Python object).  Calls the
   standard comparison function, PyObject_Compare(), if the user-
   supplied function is NULL. */

static int
docompare(x, y, compare)
        PyObject *x;
        PyObject *y;
        PyObject *compare;
{
        PyObject *args, *res;
        int i;

        if (compare == NULL) {
                i = PyObject_Compare(x, y);
                if (i && PyErr_Occurred())
                        i = CMPERROR;
                return i;
        }

        args = Py_BuildValue("(OO)", x, y);
        if (args == NULL)
                return CMPERROR;
        res = PyEval_CallObject(compare, args);
        Py_DECREF(args);
        if (res == NULL)
                return CMPERROR;
        if (!PyInt_Check(res)) {
                Py_DECREF(res);
                PyErr_SetString(PyExc_TypeError,
                                "comparison function must return int");
                return CMPERROR;
        }
        i = PyInt_AsLong(res);
        Py_DECREF(res);
        if (i < 0)
                return -1;
        if (i > 0)
                return 1;
        return 0;
}

/* MINSIZE is the smallest array that will get a full-blown samplesort
   treatment; smaller arrays are sorted using binary insertion.  It must
   be at least 7 for the samplesort implementation to work.  Binary
   insertion does fewer compares, but can suffer O(N**2) data movement.
   The more expensive the compares, the larger MINSIZE should be. */
#define MINSIZE 100

/* MINPARTITIONSIZE is the smallest array slice samplesort will bother to
   partition; smaller slices are passed to binarysort.  It must be at
   least 2, and no larger than MINSIZE.  Setting it higher reduces the #
   of compares slowly, but increases the amount of data movement quickly.
   The value here was chosen assuming a compare costs ~25x more than
   swapping a pair of memory-resident pointers -- but under that
   assumption, changing the value by a few dozen more or less has
   aggregate effect under 1%.  So the value is crucial, but not touchy. */
#define MINPARTITIONSIZE 40

/* MAXMERGE is the largest number of elements we'll always merge into
   a known-to-be sorted chunk via binary insertion, regardless of the
   size of that chunk.  Given a chunk of N sorted elements, and a group
   of K unknowns, the largest K for which it's better to do insertion
   (than a full-blown sort) is a complicated function of N and K mostly
   involving the expected number of compares and data moves under each
   approach, and the relative cost of those operations on a specific
   architecture.  The fixed value here is conservative, and should be a
   clear win regardless of architecture or N. */
#define MAXMERGE 15

/* STACKSIZE is the size of our work stack.  A rough estimate is that
   this allows us to sort arrays of size N where
   N / ln(N) = MINPARTITIONSIZE * 2**STACKSIZE, so 60 is more than
   enough for arrays of size 2**64.  Because we push the biggest
   partition first, the worst case occurs when all subarrays are always
   partitioned exactly in two. */
#define STACKSIZE 60

#define SETK(X,Y) if ((k = docompare(X,Y,compare))==CMPERROR) goto fail
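/*
 * Added sanity check (not in the original source) of the STACKSIZE
 * estimate: for N == 2**64 ~= 1.8e19, ln(N) ~= 44.4, so
 * N / ln(N) ~= 4.2e17, while MINPARTITIONSIZE * 2**STACKSIZE ==
 * 40 * 2**60 ~= 4.6e19.  The bound holds with two orders of magnitude
 * to spare, so 60 stack slots cannot overflow for any realizable array.
 */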
/* binarysort is the best method for sorting small arrays: it does
   few compares, but can do data movement quadratic in the number of
   elements.
   [lo, hi) is a contiguous slice of a list, and is sorted via binary
   insertion.
   On entry, must have lo <= start <= hi, and that [lo, start) is
   already sorted (pass start == lo if you don't know!).
   If docompare complains (returns CMPERROR) return -1, else 0.
   Even in case of error, the output slice will be some permutation of
   the input (nothing is lost or duplicated). */

static int
binarysort(lo, hi, start, compare)
        PyObject **lo;
        PyObject **hi;
        PyObject **start;
        PyObject *compare; /* Comparison function object, or NULL for
                              default */
{
        /* assert lo <= start <= hi
           assert [lo, start) is sorted */
        register int k;
        register PyObject **l, **p, **r;
        register PyObject *pivot;

        if (lo == start)
                ++start;
        for (; start < hi; ++start) {
                /* set l to where *start belongs */
                l = lo;
                r = start;
                pivot = *r;
                do {
                        p = l + ((r - l) >> 1);
                        SETK(pivot, *p);
                        if (k < 0)
                                r = p;
                        else
                                l = p + 1;
                } while (l < r);
                /* Pivot should go at l -- slide over to make room.
                   Caution: using memmove is much slower under MSVC 5;
                   we're not usually moving many slots. */
                for (p = start; p > l; --p)
                        *p = *(p-1);
                *l = pivot;
        }
        return 0;

 fail:
        return -1;
}

/* samplesortslice is the sorting workhorse.
   [lo, hi) is a contiguous slice of a list, to be sorted in place.
   On entry, must have lo <= hi.
   If docompare complains (returns CMPERROR) return -1, else 0.
   Even in case of error, the output slice will be some permutation of
   the input (nothing is lost or duplicated).

   samplesort is basically quicksort on steroids:  a power of 2 close
   to n/ln(n) is computed, and that many elements (less 1) are picked
   at random from the array and sorted.  These 2**k - 1 elements are
   then used as preselected pivots for an equal number of quicksort
   partitioning steps, partitioning the slice into 2**k chunks each of
   size about ln(n).  These small final chunks are then usually handled
   by binarysort.  Note that when k=1, this is roughly the same as an
   ordinary quicksort using a random pivot, and when k=2 this is
   roughly a median-of-3 quicksort.  From that view, using
   k ~= lg(n/ln(n)) makes this a "median of n/ln(n)" quicksort.  You
   can also view it as a kind of bucket sort, where 2**k-1 bucket
   boundaries are picked dynamically.

   The large number of samples makes a quadratic-time case almost
   impossible, and asymptotically drives the average-case number of
   compares from quicksort's 2 N ln N (or 12/7 N ln N for the
   median-of-3 variant) down to N lg N.

   We also play lots of low-level tricks to cut the number of compares.

   Very obscure:  To avoid using extra memory, the PPs are stored in
   the array and shuffled around as partitioning proceeds.  At the
   start of a partitioning step, we'll have 2**m-1 (for some m) PPs in
   sorted order, adjacent (either on the left or the right!) to a chunk
   of X elements that are to be partitioned:  P X or X P.  In either
   case we need to shuffle things *in place* so that the 2**(m-1)
   smaller PPs are on the left, followed by the PP to be used for this
   step (that's the middle of the PPs), followed by X, followed by the
   2**(m-1) larger PPs:

       P X or X P -> Psmall pivot X Plarge

   and the order of the PPs must not be altered.  It can take a while
   to realize this isn't trivial!  It can take even longer to
   understand why the simple code below works, using only 2**(m-1)
   swaps.  The key is that the order of the X elements isn't
   necessarily preserved:  X can end up as some cyclic permutation of
   its original order.  That's OK, because X is unsorted anyway.  If
   the order of X had to be preserved too, the simplest method I know
   of using O(1) scratch storage requires len(X) + 2**(m-1) swaps,
   spread over 2 passes.  Since len(X) is typically several times
   larger than 2**(m-1), that would slow things down. */
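/*
 * Worked instance (added commentary, not in the original source) of the
 * pivot shuffle described above, for the P X case with 2**m - 1 == 3
 * sorted PPs to the left of three unknowns:
 *
 *      p1 p2 p3 x1 x2 x3           lo -> x1, hi -> one past x3
 *
 * With k == extra >> 1 == 1, the "swap the larger PPs to the right end"
 * loop in the partitioning code exchanges p3 and x3, and --lo then
 * exposes p2 as the pivot:
 *
 *      p1 | p2 | x3 x1 x2 | p3     Psmall  pivot  X  Plarge
 *
 * The PPs p1 < p2 < p3 kept their relative order, while X came out as
 * the cyclic rotation (x3 x1 x2) -- harmless, exactly as argued above.
 */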
"extra" additional & adjacent elements are pre-selected pivots (PPs), spanning [lo-extra, lo) if extra > 0, or [hi, hi-extra) if extra < 0. The PPs are already sorted, but nothing is known about the other elements in [lo, hi). |extra| is always one less than a power of 2. When extra is 0, we're out of PPs, and the slice must be sorted by some other means. */ PyObject **lo; PyObject **hi; int extra; }; /* The number of PPs we want is 2**k - 1, where 2**k is as close to N / ln(N) as possible. So k ~= lg(N / ln(N)). Calling libm routines is undesirable, so cutoff values are canned in the "cutoff" table below: cutoff[i] is the smallest N such that k == CUTOFFBASE + i. */ #define CUTOFFBASE 4 static long cutoff[] = { 43, /* smallest N such that k == 4 */ 106, /* etc */ 250, 576, 1298, 2885, 6339, 13805, 29843, 64116, 137030, 291554, 617916, 1305130, 2748295, 5771662, 12091672, 25276798, 52734615, 109820537, 228324027, 473977813, 982548444, /* smallest N such that k == 26 */ 2034159050 /* largest N that fits in signed 32-bit; k == 27 */ }; static int samplesortslice(lo, hi, compare) PyObject **lo; PyObject **hi; PyObject *compare;/* Comparison function object, or NULL for default */ { register PyObject **l, **r; register PyObject *tmp, *pivot; register int k; int n, extra, top, extraOnRight; struct SamplesortStackNode stack[STACKSIZE]; /* assert lo <= hi */ n = hi - lo; /* ---------------------------------------------------------- * Special cases * --------------------------------------------------------*/ if (n < 2) return 0; /* Set r to the largest value such that [lo,r) is sorted. This catches the already-sorted case, the all-the-same case, and the appended-a-few-elements-to-a-sorted-list case. If the array is unsorted, we're very likely to get out of the loop fast, so the test is cheap if it doesn't pay off. */ /* assert lo < hi */ for (r = lo+1; r < hi; ++r) { SETK(*r, *(r-1)); if (k < 0) break; } /* [lo,r) is sorted, [r,hi) unknown. Get out cheap if there are few unknowns, or few elements in total. */ if (hi - r <= MAXMERGE || n < MINSIZE) return binarysort(lo, hi, r, compare); /* Check for the array already being reverse-sorted. Typical benchmark-driven silliness . */ /* assert lo < hi */ for (r = lo+1; r < hi; ++r) { SETK(*(r-1), *r); if (k < 0) break; } if (hi - r <= MAXMERGE) { /* Reverse the reversed prefix, then insert the tail */ PyObject **originalr = r; l = lo; do { --r; tmp = *l; *l = *r; *r = tmp; ++l; } while (l < r); return binarysort(lo, hi, originalr, compare); } /* ---------------------------------------------------------- * Normal case setup: a large array without obvious pattern. * --------------------------------------------------------*/ /* extra := a power of 2 ~= n/ln(n), less 1. First find the smallest extra s.t. n < cutoff[extra] */ for (extra = 0; extra < sizeof(cutoff) / sizeof(cutoff[0]); ++extra) { if (n < cutoff[extra]) break; /* note that if we fall out of the loop, the value of extra still makes *sense*, but may be smaller than we would like (but the array has more than ~= 2**31 elements in this case!) */ } /* Now k == extra - 1 + CUTOFFBASE. The smallest value k can have is CUTOFFBASE-1, so assert MINSIZE >= 2**(CUTOFFBASE-1) - 1 */ extra = (1 << (extra - 1 + CUTOFFBASE)) - 1; /* assert extra > 0 and n >= extra */ /* Swap that many values to the start of the array. The selection of elements is pseudo-random, but the same on every run (this is intentional! timing algorithm changes is a pain if timing varies across runs). 
        /* Swap that many values to the start of the array.  The
           selection of elements is pseudo-random, but the same on
           every run (this is intentional! timing algorithm changes is
           a pain if timing varies across runs). */
        {
                unsigned int seed = n / extra; /* arbitrary */
                unsigned int i;
                for (i = 0; i < (unsigned)extra; ++i) {
                        /* j := random int in [i, n) */
                        unsigned int j;
                        seed = seed * 69069 + 7;
                        j = i + seed % (n - i);
                        tmp = lo[i]; lo[i] = lo[j]; lo[j] = tmp;
                }
        }
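        /*
         * Added note (not in the original source): the scramble above
         * is a partial Fisher-Yates shuffle driven by a linear
         * congruential generator,
         *
         *      seed = seed * 69069 + 7;   (implicitly mod 2**32 via
         *                                  unsigned wraparound)
         *
         * 69069 is Marsaglia's classic multiplier; with an odd
         * increment and 69069 == 1 (mod 4), the recurrence has full
         * period 2**32.  Deriving the seed from n / extra (rather than
         * the clock) is what keeps runs repeatable, as the comment
         * above intends.
         */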
        /* Recursively sort the preselected pivots. */
        if (samplesortslice(lo, lo + extra, compare) < 0)
                goto fail;

        top = 0;          /* index of available stack slot */
        lo += extra;      /* point to first unknown */
        extraOnRight = 0; /* the PPs are at the left end */

        /* ----------------------------------------------------------
         * Partition [lo, hi), and repeat until out of work.
         * --------------------------------------------------------*/
        for (;;) {
                /* assert lo <= hi, so n >= 0 */
                n = hi - lo;

                /* We may not want, or may not be able, to partition:
                   If n is small, it's quicker to insert.
                   If extra is 0, we're out of pivots, and *must* use
                   another method. */
                if (n < MINPARTITIONSIZE || extra == 0) {
                        if (n >= MINSIZE) {
                                /* assert extra == 0
                                   This is rare, since the average size
                                   of a final block is only about
                                   ln(original n). */
                                if (samplesortslice(lo, hi, compare) < 0)
                                        goto fail;
                        }
                        else {
                                /* Binary insertion should be quicker,
                                   and we can take advantage of the PPs
                                   already being sorted. */
                                if (extraOnRight && extra) {
                                        /* swap the PPs to the left end */
                                        k = extra;
                                        do {
                                                tmp = *lo;
                                                *lo = *hi;
                                                *hi = tmp;
                                                ++lo; ++hi;
                                        } while (--k);
                                }
                                if (binarysort(lo - extra, hi, lo,
                                               compare) < 0)
                                        goto fail;
                        }

                        /* Find another slice to work on. */
                        if (--top < 0)
                                break;  /* no more -- done! */
                        lo = stack[top].lo;
                        hi = stack[top].hi;
                        extra = stack[top].extra;
                        extraOnRight = 0;
                        if (extra < 0) {
                                extraOnRight = 1;
                                extra = -extra;
                        }
                        continue;
                }

                /* Pretend the PPs are indexed 0, 1, ..., extra-1.
                   Then our preselected pivot is at (extra-1)/2, and we
                   want to move the PPs before that to the left end of
                   the slice, and the PPs after that to the right end.
                   The following section changes extra, lo, hi, and the
                   slice such that:
                   [lo-extra, lo) contains the smaller PPs.
                   *lo == our PP.
                   (lo, hi) contains the unknown elements.
                   [hi, hi+extra) contains the larger PPs. */
                k = extra >>= 1;  /* num PPs to move */
                if (extraOnRight) {
                        /* Swap the smaller PPs to the left end.
                           Note that this loop actually moves k+1 items:
                           the last is our PP */
                        do {
                                tmp = *lo; *lo = *hi; *hi = tmp;
                                ++lo; ++hi;
                        } while (k--);
                }
                else {
                        /* Swap the larger PPs to the right end. */
                        while (k--) {
                                --lo; --hi;
                                tmp = *lo; *lo = *hi; *hi = tmp;
                        }
                }
                --lo;   /* *lo is now our PP */
                pivot = *lo;

                /* Now an almost-ordinary quicksort partition step.
                   Note that most of the time is spent here!
                   Only odd thing is that we partition into < and >=,
                   instead of the usual <= and >=.  This helps when
                   there are lots of duplicates of different values,
                   because it eventually tends to make subfiles "pure"
                   (all duplicates), and we special-case for duplicates
                   later. */
                l = lo + 1;
                r = hi - 1;
                /* assert lo < l < r < hi (small n weeded out above) */

                do {
                        /* slide l right, looking for key >= pivot */
                        do {
                                SETK(*l, pivot);
                                if (k < 0)
                                        ++l;
                                else
                                        break;
                        } while (l < r);

                        /* slide r left, looking for key < pivot */
                        while (l < r) {
                                register PyObject *rval = *r--;
                                SETK(rval, pivot);
                                if (k < 0) {
                                        /* swap and advance */
                                        r[1] = *l;
                                        *l++ = rval;
                                        break;
                                }
                        }
                } while (l < r);

                /* assert lo < r <= l < hi
                   assert r == l or r+1 == l
                   everything to the left of l is < pivot, and
                   everything to the right of r is >= pivot */

                if (l == r) {
                        SETK(*r, pivot);
                        if (k < 0)
                                ++l;
                        else
                                --r;
                }
                /* assert lo <= r and r+1 == l and l <= hi
                   assert r == lo or a[r] < pivot
                   assert a[lo] is pivot
                   assert l == hi or a[l] >= pivot
                   Swap the pivot into "the middle", so we can
                   henceforth ignore it. */
                *lo = *r;
                *r = pivot;

                /* The following is true now, & will be preserved:
                   All in [lo,r) are < pivot
                   All in [r,l) == pivot (& so can be ignored)
                   All in [l,hi) are >= pivot */

                /* Check for duplicates of the pivot.  One compare is
                   wasted if there are no duplicates, but can win big
                   when there are.
                   Tricky: we're sticking to "<" compares, so deduce
                   equality indirectly.  We know pivot <= *l, so
                   they're equal iff not pivot < *l. */
                while (l < hi) {
                        /* pivot <= *l known */
                        SETK(pivot, *l);
                        if (k < 0)
                                break;
                        else
                                /* <= and not < implies == */
                                ++l;
                }
                /* assert lo <= r < l <= hi
                   Partitions are [lo, r) and [l, hi) */

                /* push fattest first; remember we still have extra PPs
                   to the left of the left chunk and to the right of
                   the right chunk! */
                /* assert top < STACKSIZE */
                if (r - lo <= hi - l) {
                        /* second is bigger */
                        stack[top].lo = l;
                        stack[top].hi = hi;
                        stack[top].extra = -extra;
                        hi = r;
                        extraOnRight = 0;
                }
                else {
                        /* first is bigger */
                        stack[top].lo = lo;
                        stack[top].hi = r;
                        stack[top].extra = extra;
                        lo = l;
                        extraOnRight = 1;
                }
                ++top;

        }   /* end of partitioning loop */

        return 0;

 fail:
        return -1;
}

#undef SETK

staticforward PyTypeObject immutable_list_type;

static PyObject *
listsort(self, compare)
        PyListObject *self;
        PyObject *compare;
{
        int err;

        self->ob_type = &immutable_list_type;
        err = samplesortslice(self->ob_item,
                              self->ob_item + self->ob_size,
                              compare);
        self->ob_type = &PyList_Type;
        if (err < 0)
                return NULL;
        Py_INCREF(Py_None);
        return Py_None;
}

int
PyList_Sort(v)
        PyObject *v;
{
        if (v == NULL || !PyList_Check(v)) {
                PyErr_BadInternalCall();
                return -1;
        }
        v = listsort((PyListObject *)v, (PyObject *)NULL);
        if (v == NULL)
                return -1;
        Py_DECREF(v);
        return 0;
}

static PyObject *
listreverse(self, args)
        PyListObject *self;
        PyObject *args;
{
        register PyObject **p, **q;
        register PyObject *tmp;

        if (args != NULL) {
                PyErr_BadArgument();
                return NULL;
        }

        if (self->ob_size > 1) {
                for (p = self->ob_item,
                     q = self->ob_item + self->ob_size - 1;
                     p < q;
                     p++, q--) {
                        tmp = *p;
                        *p = *q;
                        *q = tmp;
                }
        }

        Py_INCREF(Py_None);
        return Py_None;
}

int
PyList_Reverse(v)
        PyObject *v;
{
        if (v == NULL || !PyList_Check(v)) {
                PyErr_BadInternalCall();
                return -1;
        }
        v = listreverse((PyListObject *)v, (PyObject *)NULL);
        if (v == NULL)
                return -1;
        Py_DECREF(v);
        return 0;
}

PyObject *
PyList_AsTuple(v)
        PyObject *v;
{
        PyObject *w;
        PyObject **p;
        int n;
        if (v == NULL || !PyList_Check(v)) {
                PyErr_BadInternalCall();
                return NULL;
        }
        n = ((PyListObject *)v)->ob_size;
        w = PyTuple_New(n);
        if (w == NULL)
                return NULL;
        p = ((PyTupleObject *)w)->ob_item;
        memcpy((ANY *)p,
               (ANY *)((PyListObject *)v)->ob_item,
               n*sizeof(PyObject *));
        while (--n >= 0) {
                Py_INCREF(*p);
                p++;
        }
        return w;
}
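/*
 * Usage sketch (added commentary, not in the original source): the
 * C-level entry points above compose in the obvious way --
 *
 *      if (PyList_Sort(list) < 0)
 *              return NULL;
 *      tuple = PyList_AsTuple(list);
 *
 * Note that PyList_AsTuple() copies the item pointers wholesale with
 * memcpy() and then INCREFs each one, so the new tuple shares its
 * element objects with the (unchanged) list.
 */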
static PyObject *
listindex(self, args)
        PyListObject *self;
        PyObject *args;
{
        int i;

        if (args == NULL) {
                PyErr_BadArgument();
                return NULL;
        }
        for (i = 0; i < self->ob_size; i++) {
                if (PyObject_Compare(self->ob_item[i], args) == 0)
                        return PyInt_FromLong((long)i);
                if (PyErr_Occurred())
                        return NULL;
        }
        PyErr_SetString(PyExc_ValueError, "list.index(x): x not in list");
        return NULL;
}

static PyObject *
listcount(self, args)
        PyListObject *self;
        PyObject *args;
{
        int count = 0;
        int i;

        if (args == NULL) {
                PyErr_SetString(PyExc_TypeError,
                                "list.count(x): argument missing");
                return NULL;
        }
        for (i = 0; i < self->ob_size; i++) {
                if (PyObject_Compare(self->ob_item[i], args) == 0)
                        count++;
                if (PyErr_Occurred())
                        return NULL;
        }
        return PyInt_FromLong((long)count);
}

static PyObject *
listremove(self, args)
        PyListObject *self;
        PyObject *args;
{
        int i;

        if (args == NULL) {
                PyErr_BadArgument();
                return NULL;
        }
        for (i = 0; i < self->ob_size; i++) {
                if (PyObject_Compare(self->ob_item[i], args) == 0) {
                        if (list_ass_slice(self, i, i+1,
                                           (PyObject *)NULL) != 0)
                                return NULL;
                        Py_INCREF(Py_None);
                        return Py_None;
                }
                if (PyErr_Occurred())
                        return NULL;
        }
        PyErr_SetString(PyExc_ValueError, "list.remove(x): x not in list");
        return NULL;
}

static char append_doc[] =
"L.append(object) -- append object to end";
static char extend_doc[] =
"L.extend(list) -- extend list by appending list elements";
static char insert_doc[] =
"L.insert(index, object) -- insert object before index";
static char pop_doc[] =
"L.pop([index]) -> item -- remove and return item at index (default last)";
static char remove_doc[] =
"L.remove(value) -- remove first occurrence of value";
static char index_doc[] =
"L.index(value) -> integer -- return index of first occurrence of value";
static char count_doc[] =
"L.count(value) -> integer -- return number of occurrences of value";
static char reverse_doc[] =
"L.reverse() -- reverse *IN PLACE*";
static char sort_doc[] =
"L.sort([cmpfunc]) -- sort *IN PLACE*; if given, cmpfunc(x, y) -> -1, 0, 1";

static PyMethodDef list_methods[] = {
        {"append",      (PyCFunction)listappend,  0, append_doc},
        {"insert",      (PyCFunction)listinsert,  0, insert_doc},
        {"extend",      (PyCFunction)listextend,  1, extend_doc},
        {"pop",         (PyCFunction)listpop,     1, pop_doc},
        {"remove",      (PyCFunction)listremove,  0, remove_doc},
        {"index",       (PyCFunction)listindex,   0, index_doc},
        {"count",       (PyCFunction)listcount,   0, count_doc},
        {"reverse",     (PyCFunction)listreverse, 0, reverse_doc},
        {"sort",        (PyCFunction)listsort,    0, sort_doc},
        {NULL,          NULL}           /* sentinel */
};

static PyObject *
list_getattr(f, name)
        PyListObject *f;
        char *name;
{
        return Py_FindMethod(list_methods, (PyObject *)f, name);
}

static PySequenceMethods list_as_sequence = {
        (inquiry)list_length,             /*sq_length*/
        (binaryfunc)list_concat,          /*sq_concat*/
        (intargfunc)list_repeat,          /*sq_repeat*/
        (intargfunc)list_item,            /*sq_item*/
        (intintargfunc)list_slice,        /*sq_slice*/
        (intobjargproc)list_ass_item,     /*sq_ass_item*/
        (intintobjargproc)list_ass_slice, /*sq_ass_slice*/
};

PyTypeObject PyList_Type = {
        PyObject_HEAD_INIT(&PyType_Type)
        0,
        "list",
        sizeof(PyListObject),
        0,
        (destructor)list_dealloc,       /*tp_dealloc*/
        (printfunc)list_print,          /*tp_print*/
        (getattrfunc)list_getattr,      /*tp_getattr*/
        0,                              /*tp_setattr*/
        (cmpfunc)list_compare,          /*tp_compare*/
        (reprfunc)list_repr,            /*tp_repr*/
        0,                              /*tp_as_number*/
        &list_as_sequence,              /*tp_as_sequence*/
        0,                              /*tp_as_mapping*/
};
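/*
 * Added note (not in the original source): the third column of
 * list_methods above selects the calling convention.  A 0 flag means
 * the method receives its argument "raw" and unpacks it with
 * PyArg_Parse() -- e.g. listappend's "O", or listinsert's "(iO)" --
 * while a 1 flag (METH_VARARGS) means it receives a proper argument
 * tuple and uses PyArg_ParseTuple(), as listextend and listpop do.
 */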
/* During a sort, we really can't have anyone modifying the list; it
   could cause core dumps.  Thus, we substitute a dummy type that
   raises an explanatory exception when a modifying operation is used.
   Caveat: comparisons may behave differently; but I guess it's a bad
   idea anyway to compare a list that's being sorted... */

static PyObject *
immutable_list_op(/*No args!*/)
{
        PyErr_SetString(PyExc_TypeError,
                        "a list cannot be modified while it is being sorted");
        return NULL;
}

static PyMethodDef immutable_list_methods[] = {
        {"append",      (PyCFunction)immutable_list_op},
        {"insert",      (PyCFunction)immutable_list_op},
        {"remove",      (PyCFunction)immutable_list_op},
        {"index",       (PyCFunction)listindex},
        {"count",       (PyCFunction)listcount},
        {"reverse",     (PyCFunction)immutable_list_op},
        {"sort",        (PyCFunction)immutable_list_op},
        {NULL,          NULL}           /* sentinel */
};

static PyObject *
immutable_list_getattr(f, name)
        PyListObject *f;
        char *name;
{
        return Py_FindMethod(immutable_list_methods, (PyObject *)f, name);
}

static int
immutable_list_ass(/*No args!*/)
{
        immutable_list_op();
        return -1;
}

static PySequenceMethods immutable_list_as_sequence = {
        (inquiry)list_length,                  /*sq_length*/
        (binaryfunc)list_concat,               /*sq_concat*/
        (intargfunc)list_repeat,               /*sq_repeat*/
        (intargfunc)list_item,                 /*sq_item*/
        (intintargfunc)list_slice,             /*sq_slice*/
        (intobjargproc)immutable_list_ass,     /*sq_ass_item*/
        (intintobjargproc)immutable_list_ass,  /*sq_ass_slice*/
};

static PyTypeObject immutable_list_type = {
        PyObject_HEAD_INIT(&PyType_Type)
        0,
        "list (immutable, during sort)",
        sizeof(PyListObject),
        0,
        0,              /*tp_dealloc*/  /* Cannot happen */
        (printfunc)list_print,          /*tp_print*/
        (getattrfunc)immutable_list_getattr, /*tp_getattr*/
        0,                              /*tp_setattr*/
        0,              /*tp_compare*/  /* Won't be called */
        (reprfunc)list_repr,            /*tp_repr*/
        0,                              /*tp_as_number*/
        &immutable_list_as_sequence,    /*tp_as_sequence*/
        0,                              /*tp_as_mapping*/
};
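/*
 * Added note (not in the original source) on how the guard above works
 * in practice: while listsort() is running, ob_type points at
 * immutable_list_type, so
 *
 *      - attribute lookup goes through immutable_list_getattr(), and a
 *        comparison function that tries list.append(...) gets the
 *        "cannot be modified while it is being sorted" TypeError;
 *      - item and slice assignment dispatch to immutable_list_ass(),
 *        which raises the same error;
 *      - even C callers are fenced out, since PyList_Check() compares
 *        ob_type against &PyList_Type, so e.g. PyList_Append() then
 *        reports a bad internal call.
 */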