author    Stefan Krah <skrah@bytereef.org>    2012-02-25 11:24:21 (GMT)
committer Stefan Krah <skrah@bytereef.org>    2012-02-25 11:24:21 (GMT)
commit    9a2d99e28a5c2989b2db4023acae4f550885f2ef (patch)
tree      29bb99fc008de30ecc1e765d6d14ee35cd5bdfe5 /Objects
parent    5a3d04623b0dc8219326989bc3619d5f56737a94 (diff)
- Issue #10181: New memoryview implementation fixes multiple ownership
  and lifetime issues of dynamically allocated Py_buffer members (#9990)
  as well as crashes (#8305, #7433). Many new features have been added
  (see whatsnew/3.3), and the documentation has been updated extensively.
  The ndarray test object from _testbuffer.c implements all aspects of
  PEP-3118, so further development towards the complete implementation of
  the PEP can proceed in a test-driven manner. Thanks to Nick Coghlan,
  Antoine Pitrou and Pauli Virtanen for review and many ideas.

- Issue #12834: Fix incorrect results of memoryview.tobytes() for
  non-contiguous arrays.

- Issue #5231: Introduce memoryview.cast() method that allows changing
  format and shape without making a copy of the underlying memory.
Diffstat (limited to 'Objects')
-rw-r--r--   Objects/abstract.c        14
-rw-r--r--   Objects/memoryobject.c  2865
-rw-r--r--   Objects/object.c           3
3 files changed, 2312 insertions, 570 deletions
diff --git a/Objects/abstract.c b/Objects/abstract.c
index 47010d6..62fccdc 100644
--- a/Objects/abstract.c
+++ b/Objects/abstract.c
@@ -340,7 +340,7 @@ PyObject_GetBuffer(PyObject *obj, Py_buffer *view, int flags)
}
static int
-_IsFortranContiguous(Py_buffer *view)
+_IsFortranContiguous(const Py_buffer *view)
{
Py_ssize_t sd, dim;
int i;
@@ -361,7 +361,7 @@ _IsFortranContiguous(Py_buffer *view)
}
static int
-_IsCContiguous(Py_buffer *view)
+_IsCContiguous(const Py_buffer *view)
{
Py_ssize_t sd, dim;
int i;
@@ -382,16 +382,16 @@ _IsCContiguous(Py_buffer *view)
}
int
-PyBuffer_IsContiguous(Py_buffer *view, char fort)
+PyBuffer_IsContiguous(const Py_buffer *view, char order)
{
if (view->suboffsets != NULL) return 0;
- if (fort == 'C')
+ if (order == 'C')
return _IsCContiguous(view);
- else if (fort == 'F')
+ else if (order == 'F')
return _IsFortranContiguous(view);
- else if (fort == 'A')
+ else if (order == 'A')
return (_IsCContiguous(view) || _IsFortranContiguous(view));
return 0;
}
@@ -651,7 +651,7 @@ int
PyBuffer_FillInfo(Py_buffer *view, PyObject *obj, void *buf, Py_ssize_t len,
int readonly, int flags)
{
- if (view == NULL) return 0;
+ if (view == NULL) return 0; /* XXX why not -1? */
if (((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE) &&
(readonly == 1)) {
PyErr_SetString(PyExc_BufferError,
diff --git a/Objects/memoryobject.c b/Objects/memoryobject.c
index 295a742..e87abf5 100644
--- a/Objects/memoryobject.c
+++ b/Objects/memoryobject.c
@@ -1,127 +1,918 @@
-
/* Memoryview object implementation */
#include "Python.h"
+#include <stddef.h>
+
+
+/****************************************************************************/
+/* ManagedBuffer Object */
+/****************************************************************************/
+
+/*
+ ManagedBuffer Object:
+ ---------------------
+
+ The purpose of this object is to facilitate the handling of chained
+ memoryviews that have the same underlying exporting object. PEP-3118
+ allows the underlying object to change while a view is exported. This
+ could lead to unexpected results when constructing a new memoryview
+ from an existing memoryview.
+
+ Rather than repeatedly redirecting buffer requests to the original base
+ object, all chained memoryviews use a single buffer snapshot. This
+ snapshot is generated by the constructor _PyManagedBuffer_FromObject().
+
+ Ownership rules:
+ ----------------
+
+ The master buffer inside a managed buffer is filled in by the original
+ base object. shape, strides, suboffsets and format are read-only for
+ all consumers.
+
+ A memoryview's buffer is a private copy of the exporter's buffer. shape,
+ strides and suboffsets belong to the memoryview and are thus writable.
+
+ If a memoryview itself exports several buffers via memory_getbuf(), all
+ buffer copies share shape, strides and suboffsets. In this case, the
+ arrays are NOT writable.
+
+ Reference count assumptions:
+ ----------------------------
+
+ The 'obj' member of a Py_buffer must either be NULL or refer to the
+ exporting base object. In the Python codebase, all getbufferprocs
+ return a new reference to view.obj (example: bytes_buffer_getbuffer()).
+
+ PyBuffer_Release() decrements view.obj (if non-NULL), so the
+ releasebufferprocs must NOT decrement view.obj.
+*/
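/*
  Editorial sketch (not part of this patch): a minimal getbufferproc that
  follows the reference count rules above.  PyBuffer_FillInfo() stores a
  new reference to the exporter in view->obj; PyBuffer_Release() drops
  that reference later, so a releasebufferproc must not.  'MyObject' and
  its 'data'/'size' members are hypothetical.
*/
static int
myobj_getbuffer(MyObject *self, Py_buffer *view, int flags)
{
    /* export a read-only, one-dimensional byte buffer */
    return PyBuffer_FillInfo(view, (PyObject *)self,
                             self->data, self->size, 1, flags);
}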
+
-#define IS_RELEASED(memobj) \
- (((PyMemoryViewObject *) memobj)->view.buf == NULL)
+#define XSTRINGIZE(v) #v
+#define STRINGIZE(v) XSTRINGIZE(v)
-#define CHECK_RELEASED(memobj) \
- if (IS_RELEASED(memobj)) { \
- PyErr_SetString(PyExc_ValueError, \
- "operation forbidden on released memoryview object"); \
- return NULL; \
+#define CHECK_MBUF_RELEASED(mbuf) \
+ if (((_PyManagedBufferObject *)mbuf)->flags&_Py_MANAGED_BUFFER_RELEASED) { \
+ PyErr_SetString(PyExc_ValueError, \
+ "operation forbidden on released memoryview object"); \
+ return NULL; \
}
-#define CHECK_RELEASED_INT(memobj) \
- if (IS_RELEASED(memobj)) { \
- PyErr_SetString(PyExc_ValueError, \
- "operation forbidden on released memoryview object"); \
- return -1; \
+
+Py_LOCAL_INLINE(_PyManagedBufferObject *)
+mbuf_alloc(void)
+{
+ _PyManagedBufferObject *mbuf;
+
+ mbuf = (_PyManagedBufferObject *)
+ PyObject_GC_New(_PyManagedBufferObject, &_PyManagedBuffer_Type);
+ if (mbuf == NULL)
+ return NULL;
+ mbuf->flags = 0;
+ mbuf->exports = 0;
+ mbuf->master.obj = NULL;
+ _PyObject_GC_TRACK(mbuf);
+
+ return mbuf;
+}
+
+static PyObject *
+_PyManagedBuffer_FromObject(PyObject *base)
+{
+ _PyManagedBufferObject *mbuf;
+
+ mbuf = mbuf_alloc();
+ if (mbuf == NULL)
+ return NULL;
+
+ if (PyObject_GetBuffer(base, &mbuf->master, PyBUF_FULL_RO) < 0) {
+ /* mbuf->master.obj must be NULL. */
+ Py_DECREF(mbuf);
+ return NULL;
}
-static Py_ssize_t
-get_shape0(Py_buffer *buf)
-{
- if (buf->shape != NULL)
- return buf->shape[0];
- if (buf->ndim == 0)
- return 1;
- PyErr_SetString(PyExc_TypeError,
- "exported buffer does not have any shape information associated "
- "to it");
- return -1;
+ /* Assume that master.obj is a new reference to base. */
+ assert(mbuf->master.obj == base);
+
+ return (PyObject *)mbuf;
}
static void
-dup_buffer(Py_buffer *dest, Py_buffer *src)
+mbuf_release(_PyManagedBufferObject *self)
{
- *dest = *src;
- if (src->ndim == 1 && src->shape != NULL) {
- dest->shape = &(dest->smalltable[0]);
- dest->shape[0] = get_shape0(src);
- }
- if (src->ndim == 1 && src->strides != NULL) {
- dest->strides = &(dest->smalltable[1]);
- dest->strides[0] = src->strides[0];
- }
+ if (self->flags&_Py_MANAGED_BUFFER_RELEASED)
+ return;
+
+ /* NOTE: at this point self->exports can still be > 0 if this function
+ is called from mbuf_clear() to break up a reference cycle. */
+ self->flags |= _Py_MANAGED_BUFFER_RELEASED;
+
+ /* PyBuffer_Release() decrements master->obj and sets it to NULL. */
+ _PyObject_GC_UNTRACK(self);
+ PyBuffer_Release(&self->master);
+}
+
+static void
+mbuf_dealloc(_PyManagedBufferObject *self)
+{
+ assert(self->exports == 0);
+ mbuf_release(self);
+ if (self->flags&_Py_MANAGED_BUFFER_FREE_FORMAT)
+ PyMem_Free(self->master.format);
+ PyObject_GC_Del(self);
}
static int
-memory_getbuf(PyMemoryViewObject *self, Py_buffer *view, int flags)
+mbuf_traverse(_PyManagedBufferObject *self, visitproc visit, void *arg)
{
- int res = 0;
- CHECK_RELEASED_INT(self);
- if (self->view.obj != NULL)
- res = PyObject_GetBuffer(self->view.obj, view, flags);
- if (view)
- dup_buffer(view, &self->view);
- return res;
+ Py_VISIT(self->master.obj);
+ return 0;
}
-static void
-memory_releasebuf(PyMemoryViewObject *self, Py_buffer *view)
+static int
+mbuf_clear(_PyManagedBufferObject *self)
{
- PyBuffer_Release(view);
+ assert(self->exports >= 0);
+ mbuf_release(self);
+ return 0;
}
+PyTypeObject _PyManagedBuffer_Type = {
+ PyVarObject_HEAD_INIT(&PyType_Type, 0)
+ "managedbuffer",
+ sizeof(_PyManagedBufferObject),
+ 0,
+ (destructor)mbuf_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_reserved */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ PyObject_GenericGetAttr, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
+ 0, /* tp_doc */
+ (traverseproc)mbuf_traverse, /* tp_traverse */
+ (inquiry)mbuf_clear /* tp_clear */
+};
+
+
+/****************************************************************************/
+/* MemoryView Object */
+/****************************************************************************/
+
+/* In the process of breaking reference cycles mbuf_release() can be
+ called before memory_release(). */
+#define BASE_INACCESSIBLE(mv) \
+ (((PyMemoryViewObject *)mv)->flags&_Py_MEMORYVIEW_RELEASED || \
+ ((PyMemoryViewObject *)mv)->mbuf->flags&_Py_MANAGED_BUFFER_RELEASED)
+
+#define CHECK_RELEASED(mv) \
+ if (BASE_INACCESSIBLE(mv)) { \
+ PyErr_SetString(PyExc_ValueError, \
+ "operation forbidden on released memoryview object"); \
+ return NULL; \
+ }
+
+#define CHECK_RELEASED_INT(mv) \
+ if (BASE_INACCESSIBLE(mv)) { \
+ PyErr_SetString(PyExc_ValueError, \
+ "operation forbidden on released memoryview object"); \
+ return -1; \
+ }
+
+#define CHECK_LIST_OR_TUPLE(v) \
+ if (!PyList_Check(v) && !PyTuple_Check(v)) { \
+ PyErr_SetString(PyExc_TypeError, \
+ #v " must be a list or a tuple"); \
+ return NULL; \
+ }
+
+#define VIEW_ADDR(mv) (&((PyMemoryViewObject *)mv)->view)
+
+/* Check for the presence of suboffsets in the first dimension. */
+#define HAVE_PTR(suboffsets) (suboffsets && suboffsets[0] >= 0)
+/* Adjust ptr if suboffsets are present. */
+#define ADJUST_PTR(ptr, suboffsets) \
+ (HAVE_PTR(suboffsets) ? *((char**)ptr) + suboffsets[0] : ptr)
+
+/* Memoryview buffer properties */
+#define MV_C_CONTIGUOUS(flags) (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C))
+#define MV_F_CONTIGUOUS(flags) \
+ (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_FORTRAN))
+#define MV_ANY_CONTIGUOUS(flags) \
+ (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN))
+
+/* Fast contiguity test. Caller must ensure suboffsets==NULL and ndim==1. */
+#define MV_CONTIGUOUS_NDIM1(view) \
+ ((view)->shape[0] == 1 || (view)->strides[0] == (view)->itemsize)
+
+/* getbuffer() requests */
+#define REQ_INDIRECT(flags) ((flags&PyBUF_INDIRECT) == PyBUF_INDIRECT)
+#define REQ_C_CONTIGUOUS(flags) ((flags&PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS)
+#define REQ_F_CONTIGUOUS(flags) ((flags&PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)
+#define REQ_ANY_CONTIGUOUS(flags) ((flags&PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS)
+#define REQ_STRIDES(flags) ((flags&PyBUF_STRIDES) == PyBUF_STRIDES)
+#define REQ_SHAPE(flags) ((flags&PyBUF_ND) == PyBUF_ND)
+#define REQ_WRITABLE(flags) (flags&PyBUF_WRITABLE)
+#define REQ_FORMAT(flags) (flags&PyBUF_FORMAT)
+
+
PyDoc_STRVAR(memory_doc,
"memoryview(object)\n\
\n\
Create a new memoryview object which references the given object.");
+
+/**************************************************************************/
+/* Copy memoryview buffers */
+/**************************************************************************/
+
+/* The functions in this section take a source and a destination buffer
+ with the same logical structure: format, itemsize, ndim and shape
+ are identical, with ndim > 0.
+
+ NOTE: All buffers are assumed to have PyBUF_FULL information, which
+ is the case for memoryviews! */
+
+
+/* Assumptions: ndim >= 1. The macro tests for a corner case that should
+ perhaps be explicitly forbidden in the PEP. */
+#define HAVE_SUBOFFSETS_IN_LAST_DIM(view) \
+ (view->suboffsets && view->suboffsets[dest->ndim-1] >= 0)
+
+Py_LOCAL_INLINE(int)
+last_dim_is_contiguous(Py_buffer *dest, Py_buffer *src)
+{
+ assert(dest->ndim > 0 && src->ndim > 0);
+ return (!HAVE_SUBOFFSETS_IN_LAST_DIM(dest) &&
+ !HAVE_SUBOFFSETS_IN_LAST_DIM(src) &&
+ dest->strides[dest->ndim-1] == dest->itemsize &&
+ src->strides[src->ndim-1] == src->itemsize);
+}
+
+/* Check that the logical structure of the destination and source buffers
+ is identical. */
+static int
+cmp_structure(Py_buffer *dest, Py_buffer *src)
+{
+ const char *dfmt, *sfmt;
+ int i;
+
+ assert(dest->format && src->format);
+ dfmt = dest->format[0] == '@' ? dest->format+1 : dest->format;
+ sfmt = src->format[0] == '@' ? src->format+1 : src->format;
+
+ if (strcmp(dfmt, sfmt) != 0 ||
+ dest->itemsize != src->itemsize ||
+ dest->ndim != src->ndim) {
+ goto value_error;
+ }
+
+ for (i = 0; i < dest->ndim; i++) {
+ if (dest->shape[i] != src->shape[i])
+ goto value_error;
+ if (dest->shape[i] == 0)
+ break;
+ }
+
+ return 0;
+
+value_error:
+ PyErr_SetString(PyExc_ValueError,
+ "ndarray assignment: lvalue and rvalue have different structures");
+ return -1;
+}
+
+/* Base case for recursive multi-dimensional copying. Contiguous arrays are
+ copied with very little overhead. Assumptions: ndim == 1, mem == NULL or
+ sizeof(mem) == shape[0] * itemsize. */
+static void
+copy_base(const Py_ssize_t *shape, Py_ssize_t itemsize,
+ char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
+ char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
+ char *mem)
+{
+ if (mem == NULL) { /* contiguous */
+ Py_ssize_t size = shape[0] * itemsize;
+ if (dptr + size < sptr || sptr + size < dptr)
+ memcpy(dptr, sptr, size); /* no overlapping */
+ else
+ memmove(dptr, sptr, size);
+ }
+ else {
+ char *p;
+ Py_ssize_t i;
+ for (i=0, p=mem; i < shape[0]; p+=itemsize, sptr+=sstrides[0], i++) {
+ char *xsptr = ADJUST_PTR(sptr, ssuboffsets);
+ memcpy(p, xsptr, itemsize);
+ }
+ for (i=0, p=mem; i < shape[0]; p+=itemsize, dptr+=dstrides[0], i++) {
+ char *xdptr = ADJUST_PTR(dptr, dsuboffsets);
+ memcpy(xdptr, p, itemsize);
+ }
+ }
+
+}
+
+/* Recursively copy a source buffer to a destination buffer. The two buffers
+ have the same ndim, shape and itemsize. */
+static void
+copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,
+ char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
+ char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
+ char *mem)
+{
+ Py_ssize_t i;
+
+ assert(ndim >= 1);
+
+ if (ndim == 1) {
+ copy_base(shape, itemsize,
+ dptr, dstrides, dsuboffsets,
+ sptr, sstrides, ssuboffsets,
+ mem);
+ return;
+ }
+
+ for (i = 0; i < shape[0]; dptr+=dstrides[0], sptr+=sstrides[0], i++) {
+ char *xdptr = ADJUST_PTR(dptr, dsuboffsets);
+ char *xsptr = ADJUST_PTR(sptr, ssuboffsets);
+
+ copy_rec(shape+1, ndim-1, itemsize,
+ xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL,
+ xsptr, sstrides+1, ssuboffsets ? ssuboffsets+1 : NULL,
+ mem);
+ }
+}
+
+/* Faster copying of one-dimensional arrays. */
+static int
+copy_single(Py_buffer *dest, Py_buffer *src)
+{
+ char *mem = NULL;
+
+ assert(dest->ndim == 1);
+
+ if (cmp_structure(dest, src) < 0)
+ return -1;
+
+ if (!last_dim_is_contiguous(dest, src)) {
+ mem = PyMem_Malloc(dest->shape[0] * dest->itemsize);
+ if (mem == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
+ }
+
+ copy_base(dest->shape, dest->itemsize,
+ dest->buf, dest->strides, dest->suboffsets,
+ src->buf, src->strides, src->suboffsets,
+ mem);
+
+ if (mem)
+ PyMem_Free(mem);
+
+ return 0;
+}
+
+/* Recursively copy src to dest. Both buffers must have the same basic
+ structure. Copying is atomic, the function never fails with a partial
+ copy. */
+static int
+copy_buffer(Py_buffer *dest, Py_buffer *src)
+{
+ char *mem = NULL;
+
+ assert(dest->ndim > 0);
+
+ if (cmp_structure(dest, src) < 0)
+ return -1;
+
+ if (!last_dim_is_contiguous(dest, src)) {
+ mem = PyMem_Malloc(dest->shape[dest->ndim-1] * dest->itemsize);
+ if (mem == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
+ }
+
+ copy_rec(dest->shape, dest->ndim, dest->itemsize,
+ dest->buf, dest->strides, dest->suboffsets,
+ src->buf, src->strides, src->suboffsets,
+ mem);
+
+ if (mem)
+ PyMem_Free(mem);
+
+ return 0;
+}
+
+/* Initialize strides for a C-contiguous array. */
+Py_LOCAL_INLINE(void)
+init_strides_from_shape(Py_buffer *view)
+{
+ Py_ssize_t i;
+
+ assert(view->ndim > 0);
+
+ view->strides[view->ndim-1] = view->itemsize;
+ for (i = view->ndim-2; i >= 0; i--)
+ view->strides[i] = view->strides[i+1] * view->shape[i+1];
+}
+
+/* Initialize strides for a Fortran-contiguous array. */
+Py_LOCAL_INLINE(void)
+init_fortran_strides_from_shape(Py_buffer *view)
+{
+ Py_ssize_t i;
+
+ assert(view->ndim > 0);
+
+ view->strides[0] = view->itemsize;
+ for (i = 1; i < view->ndim; i++)
+ view->strides[i] = view->strides[i-1] * view->shape[i-1];
+}
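/*
  Worked example (editorial): for shape = {2, 3, 4} and itemsize = 8,
  init_strides_from_shape() yields the C-contiguous strides {96, 32, 8}
  (8, then 8*4 = 32, then 32*3 = 96), while
  init_fortran_strides_from_shape() yields the Fortran-contiguous strides
  {8, 16, 48} (8, then 8*2 = 16, then 16*3 = 48).
*/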
+
+/* Copy src to a C-contiguous representation. Assumptions:
+ len(mem) == src->len. */
+static int
+buffer_to_c_contiguous(char *mem, Py_buffer *src)
+{
+ Py_buffer dest;
+ Py_ssize_t *strides;
+ int ret;
+
+ assert(src->shape != NULL);
+ assert(src->strides != NULL);
+
+ strides = PyMem_Malloc(src->ndim * (sizeof *src->strides));
+ if (strides == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
+
+ /* initialize dest as a C-contiguous buffer */
+ dest = *src;
+ dest.buf = mem;
+ /* shape is constant and shared */
+ dest.strides = strides;
+ init_strides_from_shape(&dest);
+ dest.suboffsets = NULL;
+
+ ret = copy_buffer(&dest, src);
+
+ PyMem_Free(strides);
+ return ret;
+}
+
+
+/****************************************************************************/
+/* Constructors */
+/****************************************************************************/
+
+/* Initialize values that are shared with the managed buffer. */
+Py_LOCAL_INLINE(void)
+init_shared_values(Py_buffer *dest, const Py_buffer *src)
+{
+ dest->obj = src->obj;
+ dest->buf = src->buf;
+ dest->len = src->len;
+ dest->itemsize = src->itemsize;
+ dest->readonly = src->readonly;
+ dest->format = src->format ? src->format : "B";
+ dest->internal = src->internal;
+}
+
+/* Copy shape and strides. Reconstruct missing values. */
+static void
+init_shape_strides(Py_buffer *dest, const Py_buffer *src)
+{
+ Py_ssize_t i;
+
+ if (src->ndim == 0) {
+ dest->shape = NULL;
+ dest->strides = NULL;
+ return;
+ }
+ if (src->ndim == 1) {
+ dest->shape[0] = src->shape ? src->shape[0] : src->len / src->itemsize;
+ dest->strides[0] = src->strides ? src->strides[0] : src->itemsize;
+ return;
+ }
+
+ for (i = 0; i < src->ndim; i++)
+ dest->shape[i] = src->shape[i];
+ if (src->strides) {
+ for (i = 0; i < src->ndim; i++)
+ dest->strides[i] = src->strides[i];
+ }
+ else {
+ init_strides_from_shape(dest);
+ }
+}
+
+Py_LOCAL_INLINE(void)
+init_suboffsets(Py_buffer *dest, const Py_buffer *src)
+{
+ Py_ssize_t i;
+
+ if (src->suboffsets == NULL) {
+ dest->suboffsets = NULL;
+ return;
+ }
+ for (i = 0; i < src->ndim; i++)
+ dest->suboffsets[i] = src->suboffsets[i];
+}
+
+/* len = product(shape) * itemsize */
+Py_LOCAL_INLINE(void)
+init_len(Py_buffer *view)
+{
+ Py_ssize_t i, len;
+
+ len = 1;
+ for (i = 0; i < view->ndim; i++)
+ len *= view->shape[i];
+ len *= view->itemsize;
+
+ view->len = len;
+}
+
+/* Initialize memoryview buffer properties. */
+static void
+init_flags(PyMemoryViewObject *mv)
+{
+ const Py_buffer *view = &mv->view;
+ int flags = 0;
+
+ switch (view->ndim) {
+ case 0:
+ flags |= (_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|
+ _Py_MEMORYVIEW_FORTRAN);
+ break;
+ case 1:
+ if (MV_CONTIGUOUS_NDIM1(view))
+ flags |= (_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
+ break;
+ default:
+ if (PyBuffer_IsContiguous(view, 'C'))
+ flags |= _Py_MEMORYVIEW_C;
+ if (PyBuffer_IsContiguous(view, 'F'))
+ flags |= _Py_MEMORYVIEW_FORTRAN;
+ break;
+ }
+
+ if (view->suboffsets) {
+ flags |= _Py_MEMORYVIEW_PIL;
+ flags &= ~(_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
+ }
+
+ mv->flags = flags;
+}
+
+/* Allocate a new memoryview and perform basic initialization. New memoryviews
+ are exclusively created through the mbuf_add functions. */
+Py_LOCAL_INLINE(PyMemoryViewObject *)
+memory_alloc(int ndim)
+{
+ PyMemoryViewObject *mv;
+
+ mv = (PyMemoryViewObject *)
+ PyObject_GC_NewVar(PyMemoryViewObject, &PyMemoryView_Type, 3*ndim);
+ if (mv == NULL)
+ return NULL;
+
+ mv->mbuf = NULL;
+ mv->hash = -1;
+ mv->flags = 0;
+ mv->exports = 0;
+ mv->view.ndim = ndim;
+ mv->view.shape = mv->ob_array;
+ mv->view.strides = mv->ob_array + ndim;
+ mv->view.suboffsets = mv->ob_array + 2 * ndim;
+
+ _PyObject_GC_TRACK(mv);
+ return mv;
+}
+
+/*
+ Return a new memoryview that is registered with mbuf. If src is NULL,
+ use mbuf->master as the underlying buffer. Otherwise, use src.
+
+ The new memoryview has full buffer information: shape and strides
+ are always present, suboffsets as needed. Arrays are copied to
+ the memoryview's ob_array field.
+ */
+static PyObject *
+mbuf_add_view(_PyManagedBufferObject *mbuf, const Py_buffer *src)
+{
+ PyMemoryViewObject *mv;
+ Py_buffer *dest;
+
+ if (src == NULL)
+ src = &mbuf->master;
+
+ if (src->ndim > PyBUF_MAX_NDIM) {
+ PyErr_SetString(PyExc_ValueError,
+ "memoryview: number of dimensions must not exceed "
+ STRINGIZE(PyBUF_MAX_NDIM));
+ return NULL;
+ }
+
+ mv = memory_alloc(src->ndim);
+ if (mv == NULL)
+ return NULL;
+
+ dest = &mv->view;
+ init_shared_values(dest, src);
+ init_shape_strides(dest, src);
+ init_suboffsets(dest, src);
+ init_flags(mv);
+
+ mv->mbuf = mbuf;
+ Py_INCREF(mbuf);
+ mbuf->exports++;
+
+ return (PyObject *)mv;
+}
+
+/* Register an incomplete view: shape, strides, suboffsets and flags still
+ need to be initialized. Use 'ndim' instead of src->ndim to determine the
+ size of the memoryview's ob_array.
+
+ Assumption: ndim <= PyBUF_MAX_NDIM. */
+static PyObject *
+mbuf_add_incomplete_view(_PyManagedBufferObject *mbuf, const Py_buffer *src,
+ int ndim)
+{
+ PyMemoryViewObject *mv;
+ Py_buffer *dest;
+
+ if (src == NULL)
+ src = &mbuf->master;
+
+ assert(ndim <= PyBUF_MAX_NDIM);
+
+ mv = memory_alloc(ndim);
+ if (mv == NULL)
+ return NULL;
+
+ dest = &mv->view;
+ init_shared_values(dest, src);
+
+ mv->mbuf = mbuf;
+ Py_INCREF(mbuf);
+ mbuf->exports++;
+
+ return (PyObject *)mv;
+}
+
+/* Expose a raw memory area as a view of contiguous bytes. flags can be
+ PyBUF_READ or PyBUF_WRITE. view->format is set to "B" (unsigned bytes).
+ The memoryview has complete buffer information. */
+PyObject *
+PyMemoryView_FromMemory(char *mem, Py_ssize_t size, int flags)
+{
+ _PyManagedBufferObject *mbuf;
+ PyObject *mv;
+ int readonly;
+
+ assert(mem != NULL);
+ assert(flags == PyBUF_READ || flags == PyBUF_WRITE);
+
+ mbuf = mbuf_alloc();
+ if (mbuf == NULL)
+ return NULL;
+
+ readonly = (flags == PyBUF_WRITE) ? 0 : 1;
+ (void)PyBuffer_FillInfo(&mbuf->master, NULL, mem, size, readonly,
+ PyBUF_FULL_RO);
+
+ mv = mbuf_add_view(mbuf, NULL);
+ Py_DECREF(mbuf);
+
+ return mv;
+}
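/*
  Usage sketch (editorial): 'rawbuf' and 'rawsize' are hypothetical; the
  caller owns the memory and must keep it alive for the lifetime of the
  returned view, since no exporting object is recorded.

      PyObject *mv = PyMemoryView_FromMemory(rawbuf, rawsize, PyBUF_WRITE);
      if (mv == NULL)
          return NULL;
*/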
+
+/* Create a memoryview from a given Py_buffer. For simple byte views,
+ PyMemoryView_FromMemory() should be used instead.
+ This function is the only entry point that can create a master buffer
+ without full information. Because of this fact init_shape_strides()
+ must be able to reconstruct missing values. */
PyObject *
PyMemoryView_FromBuffer(Py_buffer *info)
{
- PyMemoryViewObject *mview;
+ _PyManagedBufferObject *mbuf;
+ PyObject *mv;
if (info->buf == NULL) {
PyErr_SetString(PyExc_ValueError,
- "cannot make memory view from a buffer with a NULL data pointer");
+ "PyMemoryView_FromBuffer(): info->buf must not be NULL");
return NULL;
}
- mview = (PyMemoryViewObject *)
- PyObject_GC_New(PyMemoryViewObject, &PyMemoryView_Type);
- if (mview == NULL)
+
+ mbuf = mbuf_alloc();
+ if (mbuf == NULL)
return NULL;
- mview->hash = -1;
- dup_buffer(&mview->view, info);
- /* NOTE: mview->view.obj should already have been incref'ed as
- part of PyBuffer_FillInfo(). */
- _PyObject_GC_TRACK(mview);
- return (PyObject *)mview;
+
+ /* info->obj is either NULL or a borrowed reference. This reference
+ should not be decremented in PyBuffer_Release(). */
+ mbuf->master = *info;
+ mbuf->master.obj = NULL;
+
+ mv = mbuf_add_view(mbuf, NULL);
+ Py_DECREF(mbuf);
+
+ return mv;
}
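/*
  Usage sketch (editorial): wrap a Py_buffer that the caller has already
  filled in, e.g. with PyBuffer_FillInfo().  Because info->obj is treated
  as a borrowed reference (see above), the caller retains ownership of any
  exporting object.  'data' and 'size' are hypothetical.

      Py_buffer info;
      PyObject *mv;
      if (PyBuffer_FillInfo(&info, NULL, data, size, 1, PyBUF_FULL_RO) < 0)
          return NULL;
      mv = PyMemoryView_FromBuffer(&info);
*/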
+/* Create a memoryview from an object that implements the buffer protocol.
+ If the object is a memoryview, the new memoryview must be registered
+ with the same managed buffer. Otherwise, a new managed buffer is created. */
PyObject *
-PyMemoryView_FromObject(PyObject *base)
+PyMemoryView_FromObject(PyObject *v)
{
- PyMemoryViewObject *mview;
- Py_buffer view;
+ _PyManagedBufferObject *mbuf;
- if (!PyObject_CheckBuffer(base)) {
- PyErr_SetString(PyExc_TypeError,
- "cannot make memory view because object does "
- "not have the buffer interface");
+ if (PyMemoryView_Check(v)) {
+ PyMemoryViewObject *mv = (PyMemoryViewObject *)v;
+ CHECK_RELEASED(mv);
+ return mbuf_add_view(mv->mbuf, &mv->view);
+ }
+ else if (PyObject_CheckBuffer(v)) {
+ PyObject *ret;
+ mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(v);
+ if (mbuf == NULL)
+ return NULL;
+ ret = mbuf_add_view(mbuf, NULL);
+ Py_DECREF(mbuf);
+ return ret;
+ }
+
+ PyErr_Format(PyExc_TypeError,
+ "memoryview: %.200s object does not have the buffer interface",
+ Py_TYPE(v)->tp_name);
+ return NULL;
+}
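/*
  Usage sketch (editorial): both calls below enter through this function.
  The second call never touches 'base' again; the new view is registered
  with the managed buffer that 'mv1' already holds.

      PyObject *mv1 = PyMemoryView_FromObject(base); /* new managed buffer */
      PyObject *mv2 = PyMemoryView_FromObject(mv1);  /* shares mv1->mbuf */
*/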
+
+/* Copy the format string from a base object that might vanish. */
+static int
+mbuf_copy_format(_PyManagedBufferObject *mbuf, const char *fmt)
+{
+ if (fmt != NULL) {
+ char *cp = PyMem_Malloc(strlen(fmt)+1);
+ if (cp == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
+ mbuf->master.format = strcpy(cp, fmt);
+ mbuf->flags |= _Py_MANAGED_BUFFER_FREE_FORMAT;
+ }
+
+ return 0;
+}
+
+/*
+ Return a memoryview that is based on a contiguous copy of src.
+ Assumptions: src has PyBUF_FULL_RO information, src->ndim > 0.
+
+ Ownership rules:
+ 1) As usual, the returned memoryview has a private copy
+ of src->shape, src->strides and src->suboffsets.
+ 2) src->format is copied to the master buffer and released
+ in mbuf_dealloc(). The releasebufferproc of the bytes
+ object is NULL, so it does not matter that mbuf_release()
+ passes the altered format pointer to PyBuffer_Release().
+*/
+static PyObject *
+memory_from_contiguous_copy(Py_buffer *src, char order)
+{
+ _PyManagedBufferObject *mbuf;
+ PyMemoryViewObject *mv;
+ PyObject *bytes;
+ Py_buffer *dest;
+ int i;
+
+ assert(src->ndim > 0);
+ assert(src->shape != NULL);
+
+ bytes = PyBytes_FromStringAndSize(NULL, src->len);
+ if (bytes == NULL)
+ return NULL;
+
+ mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(bytes);
+ Py_DECREF(bytes);
+ if (mbuf == NULL)
+ return NULL;
+
+ if (mbuf_copy_format(mbuf, src->format) < 0) {
+ Py_DECREF(mbuf);
+ return NULL;
+ }
+
+ mv = (PyMemoryViewObject *)mbuf_add_incomplete_view(mbuf, NULL, src->ndim);
+ Py_DECREF(mbuf);
+ if (mv == NULL)
return NULL;
+
+ dest = &mv->view;
+
+ /* shared values are initialized correctly except for itemsize */
+ dest->itemsize = src->itemsize;
+
+ /* shape and strides */
+ for (i = 0; i < src->ndim; i++) {
+ dest->shape[i] = src->shape[i];
+ }
+ if (order == 'C' || order == 'A') {
+ init_strides_from_shape(dest);
}
+ else {
+ init_fortran_strides_from_shape(dest);
+ }
+ /* suboffsets */
+ dest->suboffsets = NULL;
+
+ /* flags */
+ init_flags(mv);
+
+ if (copy_buffer(dest, src) < 0) {
+ Py_DECREF(mv);
+ return NULL;
+ }
+
+ return (PyObject *)mv;
+}
+
+/*
+ Return a new memoryview object based on a contiguous exporter with
+ buffertype={PyBUF_READ, PyBUF_WRITE} and order={'C', 'F'ortran, or 'A'ny}.
+ The logical structure of the input and output buffers is the same
+ (i.e. tolist(input) == tolist(output)), but the physical layout in
+ memory can be explicitly chosen.
+
+ As usual, if buffertype=PyBUF_WRITE, the exporter's buffer must be writable,
+ otherwise it may be writable or read-only.
+
+ If the exporter is already contiguous with the desired target order,
+ the memoryview will be directly based on the exporter.
+
+ Otherwise, if the buffertype is PyBUF_READ, the memoryview will be
+ based on a new bytes object. If order={'C', 'A'ny}, use 'C' order,
+ 'F'ortran order otherwise.
+*/
+PyObject *
+PyMemoryView_GetContiguous(PyObject *obj, int buffertype, char order)
+{
+ PyMemoryViewObject *mv;
+ PyObject *ret;
+ Py_buffer *view;
+
+ assert(buffertype == PyBUF_READ || buffertype == PyBUF_WRITE);
+ assert(order == 'C' || order == 'F' || order == 'A');
- if (PyObject_GetBuffer(base, &view, PyBUF_FULL_RO) < 0)
+ mv = (PyMemoryViewObject *)PyMemoryView_FromObject(obj);
+ if (mv == NULL)
return NULL;
- mview = (PyMemoryViewObject *)PyMemoryView_FromBuffer(&view);
- if (mview == NULL) {
- PyBuffer_Release(&view);
+ view = &mv->view;
+ if (buffertype == PyBUF_WRITE && view->readonly) {
+ PyErr_SetString(PyExc_BufferError,
+ "underlying buffer is not writable");
+ Py_DECREF(mv);
+ return NULL;
+ }
+
+ if (PyBuffer_IsContiguous(view, order))
+ return (PyObject *)mv;
+
+ if (buffertype == PyBUF_WRITE) {
+ PyErr_SetString(PyExc_BufferError,
+ "writable contiguous buffer requested "
+ "for a non-contiguous object.");
+ Py_DECREF(mv);
return NULL;
}
- return (PyObject *)mview;
+ ret = memory_from_contiguous_copy(view, order);
+ Py_DECREF(mv);
+ return ret;
}
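/*
  Usage sketch (editorial): request a read-only, C-contiguous view of
  'obj'.  If 'obj' already exports a C-contiguous buffer, the view refers
  to it directly; otherwise the view is based on a contiguous bytes copy
  made by memory_from_contiguous_copy().

      PyObject *mv = PyMemoryView_GetContiguous(obj, PyBUF_READ, 'C');
*/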
+
static PyObject *
memory_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
{
PyObject *obj;
- static char *kwlist[] = {"object", 0};
+ static char *kwlist[] = {"object", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:memoryview", kwlist,
&obj)) {
@@ -132,478 +923,1106 @@ memory_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
}
+/****************************************************************************/
+/* Release/GC management */
+/****************************************************************************/
+
+/* Inform the managed buffer that this particular memoryview will not access
+ the underlying buffer again. If no other memoryviews are registered with
+ the managed buffer, the underlying buffer is released instantly and
+ marked as inaccessible for both the memoryview and the managed buffer.
+
+ This function fails if the memoryview itself has exported buffers. */
+static int
+_memory_release(PyMemoryViewObject *self)
+{
+ if (self->flags & _Py_MEMORYVIEW_RELEASED)
+ return 0;
+
+ if (self->exports == 0) {
+ self->flags |= _Py_MEMORYVIEW_RELEASED;
+ assert(self->mbuf->exports > 0);
+ if (--self->mbuf->exports == 0)
+ mbuf_release(self->mbuf);
+ return 0;
+ }
+ if (self->exports > 0) {
+ PyErr_Format(PyExc_BufferError,
+ "memoryview has %zd exported buffer%s", self->exports,
+ self->exports==1 ? "" : "s");
+ return -1;
+ }
+
+ Py_FatalError("_memory_release(): negative export count");
+ return -1;
+}
+
+static PyObject *
+memory_release(PyMemoryViewObject *self)
+{
+ if (_memory_release(self) < 0)
+ return NULL;
+ Py_RETURN_NONE;
+}
+
static void
-_strided_copy_nd(char *dest, char *src, int nd, Py_ssize_t *shape,
- Py_ssize_t *strides, Py_ssize_t itemsize, char fort)
+memory_dealloc(PyMemoryViewObject *self)
{
- int k;
- Py_ssize_t outstride;
+ assert(self->exports == 0);
+ _PyObject_GC_UNTRACK(self);
+ (void)_memory_release(self);
+ Py_CLEAR(self->mbuf);
+ PyObject_GC_Del(self);
+}
- if (nd==0) {
- memcpy(dest, src, itemsize);
- }
- else if (nd == 1) {
- for (k = 0; k<shape[0]; k++) {
- memcpy(dest, src, itemsize);
- dest += itemsize;
- src += strides[0];
- }
+static int
+memory_traverse(PyMemoryViewObject *self, visitproc visit, void *arg)
+{
+ Py_VISIT(self->mbuf);
+ return 0;
+}
+
+static int
+memory_clear(PyMemoryViewObject *self)
+{
+ (void)_memory_release(self);
+ Py_CLEAR(self->mbuf);
+ return 0;
+}
+
+static PyObject *
+memory_enter(PyObject *self, PyObject *args)
+{
+ CHECK_RELEASED(self);
+ Py_INCREF(self);
+ return self;
+}
+
+static PyObject *
+memory_exit(PyObject *self, PyObject *args)
+{
+ return memory_release((PyMemoryViewObject *)self);
+}
+
+
+/****************************************************************************/
+/* Casting format and shape */
+/****************************************************************************/
+
+#define IS_BYTE_FORMAT(f) (f == 'b' || f == 'B' || f == 'c')
+
+Py_LOCAL_INLINE(Py_ssize_t)
+get_native_fmtchar(char *result, const char *fmt)
+{
+ Py_ssize_t size = -1;
+
+ if (fmt[0] == '@') fmt++;
+
+ switch (fmt[0]) {
+ case 'c': case 'b': case 'B': size = sizeof(char); break;
+ case 'h': case 'H': size = sizeof(short); break;
+ case 'i': case 'I': size = sizeof(int); break;
+ case 'l': case 'L': size = sizeof(long); break;
+ #ifdef HAVE_LONG_LONG
+ case 'q': case 'Q': size = sizeof(PY_LONG_LONG); break;
+ #endif
+ case 'n': case 'N': size = sizeof(Py_ssize_t); break;
+ case 'f': size = sizeof(float); break;
+ case 'd': size = sizeof(double); break;
+ #ifdef HAVE_C99_BOOL
+ case '?': size = sizeof(_Bool); break;
+ #else
+ case '?': size = sizeof(char); break;
+ #endif
+ case 'P': size = sizeof(void *); break;
}
- else {
- if (fort == 'F') {
- /* Copy first dimension first,
- second dimension second, etc...
- Set up the recursive loop backwards so that final
- dimension is actually copied last.
- */
- outstride = itemsize;
- for (k=1; k<nd-1;k++) {
- outstride *= shape[k];
- }
- for (k=0; k<shape[nd-1]; k++) {
- _strided_copy_nd(dest, src, nd-1, shape,
- strides, itemsize, fort);
- dest += outstride;
- src += strides[nd-1];
- }
- }
- else {
- /* Copy last dimension first,
- second-to-last dimension second, etc.
- Set up the recursion so that the
- first dimension is copied last
- */
- outstride = itemsize;
- for (k=1; k < nd; k++) {
- outstride *= shape[k];
- }
- for (k=0; k<shape[0]; k++) {
- _strided_copy_nd(dest, src, nd-1, shape+1,
- strides+1, itemsize,
- fort);
- dest += outstride;
- src += strides[0];
- }
- }
+ if (size > 0 && fmt[1] == '\0') {
+ *result = fmt[0];
+ return size;
}
- return;
+
+ return -1;
}
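/*
  Examples (editorial): get_native_fmtchar(&c, "l") and
  get_native_fmtchar(&c, "@l") both set c = 'l' and return sizeof(long);
  "<l" (explicit byte order) and "ll" (more than one character) are
  rejected with -1.
*/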
+/* Cast a memoryview's data type to 'format'. The input array must be
+ C-contiguous. At least one of input-format, output-format must have
+ byte size. The output array is 1-D, with the same byte length as the
+ input array. Thus, view->len must be a multiple of the new itemsize. */
static int
-_indirect_copy_nd(char *dest, Py_buffer *view, char fort)
+cast_to_1D(PyMemoryViewObject *mv, PyObject *format)
{
- Py_ssize_t *indices;
- int k;
- Py_ssize_t elements;
- char *ptr;
- void (*func)(int, Py_ssize_t *, const Py_ssize_t *);
+ Py_buffer *view = &mv->view;
+ PyObject *asciifmt;
+ char srcchar, destchar;
+ Py_ssize_t itemsize;
+ int ret = -1;
+
+ assert(view->ndim >= 1);
+ assert(Py_SIZE(mv) == 3*view->ndim);
+ assert(view->shape == mv->ob_array);
+ assert(view->strides == mv->ob_array + view->ndim);
+ assert(view->suboffsets == mv->ob_array + 2*view->ndim);
+
+ if (get_native_fmtchar(&srcchar, view->format) < 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "memoryview: source format must be a native single character "
+ "format prefixed with an optional '@'");
+ return ret;
+ }
- if (view->ndim > PY_SSIZE_T_MAX / sizeof(Py_ssize_t)) {
- PyErr_NoMemory();
- return -1;
+ asciifmt = PyUnicode_AsASCIIString(format);
+ if (asciifmt == NULL)
+ return ret;
+
+ itemsize = get_native_fmtchar(&destchar, PyBytes_AS_STRING(asciifmt));
+ if (itemsize < 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "memoryview: destination format must be a native single "
+ "character format prefixed with an optional '@'");
+ goto out;
}
- indices = (Py_ssize_t *)PyMem_Malloc(sizeof(Py_ssize_t)*view->ndim);
- if (indices == NULL) {
- PyErr_NoMemory();
- return -1;
+ if (!IS_BYTE_FORMAT(srcchar) && !IS_BYTE_FORMAT(destchar)) {
+ PyErr_SetString(PyExc_TypeError,
+ "memoryview: cannot cast between two non-byte formats");
+ goto out;
}
- for (k=0; k<view->ndim;k++) {
- indices[k] = 0;
+ if (view->len % itemsize) {
+ PyErr_SetString(PyExc_TypeError,
+ "memoryview: length is not a multiple of itemsize");
+ goto out;
}
- elements = 1;
- for (k=0; k<view->ndim; k++) {
- elements *= view->shape[k];
+ strncpy(mv->format, PyBytes_AS_STRING(asciifmt),
+ _Py_MEMORYVIEW_MAX_FORMAT);
+ mv->format[_Py_MEMORYVIEW_MAX_FORMAT-1] = '\0';
+ view->format = mv->format;
+ view->itemsize = itemsize;
+
+ view->ndim = 1;
+ view->shape[0] = view->len / view->itemsize;
+ view->strides[0] = view->itemsize;
+ view->suboffsets = NULL;
+
+ init_flags(mv);
+
+ ret = 0;
+
+out:
+ Py_DECREF(asciifmt);
+ return ret;
+}
+
+/* The memoryview must have space for 3*len(seq) elements. */
+static Py_ssize_t
+copy_shape(Py_ssize_t *shape, const PyObject *seq, Py_ssize_t ndim,
+ Py_ssize_t itemsize)
+{
+ Py_ssize_t x, i;
+ Py_ssize_t len = itemsize;
+
+ for (i = 0; i < ndim; i++) {
+ PyObject *tmp = PySequence_Fast_GET_ITEM(seq, i);
+ if (!PyLong_Check(tmp)) {
+ PyErr_SetString(PyExc_TypeError,
+ "memoryview.cast(): elements of shape must be integers");
+ return -1;
+ }
+ x = PyLong_AsSsize_t(tmp);
+ if (x == -1 && PyErr_Occurred()) {
+ return -1;
+ }
+ if (x <= 0) {
+ /* In general elements of shape may be 0, but not for casting. */
+ PyErr_Format(PyExc_ValueError,
+ "memoryview.cast(): elements of shape must be integers > 0");
+ return -1;
+ }
+ if (x > PY_SSIZE_T_MAX / len) {
+ PyErr_Format(PyExc_ValueError,
+ "memoryview.cast(): product(shape) > SSIZE_MAX");
+ return -1;
+ }
+ len *= x;
+ shape[i] = x;
}
- if (fort == 'F') {
- func = _Py_add_one_to_index_F;
+
+ return len;
+}
+
+/* Cast a 1-D array to a new shape. The result array will be C-contiguous.
+ If the result array does not have exactly the same byte length as the
+ input array, raise ValueError. */
+static int
+cast_to_ND(PyMemoryViewObject *mv, const PyObject *shape, int ndim)
+{
+ Py_buffer *view = &mv->view;
+ Py_ssize_t len;
+
+ assert(view->ndim == 1); /* ndim from cast_to_1D() */
+ assert(Py_SIZE(mv) == 3*(ndim==0?1:ndim)); /* ndim of result array */
+ assert(view->shape == mv->ob_array);
+ assert(view->strides == mv->ob_array + (ndim==0?1:ndim));
+ assert(view->suboffsets == NULL);
+
+ view->ndim = ndim;
+ if (view->ndim == 0) {
+ view->shape = NULL;
+ view->strides = NULL;
+ len = view->itemsize;
}
else {
- func = _Py_add_one_to_index_C;
+ len = copy_shape(view->shape, shape, ndim, view->itemsize);
+ if (len < 0)
+ return -1;
+ init_strides_from_shape(view);
}
- while (elements--) {
- func(view->ndim, indices, view->shape);
- ptr = PyBuffer_GetPointer(view, indices);
- memcpy(dest, ptr, view->itemsize);
- dest += view->itemsize;
+
+ if (view->len != len) {
+ PyErr_SetString(PyExc_TypeError,
+ "memoryview: product(shape) * itemsize != buffer size");
+ return -1;
}
- PyMem_Free(indices);
+ init_flags(mv);
+
+ return 0;
+}
+
+static int
+zero_in_shape(PyMemoryViewObject *mv)
+{
+ Py_buffer *view = &mv->view;
+ Py_ssize_t i;
+
+ for (i = 0; i < view->ndim; i++)
+ if (view->shape[i] == 0)
+ return 1;
+
return 0;
}
/*
- Get a the data from an object as a contiguous chunk of memory (in
- either 'C' or 'F'ortran order) even if it means copying it into a
- separate memory area.
-
- Returns a new reference to a Memory view object. If no copy is needed,
- the memory view object points to the original memory and holds a
- lock on the original. If a copy is needed, then the memory view object
- points to a brand-new Bytes object (and holds a memory lock on it).
-
- buffertype
-
- PyBUF_READ buffer only needs to be read-only
- PyBUF_WRITE buffer needs to be writable (give error if not contiguous)
- PyBUF_SHADOW buffer needs to be writable so shadow it with
- a contiguous buffer if it is not. The view will point to
- the shadow buffer which can be written to and then
- will be copied back into the other buffer when the memory
- view is de-allocated. While the shadow buffer is
- being used, it will have an exclusive write lock on
- the original buffer.
- */
+ Cast a copy of 'self' to a different view. The input view must
+ be C-contiguous. The function always casts the input view to a
+ 1-D output according to 'format'. At least one of input-format,
+ output-format must have byte size.
-PyObject *
-PyMemoryView_GetContiguous(PyObject *obj, int buffertype, char fort)
+ If 'shape' is given, the 1-D view from the previous step will
+ be cast to a C-contiguous view with new shape and strides.
+
+ All casts must result in views that will have the exact byte
+ size of the original input. Otherwise, an error is raised.
+*/
+static PyObject *
+memory_cast(PyMemoryViewObject *self, PyObject *args, PyObject *kwds)
{
- PyMemoryViewObject *mem;
- PyObject *bytes;
- Py_buffer *view;
- int flags;
- char *dest;
+ static char *kwlist[] = {"format", "shape", NULL};
+ PyMemoryViewObject *mv = NULL;
+ PyObject *shape = NULL;
+ PyObject *format;
+ Py_ssize_t ndim = 1;
- if (!PyObject_CheckBuffer(obj)) {
+ CHECK_RELEASED(self);
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwlist,
+ &format, &shape)) {
+ return NULL;
+ }
+ if (!PyUnicode_Check(format)) {
PyErr_SetString(PyExc_TypeError,
- "object does not support the buffer interface");
+ "memoryview: format argument must be a string");
return NULL;
}
-
- mem = PyObject_GC_New(PyMemoryViewObject, &PyMemoryView_Type);
- if (mem == NULL)
+ if (!MV_C_CONTIGUOUS(self->flags)) {
+ PyErr_SetString(PyExc_TypeError,
+ "memoryview: casts are restricted to C-contiguous views");
return NULL;
-
- view = &mem->view;
- flags = PyBUF_FULL_RO;
- switch(buffertype) {
- case PyBUF_WRITE:
- flags = PyBUF_FULL;
- break;
+ }
+ if (zero_in_shape(self)) {
+ PyErr_SetString(PyExc_TypeError,
+ "memoryview: cannot cast view with zeros in shape or strides");
+ return NULL;
+ }
+ if (shape) {
+ CHECK_LIST_OR_TUPLE(shape)
+ ndim = PySequence_Fast_GET_SIZE(shape);
+ if (ndim > PyBUF_MAX_NDIM) {
+ PyErr_SetString(PyExc_ValueError,
+ "memoryview: number of dimensions must not exceed "
+ STRINGIZE(PyBUF_MAX_NDIM));
+ return NULL;
+ }
+ if (self->view.ndim != 1 && ndim != 1) {
+ PyErr_SetString(PyExc_TypeError,
+ "memoryview: cast must be 1D -> ND or ND -> 1D");
+ return NULL;
+ }
}
- if (PyObject_GetBuffer(obj, view, flags) != 0) {
- Py_DECREF(mem);
+ mv = (PyMemoryViewObject *)
+ mbuf_add_incomplete_view(self->mbuf, &self->view, ndim==0 ? 1 : (int)ndim);
+ if (mv == NULL)
return NULL;
+
+ if (cast_to_1D(mv, format) < 0)
+ goto error;
+ if (shape && cast_to_ND(mv, shape, (int)ndim) < 0)
+ goto error;
+
+ return (PyObject *)mv;
+
+error:
+ Py_DECREF(mv);
+ return NULL;
+}
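/*
  Usage sketch (editorial): memory_cast() implements memoryview.cast() and
  can be exercised from C through the method call protocol.  For a 16-byte
  C-contiguous view 'm', the second call is the equivalent of
  m.cast('B', (2, 8)) in Python.

      PyObject *flat = PyObject_CallMethod(m, "cast", "s", "B");
      PyObject *cube = PyObject_CallMethod(m, "cast", "s(ii)", "B", 2, 8);
*/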
+
+
+/**************************************************************************/
+/* getbuffer */
+/**************************************************************************/
+
+static int
+memory_getbuf(PyMemoryViewObject *self, Py_buffer *view, int flags)
+{
+ Py_buffer *base = &self->view;
+ int baseflags = self->flags;
+
+ CHECK_RELEASED_INT(self);
+
+ /* start with complete information */
+ *view = *base;
+ view->obj = NULL;
+
+ if (REQ_WRITABLE(flags) && base->readonly) {
+ PyErr_SetString(PyExc_BufferError,
+ "memoryview: underlying buffer is not writable");
+ return -1;
+ }
+ if (!REQ_FORMAT(flags)) {
+ /* NULL indicates that the buffer's data type has been cast to 'B'.
+ view->itemsize is the _previous_ itemsize. If shape is present,
+ the equality product(shape) * itemsize = len still holds at this
+ point. The equality calcsize(format) = itemsize does _not_ hold
+ from here on! */
+ view->format = NULL;
}
- if (PyBuffer_IsContiguous(view, fort)) {
- /* no copy needed */
- _PyObject_GC_TRACK(mem);
- return (PyObject *)mem;
+ if (REQ_C_CONTIGUOUS(flags) && !MV_C_CONTIGUOUS(baseflags)) {
+ PyErr_SetString(PyExc_BufferError,
+ "memoryview: underlying buffer is not C-contiguous");
+ return -1;
}
- /* otherwise a copy is needed */
- if (buffertype == PyBUF_WRITE) {
- Py_DECREF(mem);
+ if (REQ_F_CONTIGUOUS(flags) && !MV_F_CONTIGUOUS(baseflags)) {
PyErr_SetString(PyExc_BufferError,
- "writable contiguous buffer requested "
- "for a non-contiguousobject.");
- return NULL;
+ "memoryview: underlying buffer is not Fortran contiguous");
+ return -1;
}
- bytes = PyBytes_FromStringAndSize(NULL, view->len);
- if (bytes == NULL) {
- Py_DECREF(mem);
- return NULL;
+ if (REQ_ANY_CONTIGUOUS(flags) && !MV_ANY_CONTIGUOUS(baseflags)) {
+ PyErr_SetString(PyExc_BufferError,
+ "memoryview: underlying buffer is not contiguous");
+ return -1;
}
- dest = PyBytes_AS_STRING(bytes);
- /* different copying strategy depending on whether
- or not any pointer de-referencing is needed
- */
- /* strided or in-direct copy */
- if (view->suboffsets==NULL) {
- _strided_copy_nd(dest, view->buf, view->ndim, view->shape,
- view->strides, view->itemsize, fort);
+ if (!REQ_INDIRECT(flags) && (baseflags & _Py_MEMORYVIEW_PIL)) {
+ PyErr_SetString(PyExc_BufferError,
+ "memoryview: underlying buffer requires suboffsets");
+ return -1;
}
- else {
- if (_indirect_copy_nd(dest, view, fort) < 0) {
- Py_DECREF(bytes);
- Py_DECREF(mem);
- return NULL;
+ if (!REQ_STRIDES(flags)) {
+ if (!MV_C_CONTIGUOUS(baseflags)) {
+ PyErr_SetString(PyExc_BufferError,
+ "memoryview: underlying buffer is not C-contiguous");
+ return -1;
}
- PyBuffer_Release(view); /* XXX ? */
+ view->strides = NULL;
+ }
+ if (!REQ_SHAPE(flags)) {
+ /* PyBUF_SIMPLE or PyBUF_WRITABLE: at this point buf is C-contiguous,
+ so base->buf = ndbuf->data. */
+ if (view->format != NULL) {
+ /* PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT do
+ not make sense. */
+ PyErr_Format(PyExc_BufferError,
+ "ndarray: cannot cast to unsigned bytes if the format flag "
+ "is present");
+ return -1;
+ }
+ /* product(shape) * itemsize = len and calcsize(format) = itemsize
+ do _not_ hold from here on! */
+ view->ndim = 1;
+ view->shape = NULL;
}
- _PyObject_GC_TRACK(mem);
- return (PyObject *)mem;
-}
-static PyObject *
-memory_format_get(PyMemoryViewObject *self)
+ view->obj = (PyObject *)self;
+ Py_INCREF(view->obj);
+ self->exports++;
+
+ return 0;
+}
+
+static void
+memory_releasebuf(PyMemoryViewObject *self, Py_buffer *view)
{
- CHECK_RELEASED(self);
- return PyUnicode_FromString(self->view.format);
+ self->exports--;
+ return;
+ /* PyBuffer_Release() decrements view->obj after this function returns. */
}
-static PyObject *
-memory_itemsize_get(PyMemoryViewObject *self)
+/* Buffer methods */
+static PyBufferProcs memory_as_buffer = {
+ (getbufferproc)memory_getbuf, /* bf_getbuffer */
+ (releasebufferproc)memory_releasebuf, /* bf_releasebuffer */
+};
+
+
+/****************************************************************************/
+/* Optimized pack/unpack for all native format specifiers */
+/****************************************************************************/
+
+/*
+ Fix exceptions:
+ 1) Include format string in the error message.
+ 2) OverflowError -> ValueError.
+ 3) The error message from PyNumber_Index() is not ideal.
+*/
+static int
+type_error_int(const char *fmt)
{
- CHECK_RELEASED(self);
- return PyLong_FromSsize_t(self->view.itemsize);
+ PyErr_Format(PyExc_TypeError,
+ "memoryview: invalid type for format '%s'", fmt);
+ return -1;
}
-static PyObject *
-_IntTupleFromSsizet(int len, Py_ssize_t *vals)
+static int
+value_error_int(const char *fmt)
{
- int i;
- PyObject *o;
- PyObject *intTuple;
+ PyErr_Format(PyExc_ValueError,
+ "memoryview: invalid value for format '%s'", fmt);
+ return -1;
+}
- if (vals == NULL) {
- Py_INCREF(Py_None);
- return Py_None;
+static int
+fix_error_int(const char *fmt)
+{
+ assert(PyErr_Occurred());
+ if (PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Clear();
+ return type_error_int(fmt);
}
- intTuple = PyTuple_New(len);
- if (!intTuple)
- return NULL;
- for (i=0; i<len; i++) {
- o = PyLong_FromSsize_t(vals[i]);
- if (!o) {
- Py_DECREF(intTuple);
- return NULL;
- }
- PyTuple_SET_ITEM(intTuple, i, o);
+ else if (PyErr_ExceptionMatches(PyExc_OverflowError) ||
+ PyErr_ExceptionMatches(PyExc_ValueError)) {
+ PyErr_Clear();
+ return value_error_int(fmt);
}
- return intTuple;
+
+ return -1;
}
-static PyObject *
-memory_shape_get(PyMemoryViewObject *self)
+/* Accept integer objects or objects with an __index__() method. */
+static long
+pylong_as_ld(PyObject *item)
{
- CHECK_RELEASED(self);
- return _IntTupleFromSsizet(self->view.ndim, self->view.shape);
+ PyObject *tmp;
+ long ld;
+
+ tmp = PyNumber_Index(item);
+ if (tmp == NULL)
+ return -1;
+
+ ld = PyLong_AsLong(tmp);
+ Py_DECREF(tmp);
+ return ld;
}
-static PyObject *
-memory_strides_get(PyMemoryViewObject *self)
+static unsigned long
+pylong_as_lu(PyObject *item)
{
- CHECK_RELEASED(self);
- return _IntTupleFromSsizet(self->view.ndim, self->view.strides);
+ PyObject *tmp;
+ unsigned long lu;
+
+ tmp = PyNumber_Index(item);
+ if (tmp == NULL)
+ return (unsigned long)-1;
+
+ lu = PyLong_AsUnsignedLong(tmp);
+ Py_DECREF(tmp);
+ return lu;
}
-static PyObject *
-memory_suboffsets_get(PyMemoryViewObject *self)
+#ifdef HAVE_LONG_LONG
+static PY_LONG_LONG
+pylong_as_lld(PyObject *item)
{
- CHECK_RELEASED(self);
- return _IntTupleFromSsizet(self->view.ndim, self->view.suboffsets);
+ PyObject *tmp;
+ PY_LONG_LONG lld;
+
+ tmp = PyNumber_Index(item);
+ if (tmp == NULL)
+ return -1;
+
+ lld = PyLong_AsLongLong(tmp);
+ Py_DECREF(tmp);
+ return lld;
}
-static PyObject *
-memory_readonly_get(PyMemoryViewObject *self)
+static unsigned PY_LONG_LONG
+pylong_as_llu(PyObject *item)
{
- CHECK_RELEASED(self);
- return PyBool_FromLong(self->view.readonly);
+ PyObject *tmp;
+ unsigned PY_LONG_LONG llu;
+
+ tmp = PyNumber_Index(item);
+ if (tmp == NULL)
+ return (unsigned PY_LONG_LONG)-1;
+
+ llu = PyLong_AsUnsignedLongLong(tmp);
+ Py_DECREF(tmp);
+ return llu;
}
+#endif
-static PyObject *
-memory_ndim_get(PyMemoryViewObject *self)
+static Py_ssize_t
+pylong_as_zd(PyObject *item)
{
- CHECK_RELEASED(self);
- return PyLong_FromLong(self->view.ndim);
+ PyObject *tmp;
+ Py_ssize_t zd;
+
+ tmp = PyNumber_Index(item);
+ if (tmp == NULL)
+ return -1;
+
+ zd = PyLong_AsSsize_t(tmp);
+ Py_DECREF(tmp);
+ return zd;
}
-static PyGetSetDef memory_getsetlist[] ={
- {"format", (getter)memory_format_get, NULL, NULL},
- {"itemsize", (getter)memory_itemsize_get, NULL, NULL},
- {"shape", (getter)memory_shape_get, NULL, NULL},
- {"strides", (getter)memory_strides_get, NULL, NULL},
- {"suboffsets", (getter)memory_suboffsets_get, NULL, NULL},
- {"readonly", (getter)memory_readonly_get, NULL, NULL},
- {"ndim", (getter)memory_ndim_get, NULL, NULL},
- {NULL, NULL, NULL, NULL},
-};
+static size_t
+pylong_as_zu(PyObject *item)
+{
+ PyObject *tmp;
+ size_t zu;
+ tmp = PyNumber_Index(item);
+ if (tmp == NULL)
+ return (size_t)-1;
-static PyObject *
-memory_tobytes(PyMemoryViewObject *mem, PyObject *noargs)
+ zu = PyLong_AsSize_t(tmp);
+ Py_DECREF(tmp);
+ return zu;
+}
+
+/* Timings with the ndarray from _testbuffer.c indicate that using the
+ struct module is around 15x slower than the two functions below. */
+
+#define UNPACK_SINGLE(dest, ptr, type) \
+ do { \
+ type x; \
+ memcpy((char *)&x, ptr, sizeof x); \
+ dest = x; \
+ } while (0)
+
+/* Unpack a single item. 'fmt' can be any native format character in struct
+ module syntax. This function is very sensitive to small changes. With this
+ layout gcc automatically generates a fast jump table. */
+Py_LOCAL_INLINE(PyObject *)
+unpack_single(const char *ptr, const char *fmt)
{
- CHECK_RELEASED(mem);
- return PyObject_CallFunctionObjArgs(
- (PyObject *) &PyBytes_Type, mem, NULL);
+ unsigned PY_LONG_LONG llu;
+ unsigned long lu;
+ size_t zu;
+ PY_LONG_LONG lld;
+ long ld;
+ Py_ssize_t zd;
+ double d;
+ unsigned char uc;
+ void *p;
+
+ switch (fmt[0]) {
+
+ /* signed integers and fast path for 'B' */
+ case 'B': uc = *((unsigned char *)ptr); goto convert_uc;
+ case 'b': ld = *((signed char *)ptr); goto convert_ld;
+ case 'h': UNPACK_SINGLE(ld, ptr, short); goto convert_ld;
+ case 'i': UNPACK_SINGLE(ld, ptr, int); goto convert_ld;
+ case 'l': UNPACK_SINGLE(ld, ptr, long); goto convert_ld;
+
+ /* boolean */
+ #ifdef HAVE_C99_BOOL
+ case '?': UNPACK_SINGLE(ld, ptr, _Bool); goto convert_bool;
+ #else
+ case '?': UNPACK_SINGLE(ld, ptr, char); goto convert_bool;
+ #endif
+
+ /* unsigned integers */
+ case 'H': UNPACK_SINGLE(lu, ptr, unsigned short); goto convert_lu;
+ case 'I': UNPACK_SINGLE(lu, ptr, unsigned int); goto convert_lu;
+ case 'L': UNPACK_SINGLE(lu, ptr, unsigned long); goto convert_lu;
+
+ /* native 64-bit */
+ #ifdef HAVE_LONG_LONG
+ case 'q': UNPACK_SINGLE(lld, ptr, PY_LONG_LONG); goto convert_lld;
+ case 'Q': UNPACK_SINGLE(llu, ptr, unsigned PY_LONG_LONG); goto convert_llu;
+ #endif
+
+ /* ssize_t and size_t */
+ case 'n': UNPACK_SINGLE(zd, ptr, Py_ssize_t); goto convert_zd;
+ case 'N': UNPACK_SINGLE(zu, ptr, size_t); goto convert_zu;
+
+ /* floats */
+ case 'f': UNPACK_SINGLE(d, ptr, float); goto convert_double;
+ case 'd': UNPACK_SINGLE(d, ptr, double); goto convert_double;
+
+ /* bytes object */
+ case 'c': goto convert_bytes;
+
+ /* pointer */
+ case 'P': UNPACK_SINGLE(p, ptr, void *); goto convert_pointer;
+
+ /* default */
+ default: goto err_format;
+ }
+
+convert_uc:
+ /* PyLong_FromUnsignedLong() is slower */
+ return PyLong_FromLong(uc);
+convert_ld:
+ return PyLong_FromLong(ld);
+convert_lu:
+ return PyLong_FromUnsignedLong(lu);
+convert_lld:
+ return PyLong_FromLongLong(lld);
+convert_llu:
+ return PyLong_FromUnsignedLongLong(llu);
+convert_zd:
+ return PyLong_FromSsize_t(zd);
+convert_zu:
+ return PyLong_FromSize_t(zu);
+convert_double:
+ return PyFloat_FromDouble(d);
+convert_bool:
+ return PyBool_FromLong(ld);
+convert_bytes:
+ return PyBytes_FromStringAndSize(ptr, 1);
+convert_pointer:
+ return PyLong_FromVoidPtr(p);
+err_format:
+ PyErr_Format(PyExc_NotImplementedError,
+ "memoryview: format %s not supported", fmt);
+ return NULL;
}
-/* TODO: rewrite this function using the struct module to unpack
- each buffer item */
+#define PACK_SINGLE(ptr, src, type) \
+ do { \
+ type x; \
+ x = (type)src; \
+ memcpy(ptr, (char *)&x, sizeof x); \
+ } while (0)
+
+/* Pack a single item. 'fmt' can be any native format character in
+ struct module syntax. */
+static int
+pack_single(char *ptr, PyObject *item, const char *fmt)
+{
+ unsigned PY_LONG_LONG llu;
+ unsigned long lu;
+ size_t zu;
+ PY_LONG_LONG lld;
+ long ld;
+ Py_ssize_t zd;
+ double d;
+ void *p;
+
+ switch (fmt[0]) {
+ /* signed integers */
+ case 'b': case 'h': case 'i': case 'l':
+ ld = pylong_as_ld(item);
+ if (ld == -1 && PyErr_Occurred())
+ goto err_occurred;
+ switch (fmt[0]) {
+ case 'b':
+ if (ld < SCHAR_MIN || ld > SCHAR_MAX) goto err_range;
+ *((signed char *)ptr) = (signed char)ld; break;
+ case 'h':
+ if (ld < SHRT_MIN || ld > SHRT_MAX) goto err_range;
+ PACK_SINGLE(ptr, ld, short); break;
+ case 'i':
+ if (ld < INT_MIN || ld > INT_MAX) goto err_range;
+ PACK_SINGLE(ptr, ld, int); break;
+ default: /* 'l' */
+ PACK_SINGLE(ptr, ld, long); break;
+ }
+ break;
+
+ /* unsigned integers */
+ case 'B': case 'H': case 'I': case 'L':
+ lu = pylong_as_lu(item);
+ if (lu == (unsigned long)-1 && PyErr_Occurred())
+ goto err_occurred;
+ switch (fmt[0]) {
+ case 'B':
+ if (lu > UCHAR_MAX) goto err_range;
+ *((unsigned char *)ptr) = (unsigned char)lu; break;
+ case 'H':
+ if (lu > USHRT_MAX) goto err_range;
+ PACK_SINGLE(ptr, lu, unsigned short); break;
+ case 'I':
+ if (lu > UINT_MAX) goto err_range;
+ PACK_SINGLE(ptr, lu, unsigned int); break;
+ default: /* 'L' */
+ PACK_SINGLE(ptr, lu, unsigned long); break;
+ }
+ break;
+
+ /* native 64-bit */
+ #ifdef HAVE_LONG_LONG
+ case 'q':
+ lld = pylong_as_lld(item);
+ if (lld == -1 && PyErr_Occurred())
+ goto err_occurred;
+ PACK_SINGLE(ptr, lld, PY_LONG_LONG);
+ break;
+ case 'Q':
+ llu = pylong_as_llu(item);
+ if (llu == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())
+ goto err_occurred;
+ PACK_SINGLE(ptr, llu, unsigned PY_LONG_LONG);
+ break;
+ #endif
+
+ /* ssize_t and size_t */
+ case 'n':
+ zd = pylong_as_zd(item);
+ if (zd == -1 && PyErr_Occurred())
+ goto err_occurred;
+ PACK_SINGLE(ptr, zd, Py_ssize_t);
+ break;
+ case 'N':
+ zu = pylong_as_zu(item);
+ if (zu == (size_t)-1 && PyErr_Occurred())
+ goto err_occurred;
+ PACK_SINGLE(ptr, zu, size_t);
+ break;
+
+ /* floats */
+ case 'f': case 'd':
+ d = PyFloat_AsDouble(item);
+ if (d == -1.0 && PyErr_Occurred())
+ goto err_occurred;
+ if (fmt[0] == 'f') {
+ PACK_SINGLE(ptr, d, float);
+ }
+ else {
+ PACK_SINGLE(ptr, d, double);
+ }
+ break;
+
+ /* bool */
+ case '?':
+ ld = PyObject_IsTrue(item);
+ if (ld < 0)
+ return -1; /* preserve original error */
+ #ifdef HAVE_C99_BOOL
+ PACK_SINGLE(ptr, ld, _Bool);
+ #else
+ PACK_SINGLE(ptr, ld, char);
+ #endif
+ break;
+
+ /* bytes object */
+ case 'c':
+ if (!PyBytes_Check(item))
+ return type_error_int(fmt);
+ if (PyBytes_GET_SIZE(item) != 1)
+ return value_error_int(fmt);
+ *ptr = PyBytes_AS_STRING(item)[0];
+ break;
+ /* pointer */
+ case 'P':
+ p = PyLong_AsVoidPtr(item);
+ if (p == NULL && PyErr_Occurred())
+ goto err_occurred;
+ PACK_SINGLE(ptr, p, void *);
+ break;
+
+ /* default */
+ default: goto err_format;
+ }
+
+ return 0;
+
+err_occurred:
+ return fix_error_int(fmt);
+err_range:
+ return value_error_int(fmt);
+err_format:
+ PyErr_Format(PyExc_NotImplementedError,
+ "memoryview: format %s not supported", fmt);
+ return -1;
+}
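+
+/* Illustrative Python-level behaviour (a sketch, assuming an exporter
+   with native format 'i' such as array('i', ...)); item assignment on a
+   memoryview goes through pack_single():
+
+       >>> from array import array
+       >>> m = memoryview(array('i', [0, 1, 2]))
+       >>> m[1] = -7            # pack_single(ptr, -7, "i")
+       >>> m.tolist()
+       [0, -7, 2]
+
+   Values that do not fit the C type raise ValueError, objects of the
+   wrong type raise TypeError (see the error helpers above). */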
+
+
+/****************************************************************************/
+/* Representations */
+/****************************************************************************/
+
+/* Allow the explicit form of the native format ('@' prefix). Only
+   single-character native formats are handled here; anything else
+   raises NotImplementedError. */
+Py_LOCAL_INLINE(const char *)
+adjust_fmt(const Py_buffer *view)
+{
+ const char *fmt;
+
+ fmt = (view->format[0] == '@') ? view->format+1 : view->format;
+ if (fmt[0] && fmt[1] == '\0')
+ return fmt;
+
+ PyErr_Format(PyExc_NotImplementedError,
+ "memoryview: unsupported format %s", view->format);
+ return NULL;
+}
+
+/* Base case for multi-dimensional unpacking. Assumption: ndim == 1. */
static PyObject *
-memory_tolist(PyMemoryViewObject *mem, PyObject *noargs)
+tolist_base(const char *ptr, const Py_ssize_t *shape,
+ const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
+ const char *fmt)
{
- Py_buffer *view = &(mem->view);
+ PyObject *lst, *item;
Py_ssize_t i;
- PyObject *res, *item;
- char *buf;
- CHECK_RELEASED(mem);
- if (strcmp(view->format, "B") || view->itemsize != 1) {
- PyErr_SetString(PyExc_NotImplementedError,
- "tolist() only supports byte views");
- return NULL;
- }
- if (view->ndim != 1) {
- PyErr_SetString(PyExc_NotImplementedError,
- "tolist() only supports one-dimensional objects");
- return NULL;
- }
- res = PyList_New(view->len);
- if (res == NULL)
+ lst = PyList_New(shape[0]);
+ if (lst == NULL)
return NULL;
- buf = view->buf;
- for (i = 0; i < view->len; i++) {
- item = PyLong_FromUnsignedLong((unsigned char) *buf);
+
+ for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
+ const char *xptr = ADJUST_PTR(ptr, suboffsets);
+ item = unpack_single(xptr, fmt);
if (item == NULL) {
- Py_DECREF(res);
+ Py_DECREF(lst);
return NULL;
}
- PyList_SET_ITEM(res, i, item);
- buf++;
+ PyList_SET_ITEM(lst, i, item);
}
- return res;
+
+ return lst;
}
-static void
-do_release(PyMemoryViewObject *self)
+/* Unpack a multi-dimensional array into a nested list.
+ Assumption: ndim >= 1. */
+static PyObject *
+tolist_rec(const char *ptr, Py_ssize_t ndim, const Py_ssize_t *shape,
+ const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
+ const char *fmt)
{
- if (self->view.obj != NULL) {
- PyBuffer_Release(&(self->view));
+ PyObject *lst, *item;
+ Py_ssize_t i;
+
+ assert(ndim >= 1);
+ assert(shape != NULL);
+ assert(strides != NULL);
+
+ if (ndim == 1)
+ return tolist_base(ptr, shape, strides, suboffsets, fmt);
+
+ lst = PyList_New(shape[0]);
+ if (lst == NULL)
+ return NULL;
+
+ for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
+ const char *xptr = ADJUST_PTR(ptr, suboffsets);
+ item = tolist_rec(xptr, ndim-1, shape+1,
+ strides+1, suboffsets ? suboffsets+1 : NULL,
+ fmt);
+ if (item == NULL) {
+ Py_DECREF(lst);
+ return NULL;
+ }
+ PyList_SET_ITEM(lst, i, item);
}
- self->view.obj = NULL;
- self->view.buf = NULL;
+
+ return lst;
}
+/* Return a list representation of the memoryview. Currently only buffers
+   with single-character native format strings are supported. */
static PyObject *
-memory_enter(PyObject *self, PyObject *args)
+memory_tolist(PyMemoryViewObject *mv, PyObject *noargs)
{
- CHECK_RELEASED(self);
- Py_INCREF(self);
- return self;
+ const Py_buffer *view = &(mv->view);
+ const char *fmt;
+
+ CHECK_RELEASED(mv);
+
+ fmt = adjust_fmt(view);
+ if (fmt == NULL)
+ return NULL;
+ if (view->ndim == 0) {
+ return unpack_single(view->buf, fmt);
+ }
+ else if (view->ndim == 1) {
+ return tolist_base(view->buf, view->shape,
+ view->strides, view->suboffsets,
+ fmt);
+ }
+ else {
+ return tolist_rec(view->buf, view->ndim, view->shape,
+ view->strides, view->suboffsets,
+ fmt);
+ }
}
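+
+/* Python-level sketch of the new tolist() (assuming array.array as the
+   exporter; multi-dimensional exporters are unpacked into nested lists
+   via tolist_rec()):
+
+       >>> from array import array
+       >>> memoryview(array('d', [1.5, 2.5])).tolist()
+       [1.5, 2.5]
+
+   The previous implementation only accepted one-dimensional 'B' views. */
+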
static PyObject *
-memory_exit(PyObject *self, PyObject *args)
+memory_tobytes(PyMemoryViewObject *self, PyObject *dummy)
{
- do_release((PyMemoryViewObject *) self);
- Py_RETURN_NONE;
-}
+ Py_buffer *src = VIEW_ADDR(self);
+ PyObject *bytes = NULL;
-static PyMethodDef memory_methods[] = {
- {"release", memory_exit, METH_NOARGS},
- {"tobytes", (PyCFunction)memory_tobytes, METH_NOARGS, NULL},
- {"tolist", (PyCFunction)memory_tolist, METH_NOARGS, NULL},
- {"__enter__", memory_enter, METH_NOARGS},
- {"__exit__", memory_exit, METH_VARARGS},
- {NULL, NULL} /* sentinel */
-};
+ CHECK_RELEASED(self);
+ if (MV_C_CONTIGUOUS(self->flags)) {
+ return PyBytes_FromStringAndSize(src->buf, src->len);
+ }
-static void
-memory_dealloc(PyMemoryViewObject *self)
-{
- _PyObject_GC_UNTRACK(self);
- do_release(self);
- PyObject_GC_Del(self);
+ bytes = PyBytes_FromStringAndSize(NULL, src->len);
+ if (bytes == NULL)
+ return NULL;
+
+ if (buffer_to_c_contiguous(PyBytes_AS_STRING(bytes), src) < 0) {
+ Py_DECREF(bytes);
+ return NULL;
+ }
+
+ return bytes;
}
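+
+/* Python-level sketch (assumes a bytes exporter); non-contiguous views
+   are copied through buffer_to_c_contiguous(), so the result reflects
+   the logical structure of the view:
+
+       >>> m = memoryview(b'abcdef')[::2]     # non-contiguous slice
+       >>> m.tobytes()
+       b'ace'
+*/
+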
static PyObject *
memory_repr(PyMemoryViewObject *self)
{
- if (IS_RELEASED(self))
+ if (self->flags & _Py_MEMORYVIEW_RELEASED)
return PyUnicode_FromFormat("<released memory at %p>", self);
else
return PyUnicode_FromFormat("<memory at %p>", self);
}
-static Py_hash_t
-memory_hash(PyMemoryViewObject *self)
+
+/**************************************************************************/
+/* Indexing and slicing */
+/**************************************************************************/
+
+/* Get the pointer to the item at index. */
+static char *
+ptr_from_index(Py_buffer *view, Py_ssize_t index)
{
- if (self->hash == -1) {
- Py_buffer *view = &self->view;
- CHECK_RELEASED_INT(self);
- if (view->ndim > 1) {
- PyErr_SetString(PyExc_NotImplementedError,
- "can't hash multi-dimensional memoryview object");
- return -1;
- }
- if (view->strides && view->strides[0] != view->itemsize) {
- PyErr_SetString(PyExc_NotImplementedError,
- "can't hash strided memoryview object");
- return -1;
- }
- if (!view->readonly) {
- PyErr_SetString(PyExc_ValueError,
- "can't hash writable memoryview object");
- return -1;
- }
- if (view->obj != NULL && PyObject_Hash(view->obj) == -1) {
- /* Keep the original error message */
- return -1;
- }
- /* Can't fail */
- self->hash = _Py_HashBytes((unsigned char *) view->buf, view->len);
+ char *ptr;
+ Py_ssize_t nitems; /* items in the first dimension */
+
+ assert(view->shape);
+ assert(view->strides);
+
+ nitems = view->shape[0];
+ if (index < 0) {
+ index += nitems;
+ }
+ if (index < 0 || index >= nitems) {
+ PyErr_SetString(PyExc_IndexError, "index out of bounds");
+ return NULL;
}
- return self->hash;
-}
-/* Sequence methods */
-static Py_ssize_t
-memory_length(PyMemoryViewObject *self)
-{
- CHECK_RELEASED_INT(self);
- return get_shape0(&self->view);
+ ptr = (char *)view->buf;
+ ptr += view->strides[0] * index;
+
+ ptr = ADJUST_PTR(ptr, view->suboffsets);
+
+ return ptr;
}
-/* Alternate version of memory_subcript that only accepts indices.
- Used by PySeqIter_New().
-*/
+/* Return the item at index. In a one-dimensional view, this is an object
+   of the type specified by view->format; for higher dimensions a sub-view
+   would be returned, but that is not implemented yet. Used by
+   memory_subscript() and memory_as_sequence. */
static PyObject *
-memory_item(PyMemoryViewObject *self, Py_ssize_t result)
+memory_item(PyMemoryViewObject *self, Py_ssize_t index)
{
Py_buffer *view = &(self->view);
+ const char *fmt;
CHECK_RELEASED(self);
+
+ fmt = adjust_fmt(view);
+ if (fmt == NULL)
+ return NULL;
+
if (view->ndim == 0) {
- PyErr_SetString(PyExc_IndexError,
- "invalid indexing of 0-dim memory");
+ PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
return NULL;
}
if (view->ndim == 1) {
- /* Return a bytes object */
- char *ptr;
- ptr = (char *)view->buf;
- if (result < 0) {
- result += get_shape0(view);
- }
- if ((result < 0) || (result >= get_shape0(view))) {
- PyErr_SetString(PyExc_IndexError,
- "index out of bounds");
+ char *ptr = ptr_from_index(view, index);
+ if (ptr == NULL)
return NULL;
- }
- if (view->strides == NULL)
- ptr += view->itemsize * result;
- else
- ptr += view->strides[0] * result;
- if (view->suboffsets != NULL &&
- view->suboffsets[0] >= 0) {
- ptr = *((char **)ptr) + view->suboffsets[0];
- }
- return PyBytes_FromStringAndSize(ptr, view->itemsize);
- } else {
- /* Return a new memory-view object */
- Py_buffer newview;
- memset(&newview, 0, sizeof(newview));
- /* XXX: This needs to be fixed so it actually returns a sub-view */
- return PyMemoryView_FromBuffer(&newview);
+ return unpack_single(ptr, fmt);
}
+
+ PyErr_SetString(PyExc_NotImplementedError,
+ "multi-dimensional sub-views are not implemented");
+ return NULL;
}
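+
+/* Python-level sketch of indexing (assuming a one-dimensional 'i'
+   exporter such as array('i', ...)); items are unpacked according to
+   view->format instead of being returned as raw bytes:
+
+       >>> from array import array
+       >>> m = memoryview(array('i', [10, 20, 30]))
+       >>> m[1]
+       20
+*/
+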
-/*
- mem[obj] returns a bytes object holding the data for one element if
- obj fully indexes the memory view or another memory-view object
- if it does not.
+Py_LOCAL_INLINE(int)
+init_slice(Py_buffer *base, PyObject *key, int dim)
+{
+ Py_ssize_t start, stop, step, slicelength;
- 0-d memory-view objects can be referenced using ... or () but
- not with anything else.
- */
+ if (PySlice_GetIndicesEx(key, base->shape[dim],
+ &start, &stop, &step, &slicelength) < 0) {
+ return -1;
+ }
+
+
+ if (base->suboffsets == NULL || dim == 0) {
+ adjust_buf:
+ base->buf = (char *)base->buf + base->strides[dim] * start;
+ }
+ else {
+ Py_ssize_t n = dim-1;
+ while (n >= 0 && base->suboffsets[n] < 0)
+ n--;
+ if (n < 0)
+ goto adjust_buf; /* all suboffsets are negative */
+ base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start;
+ }
+ base->shape[dim] = slicelength;
+ base->strides[dim] = base->strides[dim] * step;
+
+ return 0;
+}
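+
+/* Rough Python-level illustration of what init_slice() computes for one
+   dimension (a sketch; start, stop and step come from
+   PySlice_GetIndicesEx()):
+
+       >>> m = memoryview(bytes(range(8)))[1:7:2]
+       >>> m.tolist()
+       [1, 3, 5]
+       >>> len(m), m.strides
+       (3, (2,))
+
+   buf is advanced by start * strides[0], shape[0] becomes the slice
+   length and strides[0] is scaled by the step. */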
+
+static int
+is_multislice(PyObject *key)
+{
+ Py_ssize_t size, i;
+
+ if (!PyTuple_Check(key))
+ return 0;
+ size = PyTuple_GET_SIZE(key);
+ if (size == 0)
+ return 0;
+
+ for (i = 0; i < size; i++) {
+ PyObject *x = PyTuple_GET_ITEM(key, i);
+ if (!PySlice_Check(x))
+ return 0;
+ }
+ return 1;
+}
+
+/* mv[obj] returns an object holding the data for one element if obj
+ fully indexes the memoryview or another memoryview object if it
+ does not.
+
+ 0-d memoryview objects can be referenced using mv[...] or mv[()]
+ but not with anything else. */
static PyObject *
memory_subscript(PyMemoryViewObject *self, PyObject *key)
{
@@ -611,247 +2030,567 @@ memory_subscript(PyMemoryViewObject *self, PyObject *key)
view = &(self->view);
CHECK_RELEASED(self);
+
if (view->ndim == 0) {
- if (key == Py_Ellipsis ||
- (PyTuple_Check(key) && PyTuple_GET_SIZE(key)==0)) {
+ if (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0) {
+ const char *fmt = adjust_fmt(view);
+ if (fmt == NULL)
+ return NULL;
+ return unpack_single(view->buf, fmt);
+ }
+ else if (key == Py_Ellipsis) {
Py_INCREF(self);
return (PyObject *)self;
}
else {
- PyErr_SetString(PyExc_IndexError,
- "invalid indexing of 0-dim memory");
+ PyErr_SetString(PyExc_TypeError,
+ "invalid indexing of 0-dim memory");
return NULL;
}
}
+
if (PyIndex_Check(key)) {
- Py_ssize_t result;
- result = PyNumber_AsSsize_t(key, NULL);
- if (result == -1 && PyErr_Occurred())
- return NULL;
- return memory_item(self, result);
+ Py_ssize_t index;
+ index = PyNumber_AsSsize_t(key, PyExc_IndexError);
+ if (index == -1 && PyErr_Occurred())
+ return NULL;
+ return memory_item(self, index);
}
else if (PySlice_Check(key)) {
- Py_ssize_t start, stop, step, slicelength;
+ PyMemoryViewObject *sliced;
- if (PySlice_GetIndicesEx(key, get_shape0(view),
- &start, &stop, &step, &slicelength) < 0) {
+ sliced = (PyMemoryViewObject *)mbuf_add_view(self->mbuf, view);
+ if (sliced == NULL)
+ return NULL;
+
+ if (init_slice(&sliced->view, key, 0) < 0) {
+ Py_DECREF(sliced);
return NULL;
}
-
- if (step == 1 && view->ndim == 1) {
- Py_buffer newview;
- void *newbuf = (char *) view->buf
- + start * view->itemsize;
- int newflags = view->readonly
- ? PyBUF_CONTIG_RO : PyBUF_CONTIG;
-
- /* XXX There should be an API to create a subbuffer */
- if (view->obj != NULL) {
- if (PyObject_GetBuffer(view->obj, &newview, newflags) == -1)
- return NULL;
- }
- else {
- newview = *view;
- }
- newview.buf = newbuf;
- newview.len = slicelength * newview.itemsize;
- newview.format = view->format;
- newview.shape = &(newview.smalltable[0]);
- newview.shape[0] = slicelength;
- newview.strides = &(newview.itemsize);
- return PyMemoryView_FromBuffer(&newview);
- }
- PyErr_SetNone(PyExc_NotImplementedError);
+ init_len(&sliced->view);
+ init_flags(sliced);
+
+ return (PyObject *)sliced;
+ }
+ else if (is_multislice(key)) {
+ PyErr_SetString(PyExc_NotImplementedError,
+ "multi-dimensional slicing is not implemented");
return NULL;
}
- PyErr_Format(PyExc_TypeError,
- "cannot index memory using \"%.200s\"",
- key->ob_type->tp_name);
+
+ PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
return NULL;
}
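+
+/* Python-level sketch of subscripting (assumes a bytes exporter):
+   indexing unpacks a single item, slicing returns a new view onto the
+   same underlying buffer without copying:
+
+       >>> m = memoryview(b'abcdef')
+       >>> m[-1]
+       102
+       >>> m[2:5].tobytes()
+       b'cde'
+*/
+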
-
-/* Need to support assigning memory if we can */
static int
memory_ass_sub(PyMemoryViewObject *self, PyObject *key, PyObject *value)
{
- Py_ssize_t start, len, bytelen;
- Py_buffer srcview;
Py_buffer *view = &(self->view);
- char *srcbuf, *destbuf;
+ Py_buffer src;
+ const char *fmt;
+ char *ptr;
CHECK_RELEASED_INT(self);
+
+ fmt = adjust_fmt(view);
+ if (fmt == NULL)
+ return -1;
+
if (view->readonly) {
- PyErr_SetString(PyExc_TypeError,
- "cannot modify read-only memory");
+ PyErr_SetString(PyExc_TypeError, "cannot modify read-only memory");
return -1;
}
if (value == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "cannot delete memory");
+ PyErr_SetString(PyExc_TypeError, "cannot delete memory");
return -1;
}
- if (view->ndim != 1) {
- PyErr_SetNone(PyExc_NotImplementedError);
- return -1;
- }
- if (PyIndex_Check(key)) {
- start = PyNumber_AsSsize_t(key, NULL);
- if (start == -1 && PyErr_Occurred())
- return -1;
- if (start < 0) {
- start += get_shape0(view);
+ if (view->ndim == 0) {
+ if (key == Py_Ellipsis ||
+ (PyTuple_Check(key) && PyTuple_GET_SIZE(key)==0)) {
+ ptr = (char *)view->buf;
+ return pack_single(ptr, value, fmt);
}
- if ((start < 0) || (start >= get_shape0(view))) {
- PyErr_SetString(PyExc_IndexError,
- "index out of bounds");
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "invalid indexing of 0-dim memory");
return -1;
}
- len = 1;
}
- else if (PySlice_Check(key)) {
- Py_ssize_t stop, step;
+ if (view->ndim != 1) {
+ PyErr_SetString(PyExc_NotImplementedError,
+ "memoryview assignments are currently restricted to ndim = 1");
+ return -1;
+ }
- if (PySlice_GetIndicesEx(key, get_shape0(view),
- &start, &stop, &step, &len) < 0) {
+ if (PyIndex_Check(key)) {
+ Py_ssize_t index = PyNumber_AsSsize_t(key, PyExc_IndexError);
+ if (index == -1 && PyErr_Occurred())
return -1;
- }
- if (step != 1) {
- PyErr_SetNone(PyExc_NotImplementedError);
+ ptr = ptr_from_index(view, index);
+ if (ptr == NULL)
return -1;
+ return pack_single(ptr, value, fmt);
+ }
+ /* one-dimensional: fast path */
+ if (PySlice_Check(key) && view->ndim == 1) {
+ Py_buffer dest; /* sliced view */
+ Py_ssize_t arrays[3];
+ int ret = -1;
+
+ /* rvalue must be an exporter */
+ if (PyObject_GetBuffer(value, &src, PyBUF_FULL_RO) < 0)
+ return ret;
+
+ dest = *view;
+ dest.shape = &arrays[0]; dest.shape[0] = view->shape[0];
+ dest.strides = &arrays[1]; dest.strides[0] = view->strides[0];
+ if (view->suboffsets) {
+ dest.suboffsets = &arrays[2]; dest.suboffsets[0] = view->suboffsets[0];
}
+
+ if (init_slice(&dest, key, 0) < 0)
+ goto end_block;
+ dest.len = dest.shape[0] * dest.itemsize;
+
+ ret = copy_single(&dest, &src);
+
+ end_block:
+ PyBuffer_Release(&src);
+ return ret;
}
- else {
- PyErr_Format(PyExc_TypeError,
- "cannot index memory using \"%.200s\"",
- key->ob_type->tp_name);
+ else if (PySlice_Check(key) || is_multislice(key)) {
+ /* Call memory_subscript() to produce a sliced lvalue, then copy
+ rvalue into lvalue. This is already implemented in _testbuffer.c. */
+ PyErr_SetString(PyExc_NotImplementedError,
+ "memoryview slice assignments are currently restricted "
+ "to ndim = 1");
return -1;
}
- if (PyObject_GetBuffer(value, &srcview, PyBUF_CONTIG_RO) == -1) {
- return -1;
+
+ PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
+ return -1;
+}
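+
+/* Python-level sketch of one-dimensional slice assignment (the path
+   above; assumes a writable bytearray exporter). The right-hand side
+   must itself export a buffer whose structure matches the sliced
+   lvalue; copy_single() performs the actual copy:
+
+       >>> b = bytearray(b'hello')
+       >>> m = memoryview(b)
+       >>> m[1:4] = b'ELL'
+       >>> b
+       bytearray(b'hELLo')
+*/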
+
+static Py_ssize_t
+memory_length(PyMemoryViewObject *self)
+{
+ CHECK_RELEASED_INT(self);
+ return self->view.ndim == 0 ? 1 : self->view.shape[0];
+}
+
+/* As mapping */
+static PyMappingMethods memory_as_mapping = {
+ (lenfunc)memory_length, /* mp_length */
+ (binaryfunc)memory_subscript, /* mp_subscript */
+ (objobjargproc)memory_ass_sub, /* mp_ass_subscript */
+};
+
+/* As sequence */
+static PySequenceMethods memory_as_sequence = {
+ 0, /* sq_length */
+ 0, /* sq_concat */
+ 0, /* sq_repeat */
+ (ssizeargfunc)memory_item, /* sq_item */
+};
+
+
+/**************************************************************************/
+/* Comparisons */
+/**************************************************************************/
+
+#define CMP_SINGLE(p, q, type) \
+ do { \
+ type x; \
+ type y; \
+ memcpy((char *)&x, p, sizeof x); \
+ memcpy((char *)&y, q, sizeof y); \
+ equal = (x == y); \
+ } while (0)
+
+Py_LOCAL_INLINE(int)
+unpack_cmp(const char *p, const char *q, const char *fmt)
+{
+ int equal;
+
+ switch (fmt[0]) {
+
+ /* signed integers and fast path for 'B' */
+ case 'B': return *((unsigned char *)p) == *((unsigned char *)q);
+ case 'b': return *((signed char *)p) == *((signed char *)q);
+ case 'h': CMP_SINGLE(p, q, short); return equal;
+ case 'i': CMP_SINGLE(p, q, int); return equal;
+ case 'l': CMP_SINGLE(p, q, long); return equal;
+
+ /* boolean */
+ #ifdef HAVE_C99_BOOL
+ case '?': CMP_SINGLE(p, q, _Bool); return equal;
+ #else
+ case '?': CMP_SINGLE(p, q, char); return equal;
+ #endif
+
+ /* unsigned integers */
+ case 'H': CMP_SINGLE(p, q, unsigned short); return equal;
+ case 'I': CMP_SINGLE(p, q, unsigned int); return equal;
+ case 'L': CMP_SINGLE(p, q, unsigned long); return equal;
+
+ /* native 64-bit */
+ #ifdef HAVE_LONG_LONG
+ case 'q': CMP_SINGLE(p, q, PY_LONG_LONG); return equal;
+ case 'Q': CMP_SINGLE(p, q, unsigned PY_LONG_LONG); return equal;
+ #endif
+
+ /* ssize_t and size_t */
+ case 'n': CMP_SINGLE(p, q, Py_ssize_t); return equal;
+ case 'N': CMP_SINGLE(p, q, size_t); return equal;
+
+ /* floats */
+ /* XXX DBL_EPSILON? */
+ case 'f': CMP_SINGLE(p, q, float); return equal;
+ case 'd': CMP_SINGLE(p, q, double); return equal;
+
+ /* bytes object */
+ case 'c': return *p == *q;
+
+ /* pointer */
+ case 'P': CMP_SINGLE(p, q, void *); return equal;
+
+ /* Py_NotImplemented */
+ default: return -1;
}
- /* XXX should we allow assignment of different item sizes
- as long as the byte length is the same?
- (e.g. assign 2 shorts to a 4-byte slice) */
- if (srcview.itemsize != view->itemsize) {
- PyErr_Format(PyExc_TypeError,
- "mismatching item sizes for \"%.200s\" and \"%.200s\"",
- view->obj->ob_type->tp_name, srcview.obj->ob_type->tp_name);
- goto _error;
- }
- bytelen = len * view->itemsize;
- if (bytelen != srcview.len) {
- PyErr_SetString(PyExc_ValueError,
- "cannot modify size of memoryview object");
- goto _error;
- }
- /* Do the actual copy */
- destbuf = (char *) view->buf + start * view->itemsize;
- srcbuf = (char *) srcview.buf;
- if (destbuf + bytelen < srcbuf || srcbuf + bytelen < destbuf)
- /* No overlapping */
- memcpy(destbuf, srcbuf, bytelen);
- else
- memmove(destbuf, srcbuf, bytelen);
+}
- PyBuffer_Release(&srcview);
- return 0;
+/* Base case for recursive array comparisons. Assumption: ndim == 1. */
+static int
+cmp_base(const char *p, const char *q, const Py_ssize_t *shape,
+ const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
+ const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
+ const char *fmt)
+{
+ Py_ssize_t i;
+ int equal;
+
+ for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
+ const char *xp = ADJUST_PTR(p, psuboffsets);
+ const char *xq = ADJUST_PTR(q, qsuboffsets);
+ equal = unpack_cmp(xp, xq, fmt);
+ if (equal <= 0)
+ return equal;
+ }
-_error:
- PyBuffer_Release(&srcview);
- return -1;
+ return 1;
+}
+
+/* Recursively compare two multi-dimensional arrays that have the same
+ logical structure. Assumption: ndim >= 1. */
+static int
+cmp_rec(const char *p, const char *q,
+ Py_ssize_t ndim, const Py_ssize_t *shape,
+ const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
+ const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
+ const char *fmt)
+{
+ Py_ssize_t i;
+ int equal;
+
+ assert(ndim >= 1);
+ assert(shape != NULL);
+ assert(pstrides != NULL);
+ assert(qstrides != NULL);
+
+ if (ndim == 1) {
+ return cmp_base(p, q, shape,
+ pstrides, psuboffsets,
+ qstrides, qsuboffsets,
+ fmt);
+ }
+
+ for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
+ const char *xp = ADJUST_PTR(p, psuboffsets);
+ const char *xq = ADJUST_PTR(q, qsuboffsets);
+ equal = cmp_rec(xp, xq, ndim-1, shape+1,
+ pstrides+1, psuboffsets ? psuboffsets+1 : NULL,
+ qstrides+1, qsuboffsets ? qsuboffsets+1 : NULL,
+ fmt);
+ if (equal <= 0)
+ return equal;
+ }
+
+ return 1;
}
static PyObject *
memory_richcompare(PyObject *v, PyObject *w, int op)
{
- Py_buffer vv, ww;
- int equal = 0;
PyObject *res;
+ Py_buffer wbuf, *vv, *ww = NULL;
+ const char *vfmt, *wfmt;
+ int equal = -1; /* Py_NotImplemented */
- vv.obj = NULL;
- ww.obj = NULL;
if (op != Py_EQ && op != Py_NE)
- goto _notimpl;
- if ((PyMemoryView_Check(v) && IS_RELEASED(v)) ||
- (PyMemoryView_Check(w) && IS_RELEASED(w))) {
+ goto result; /* Py_NotImplemented */
+
+ assert(PyMemoryView_Check(v));
+ if (BASE_INACCESSIBLE(v)) {
equal = (v == w);
- goto _end;
+ goto result;
}
- if (PyObject_GetBuffer(v, &vv, PyBUF_CONTIG_RO) == -1) {
- PyErr_Clear();
- goto _notimpl;
+ vv = VIEW_ADDR(v);
+
+ if (PyMemoryView_Check(w)) {
+ if (BASE_INACCESSIBLE(w)) {
+ equal = (v == w);
+ goto result;
+ }
+ ww = VIEW_ADDR(w);
+ }
+ else {
+ if (PyObject_GetBuffer(w, &wbuf, PyBUF_FULL_RO) < 0) {
+ PyErr_Clear();
+ goto result; /* Py_NotImplemented */
+ }
+ ww = &wbuf;
}
- if (PyObject_GetBuffer(w, &ww, PyBUF_CONTIG_RO) == -1) {
+
+ vfmt = adjust_fmt(vv);
+ wfmt = adjust_fmt(ww);
+ if (vfmt == NULL || wfmt == NULL) {
PyErr_Clear();
- goto _notimpl;
+ goto result; /* Py_NotImplemented */
}
- if (vv.itemsize != ww.itemsize || vv.len != ww.len)
- goto _end;
+ if (cmp_structure(vv, ww) < 0) {
+ PyErr_Clear();
+ equal = 0;
+ goto result;
+ }
- equal = !memcmp(vv.buf, ww.buf, vv.len);
+ if (vv->ndim == 0) {
+ equal = unpack_cmp(vv->buf, ww->buf, vfmt);
+ }
+ else if (vv->ndim == 1) {
+ equal = cmp_base(vv->buf, ww->buf, vv->shape,
+ vv->strides, vv->suboffsets,
+ ww->strides, ww->suboffsets,
+ vfmt);
+ }
+ else {
+ equal = cmp_rec(vv->buf, ww->buf, vv->ndim, vv->shape,
+ vv->strides, vv->suboffsets,
+ ww->strides, ww->suboffsets,
+ vfmt);
+ }
-_end:
- PyBuffer_Release(&vv);
- PyBuffer_Release(&ww);
- if ((equal && op == Py_EQ) || (!equal && op == Py_NE))
+result:
+ if (equal < 0)
+ res = Py_NotImplemented;
+ else if ((equal && op == Py_EQ) || (!equal && op == Py_NE))
res = Py_True;
else
res = Py_False;
+
+ if (ww == &wbuf)
+ PyBuffer_Release(ww);
Py_INCREF(res);
return res;
+}
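+
+/* Python-level sketch of the comparison semantics (assumes array.array
+   exporters; the right-hand operand may be any buffer exporter). Only
+   equality and inequality are implemented; ordering comparisons return
+   Py_NotImplemented:
+
+       >>> from array import array
+       >>> memoryview(array('i', [1, 2])) == array('i', [1, 2])
+       True
+       >>> memoryview(b'ab') == b'abc'
+       False
+*/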
+
+/**************************************************************************/
+/* Hash */
+/**************************************************************************/
+
+static Py_hash_t
+memory_hash(PyMemoryViewObject *self)
+{
+ if (self->hash == -1) {
+ Py_buffer *view = &self->view;
+ char *mem = view->buf;
+
+ CHECK_RELEASED_INT(self);
-_notimpl:
- PyBuffer_Release(&vv);
- PyBuffer_Release(&ww);
- Py_RETURN_NOTIMPLEMENTED;
+ if (!view->readonly) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot hash writable memoryview object");
+ return -1;
+ }
+ if (view->obj != NULL && PyObject_Hash(view->obj) == -1) {
+ /* Keep the original error message */
+ return -1;
+ }
+
+ if (!MV_C_CONTIGUOUS(self->flags)) {
+ mem = PyMem_Malloc(view->len);
+ if (mem == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
+ if (buffer_to_c_contiguous(mem, view) < 0) {
+ PyMem_Free(mem);
+ return -1;
+ }
+ }
+
+ /* Can't fail */
+ self->hash = _Py_HashBytes((unsigned char *)mem, view->len);
+
+ if (mem != view->buf)
+ PyMem_Free(mem);
+ }
+
+ return self->hash;
}
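+
+/* Python-level sketch of hashing (only read-only views are hashable;
+   non-contiguous views are hashed through a temporary contiguous copy).
+   For a contiguous 'B' view the result matches the hash of the equal
+   bytes object:
+
+       >>> hash(memoryview(b'abc')) == hash(b'abc')
+       True
+       >>> hash(memoryview(bytearray(b'abc')))
+       Traceback (most recent call last):
+         ...
+       ValueError: cannot hash writable memoryview object
+*/
+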
-static int
-memory_traverse(PyMemoryViewObject *self, visitproc visit, void *arg)
+/**************************************************************************/
+/* getters */
+/**************************************************************************/
+
+static PyObject *
+_IntTupleFromSsizet(int len, Py_ssize_t *vals)
{
- if (self->view.obj != NULL)
- Py_VISIT(self->view.obj);
- return 0;
+ int i;
+ PyObject *o;
+ PyObject *intTuple;
+
+ if (vals == NULL)
+ return PyTuple_New(0);
+
+ intTuple = PyTuple_New(len);
+ if (!intTuple)
+ return NULL;
+ for (i=0; i<len; i++) {
+ o = PyLong_FromSsize_t(vals[i]);
+ if (!o) {
+ Py_DECREF(intTuple);
+ return NULL;
+ }
+ PyTuple_SET_ITEM(intTuple, i, o);
+ }
+ return intTuple;
}
-static int
-memory_clear(PyMemoryViewObject *self)
+static PyObject *
+memory_obj_get(PyMemoryViewObject *self)
{
- PyBuffer_Release(&self->view);
- return 0;
+ Py_buffer *view = &self->view;
+
+ CHECK_RELEASED(self);
+ if (view->obj == NULL) {
+ Py_RETURN_NONE;
+ }
+ Py_INCREF(view->obj);
+ return view->obj;
}
+static PyObject *
+memory_nbytes_get(PyMemoryViewObject *self)
+{
+ CHECK_RELEASED(self);
+ return PyLong_FromSsize_t(self->view.len);
+}
-/* As mapping */
-static PyMappingMethods memory_as_mapping = {
- (lenfunc)memory_length, /* mp_length */
- (binaryfunc)memory_subscript, /* mp_subscript */
- (objobjargproc)memory_ass_sub, /* mp_ass_subscript */
-};
+static PyObject *
+memory_format_get(PyMemoryViewObject *self)
+{
+ CHECK_RELEASED(self);
+ return PyUnicode_FromString(self->view.format);
+}
-static PySequenceMethods memory_as_sequence = {
- 0, /* sq_length */
- 0, /* sq_concat */
- 0, /* sq_repeat */
- (ssizeargfunc)memory_item, /* sq_item */
+static PyObject *
+memory_itemsize_get(PyMemoryViewObject *self)
+{
+ CHECK_RELEASED(self);
+ return PyLong_FromSsize_t(self->view.itemsize);
+}
+
+static PyObject *
+memory_shape_get(PyMemoryViewObject *self)
+{
+ CHECK_RELEASED(self);
+ return _IntTupleFromSsizet(self->view.ndim, self->view.shape);
+}
+
+static PyObject *
+memory_strides_get(PyMemoryViewObject *self)
+{
+ CHECK_RELEASED(self);
+ return _IntTupleFromSsizet(self->view.ndim, self->view.strides);
+}
+
+static PyObject *
+memory_suboffsets_get(PyMemoryViewObject *self)
+{
+ CHECK_RELEASED(self);
+ return _IntTupleFromSsizet(self->view.ndim, self->view.suboffsets);
+}
+
+static PyObject *
+memory_readonly_get(PyMemoryViewObject *self)
+{
+ CHECK_RELEASED(self);
+ return PyBool_FromLong(self->view.readonly);
+}
+
+static PyObject *
+memory_ndim_get(PyMemoryViewObject *self)
+{
+ CHECK_RELEASED(self);
+ return PyLong_FromLong(self->view.ndim);
+}
+
+static PyObject *
+memory_c_contiguous(PyMemoryViewObject *self, PyObject *dummy)
+{
+ CHECK_RELEASED(self);
+ return PyBool_FromLong(MV_C_CONTIGUOUS(self->flags));
+}
+
+static PyObject *
+memory_f_contiguous(PyMemoryViewObject *self, PyObject *dummy)
+{
+ CHECK_RELEASED(self);
+ return PyBool_FromLong(MV_F_CONTIGUOUS(self->flags));
+}
+
+static PyObject *
+memory_contiguous(PyMemoryViewObject *self, PyObject *dummy)
+{
+ CHECK_RELEASED(self);
+ return PyBool_FromLong(MV_ANY_CONTIGUOUS(self->flags));
+}
+
+static PyGetSetDef memory_getsetlist[] = {
+ {"obj", (getter)memory_obj_get, NULL, NULL},
+ {"nbytes", (getter)memory_nbytes_get, NULL, NULL},
+ {"readonly", (getter)memory_readonly_get, NULL, NULL},
+ {"itemsize", (getter)memory_itemsize_get, NULL, NULL},
+ {"format", (getter)memory_format_get, NULL, NULL},
+ {"ndim", (getter)memory_ndim_get, NULL, NULL},
+ {"shape", (getter)memory_shape_get, NULL, NULL},
+ {"strides", (getter)memory_strides_get, NULL, NULL},
+ {"suboffsets", (getter)memory_suboffsets_get, NULL, NULL},
+ {"c_contiguous", (getter)memory_c_contiguous, NULL, NULL},
+ {"f_contiguous", (getter)memory_f_contiguous, NULL, NULL},
+ {"contiguous", (getter)memory_contiguous, NULL, NULL},
+ {NULL, NULL, NULL, NULL},
};
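+
+/* Python-level sketch of the getters above (a one-dimensional bytes
+   exporter is assumed; higher-dimensional exporters report their full
+   shape/strides/suboffsets tuples):
+
+       >>> m = memoryview(b'abcdef')
+       >>> m.ndim, m.shape, m.strides, m.itemsize, m.nbytes
+       (1, (6,), (1,), 1, 6)
+       >>> m.c_contiguous and m.f_contiguous and m.contiguous
+       True
+*/
+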
-/* Buffer methods */
-static PyBufferProcs memory_as_buffer = {
- (getbufferproc)memory_getbuf, /* bf_getbuffer */
- (releasebufferproc)memory_releasebuf, /* bf_releasebuffer */
+static PyMethodDef memory_methods[] = {
+ {"release", (PyCFunction)memory_release, METH_NOARGS},
+ {"tobytes", (PyCFunction)memory_tobytes, METH_NOARGS, NULL},
+ {"tolist", (PyCFunction)memory_tolist, METH_NOARGS, NULL},
+ {"cast", (PyCFunction)memory_cast, METH_VARARGS|METH_KEYWORDS, NULL},
+ {"__enter__", memory_enter, METH_NOARGS},
+ {"__exit__", memory_exit, METH_VARARGS},
+ {NULL, NULL}
};
PyTypeObject PyMemoryView_Type = {
PyVarObject_HEAD_INIT(&PyType_Type, 0)
- "memoryview",
- sizeof(PyMemoryViewObject),
- 0,
+ "memoryview", /* tp_name */
+ offsetof(PyMemoryViewObject, ob_array), /* tp_basicsize */
+ sizeof(Py_ssize_t), /* tp_itemsize */
(destructor)memory_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
diff --git a/Objects/object.c b/Objects/object.c
index 2665d21..70d320c 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -1650,6 +1650,9 @@ _Py_ReadyTypes(void)
if (PyType_Ready(&PyProperty_Type) < 0)
Py_FatalError("Can't initialize property type");
+ if (PyType_Ready(&_PyManagedBuffer_Type) < 0)
+ Py_FatalError("Can't initialize managed buffer type");
+
if (PyType_Ready(&PyMemoryView_Type) < 0)
Py_FatalError("Can't initialize memoryview type");