author     Travis E. Oliphant <oliphant@enthought.com>   2007-10-12 23:27:53 (GMT)
committer  Travis E. Oliphant <oliphant@enthought.com>   2007-10-12 23:27:53 (GMT)
commit   fe9bed02e430613f95849fa49aaa4f823a762738 (patch)
tree     f26a61380fcfda85caaaf70f55e74c75c5a0b90f /Objects
parent   9b30784ab2867bcb7335add7306bbd71e2de2151 (diff)
Fix problems with memoryview object. There is still more to do to finish PEP 3118. The memory-view object needs to be fleshed out and the struct module needs to be modified.
Diffstat (limited to 'Objects')
-rw-r--r--  Objects/abstract.c      694
-rw-r--r--  Objects/memoryobject.c   26
2 files changed, 366 insertions, 354 deletions
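
For orientation: the functions reworked in abstract.c below (PyObject_GetBuffer, PyObject_ReleaseBuffer, and the PyBUF_* request flags) are the C side of the PEP 3118 buffer protocol that the commit message refers to. The following sketch is illustrative only and not part of this patch; the helper name sum_bytes is invented for the example, and it assumes the caller passes an object that exports the buffer interface. It uses the release path as defined in abstract.c at this revision (later CPython replaced PyObject_ReleaseBuffer with PyBuffer_Release).

#include <Python.h>

/* Hypothetical helper: sums the bytes of any object exporting the
   PEP 3118 buffer interface.  Sketch only, not part of this commit. */
static PyObject *
sum_bytes(PyObject *unused, PyObject *obj)
{
    Py_buffer view;
    Py_ssize_t i;
    unsigned long total = 0;

    /* Request a simple, contiguous, read-only view of the data. */
    if (PyObject_GetBuffer(obj, &view, PyBUF_SIMPLE) != 0)
        return NULL;            /* TypeError already set in abstract.c */

    for (i = 0; i < view.len; i++)
        total += ((unsigned char *)view.buf)[i];

    /* At this revision the view is released through the exporting object. */
    PyObject_ReleaseBuffer(obj, &view);

    return PyLong_FromUnsignedLong(total);
}
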
diff --git a/Objects/abstract.c b/Objects/abstract.c
index 141996d..3d736d1 100644
--- a/Objects/abstract.c
+++ b/Objects/abstract.c
@@ -220,11 +220,11 @@ PyObject_DelItemString(PyObject *o, char *key)
*/
int
PyObject_AsCharBuffer(PyObject *obj,
- const char **buffer,
- Py_ssize_t *buffer_len)
+ const char **buffer,
+ Py_ssize_t *buffer_len)
{
PyBufferProcs *pb;
- Py_buffer view;
+ Py_buffer view;
if (obj == NULL || buffer == NULL || buffer_len == NULL) {
null_error();
@@ -235,30 +235,30 @@ PyObject_AsCharBuffer(PyObject *obj,
PyErr_SetString(PyExc_TypeError,
"expected an object with the buffer interface");
return -1;
- }
- if ((*pb->bf_getbuffer)(obj, &view, PyBUF_CHARACTER)) return -1;
+ }
+ if ((*pb->bf_getbuffer)(obj, &view, PyBUF_CHARACTER)) return -1;
*buffer = view.buf;
*buffer_len = view.len;
- if (pb->bf_releasebuffer != NULL)
- (*pb->bf_releasebuffer)(obj, &view);
+ if (pb->bf_releasebuffer != NULL)
+ (*pb->bf_releasebuffer)(obj, &view);
return 0;
}
int
PyObject_CheckReadBuffer(PyObject *obj)
{
- PyBufferProcs *pb = obj->ob_type->tp_as_buffer;
+ PyBufferProcs *pb = obj->ob_type->tp_as_buffer;
if (pb == NULL ||
pb->bf_getbuffer == NULL)
- return 0;
- if ((*pb->bf_getbuffer)(obj, NULL, PyBUF_SIMPLE) == -1) {
- PyErr_Clear();
- return 0;
- }
- if (*pb->bf_releasebuffer != NULL)
- (*pb->bf_releasebuffer)(obj, NULL);
+ return 0;
+ if ((*pb->bf_getbuffer)(obj, NULL, PyBUF_SIMPLE) == -1) {
+ PyErr_Clear();
+ return 0;
+ }
+ if (*pb->bf_releasebuffer != NULL)
+ (*pb->bf_releasebuffer)(obj, NULL);
return 1;
}
@@ -267,7 +267,7 @@ int PyObject_AsReadBuffer(PyObject *obj,
Py_ssize_t *buffer_len)
{
PyBufferProcs *pb;
- Py_buffer view;
+ Py_buffer view;
if (obj == NULL || buffer == NULL || buffer_len == NULL) {
null_error();
@@ -275,18 +275,18 @@ int PyObject_AsReadBuffer(PyObject *obj,
}
pb = obj->ob_type->tp_as_buffer;
if (pb == NULL ||
- pb->bf_getbuffer == NULL) {
+ pb->bf_getbuffer == NULL) {
PyErr_SetString(PyExc_TypeError,
"expected an object with a buffer interface");
return -1;
}
- if ((*pb->bf_getbuffer)(obj, &view, PyBUF_SIMPLE)) return -1;
+ if ((*pb->bf_getbuffer)(obj, &view, PyBUF_SIMPLE)) return -1;
*buffer = view.buf;
*buffer_len = view.len;
- if (pb->bf_releasebuffer != NULL)
- (*pb->bf_releasebuffer)(obj, &view);
+ if (pb->bf_releasebuffer != NULL)
+ (*pb->bf_releasebuffer)(obj, &view);
return 0;
}
@@ -295,7 +295,7 @@ int PyObject_AsWriteBuffer(PyObject *obj,
Py_ssize_t *buffer_len)
{
PyBufferProcs *pb;
- Py_buffer view;
+ Py_buffer view;
if (obj == NULL || buffer == NULL || buffer_len == NULL) {
null_error();
@@ -303,17 +303,17 @@ int PyObject_AsWriteBuffer(PyObject *obj,
}
pb = obj->ob_type->tp_as_buffer;
if (pb == NULL ||
- pb->bf_getbuffer == NULL ||
- ((*pb->bf_getbuffer)(obj, &view, PyBUF_WRITABLE) != 0)) {
+ pb->bf_getbuffer == NULL ||
+ ((*pb->bf_getbuffer)(obj, &view, PyBUF_WRITABLE) != 0)) {
PyErr_SetString(PyExc_TypeError,
- "expected an object with a writable buffer interface");
+ "expected an object with a writable buffer interface");
return -1;
}
*buffer = view.buf;
*buffer_len = view.len;
- if (pb->bf_releasebuffer != NULL)
- (*pb->bf_releasebuffer)(obj, &view);
+ if (pb->bf_releasebuffer != NULL)
+ (*pb->bf_releasebuffer)(obj, &view);
return 0;
}
@@ -322,128 +322,128 @@ int PyObject_AsWriteBuffer(PyObject *obj,
int
PyObject_GetBuffer(PyObject *obj, Py_buffer *view, int flags)
{
- if (!PyObject_CheckBuffer(obj)) {
- PyErr_SetString(PyExc_TypeError,
- "object does not have the buffer interface");
- return -1;
- }
- return (*(obj->ob_type->tp_as_buffer->bf_getbuffer))(obj, view, flags);
+ if (!PyObject_CheckBuffer(obj)) {
+ PyErr_SetString(PyExc_TypeError,
+ "object does not have the buffer interface");
+ return -1;
+ }
+ return (*(obj->ob_type->tp_as_buffer->bf_getbuffer))(obj, view, flags);
}
void
PyObject_ReleaseBuffer(PyObject *obj, Py_buffer *view)
{
- if (obj->ob_type->tp_as_buffer != NULL &&
- obj->ob_type->tp_as_buffer->bf_releasebuffer != NULL) {
- (*(obj->ob_type->tp_as_buffer->bf_releasebuffer))(obj, view);
- }
+ if (obj->ob_type->tp_as_buffer != NULL &&
+ obj->ob_type->tp_as_buffer->bf_releasebuffer != NULL) {
+ (*(obj->ob_type->tp_as_buffer->bf_releasebuffer))(obj, view);
+ }
}
static int
_IsFortranContiguous(Py_buffer *view)
{
- Py_ssize_t sd, dim;
- int i;
-
- if (view->ndim == 0) return 1;
- if (view->strides == NULL) return (view->ndim == 1);
-
- sd = view->itemsize;
- if (view->ndim == 1) return (view->shape[0] == 1 ||
- sd == view->strides[0]);
- for (i=0; i<view->ndim; i++) {
- dim = view->shape[i];
- if (dim == 0) return 1;
- if (view->strides[i] != sd) return 0;
- sd *= dim;
- }
- return 1;
+ Py_ssize_t sd, dim;
+ int i;
+
+ if (view->ndim == 0) return 1;
+ if (view->strides == NULL) return (view->ndim == 1);
+
+ sd = view->itemsize;
+ if (view->ndim == 1) return (view->shape[0] == 1 ||
+ sd == view->strides[0]);
+ for (i=0; i<view->ndim; i++) {
+ dim = view->shape[i];
+ if (dim == 0) return 1;
+ if (view->strides[i] != sd) return 0;
+ sd *= dim;
+ }
+ return 1;
}
static int
_IsCContiguous(Py_buffer *view)
{
- Py_ssize_t sd, dim;
- int i;
-
- if (view->ndim == 0) return 1;
- if (view->strides == NULL) return 1;
+ Py_ssize_t sd, dim;
+ int i;
+
+ if (view->ndim == 0) return 1;
+ if (view->strides == NULL) return 1;
- sd = view->itemsize;
- if (view->ndim == 1) return (view->shape[0] == 1 ||
- sd == view->strides[0]);
- for (i=view->ndim-1; i>=0; i--) {
- dim = view->shape[i];
- if (dim == 0) return 1;
- if (view->strides[i] != sd) return 0;
- sd *= dim;
- }
- return 1;
+ sd = view->itemsize;
+ if (view->ndim == 1) return (view->shape[0] == 1 ||
+ sd == view->strides[0]);
+ for (i=view->ndim-1; i>=0; i--) {
+ dim = view->shape[i];
+ if (dim == 0) return 1;
+ if (view->strides[i] != sd) return 0;
+ sd *= dim;
+ }
+ return 1;
}
int
PyBuffer_IsContiguous(Py_buffer *view, char fort)
{
- if (view->suboffsets != NULL) return 0;
+ if (view->suboffsets != NULL) return 0;
- if (fort == 'C')
- return _IsCContiguous(view);
- else if (fort == 'F')
- return _IsFortranContiguous(view);
- else if (fort == 'A')
- return (_IsCContiguous(view) || _IsFortranContiguous(view));
- return 0;
+ if (fort == 'C')
+ return _IsCContiguous(view);
+ else if (fort == 'F')
+ return _IsFortranContiguous(view);
+ else if (fort == 'A')
+ return (_IsCContiguous(view) || _IsFortranContiguous(view));
+ return 0;
}
void*
PyBuffer_GetPointer(Py_buffer *view, Py_ssize_t *indices)
{
- char* pointer;
- int i;
- pointer = (char *)view->buf;
- for (i = 0; i < view->ndim; i++) {
- pointer += view->strides[i]*indices[i];
- if ((view->suboffsets != NULL) && (view->suboffsets[i] >= 0)) {
- pointer = *((char**)pointer) + view->suboffsets[i];
- }
- }
- return (void*)pointer;
+ char* pointer;
+ int i;
+ pointer = (char *)view->buf;
+ for (i = 0; i < view->ndim; i++) {
+ pointer += view->strides[i]*indices[i];
+ if ((view->suboffsets != NULL) && (view->suboffsets[i] >= 0)) {
+ pointer = *((char**)pointer) + view->suboffsets[i];
+ }
+ }
+ return (void*)pointer;
}
void
_add_one_to_index_F(int nd, Py_ssize_t *index, Py_ssize_t *shape)
{
- int k;
-
- for (k=0; k<nd; k++) {
- if (index[k] < shape[k]-1) {
- index[k]++;
- break;
- }
- else {
- index[k] = 0;
- }
- }
+ int k;
+
+ for (k=0; k<nd; k++) {
+ if (index[k] < shape[k]-1) {
+ index[k]++;
+ break;
+ }
+ else {
+ index[k] = 0;
+ }
+ }
}
void
_add_one_to_index_C(int nd, Py_ssize_t *index, Py_ssize_t *shape)
{
- int k;
+ int k;
- for (k=nd-1; k>=0; k--) {
- if (index[k] < shape[k]-1) {
- index[k]++;
- break;
- }
- else {
- index[k] = 0;
- }
- }
+ for (k=nd-1; k>=0; k--) {
+ if (index[k] < shape[k]-1) {
+ index[k]++;
+ break;
+ }
+ else {
+ index[k] = 0;
+ }
+ }
}
/* view is not checked for consistency in either of these. It is
@@ -454,235 +454,235 @@ _add_one_to_index_C(int nd, Py_ssize_t *index, Py_ssize_t *shape)
int
PyBuffer_ToContiguous(void *buf, Py_buffer *view, Py_ssize_t len, char fort)
{
- int k;
- void (*addone)(int, Py_ssize_t *, Py_ssize_t *);
- Py_ssize_t *indices, elements;
- char *dest, *ptr;
-
- if (len > view->len) {
- len = view->len;
- }
-
- if (PyBuffer_IsContiguous(view, fort)) {
- /* simplest copy is all that is needed */
- memcpy(buf, view->buf, len);
- return 0;
- }
-
- /* Otherwise a more elaborate scheme is needed */
-
+ int k;
+ void (*addone)(int, Py_ssize_t *, Py_ssize_t *);
+ Py_ssize_t *indices, elements;
+ char *dest, *ptr;
+
+ if (len > view->len) {
+ len = view->len;
+ }
+
+ if (PyBuffer_IsContiguous(view, fort)) {
+ /* simplest copy is all that is needed */
+ memcpy(buf, view->buf, len);
+ return 0;
+ }
+
+ /* Otherwise a more elaborate scheme is needed */
+
/* XXX(nnorwitz): need to check for overflow! */
- indices = (Py_ssize_t *)PyMem_Malloc(sizeof(Py_ssize_t)*(view->ndim));
- if (indices == NULL) {
- PyErr_NoMemory();
- return -1;
- }
- for (k=0; k<view->ndim;k++) {
- indices[k] = 0;
- }
-
- if (fort == 'F') {
- addone = _add_one_to_index_F;
- }
- else {
- addone = _add_one_to_index_C;
- }
- dest = buf;
- /* XXX : This is not going to be the fastest code in the world
- several optimizations are possible.
- */
- elements = len / view->itemsize;
- while (elements--) {
- addone(view->ndim, indices, view->shape);
- ptr = PyBuffer_GetPointer(view, indices);
- memcpy(dest, ptr, view->itemsize);
- dest += view->itemsize;
- }
- PyMem_Free(indices);
- return 0;
+ indices = (Py_ssize_t *)PyMem_Malloc(sizeof(Py_ssize_t)*(view->ndim));
+ if (indices == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
+ for (k=0; k<view->ndim;k++) {
+ indices[k] = 0;
+ }
+
+ if (fort == 'F') {
+ addone = _add_one_to_index_F;
+ }
+ else {
+ addone = _add_one_to_index_C;
+ }
+ dest = buf;
+ /* XXX : This is not going to be the fastest code in the world
+ several optimizations are possible.
+ */
+ elements = len / view->itemsize;
+ while (elements--) {
+ addone(view->ndim, indices, view->shape);
+ ptr = PyBuffer_GetPointer(view, indices);
+ memcpy(dest, ptr, view->itemsize);
+ dest += view->itemsize;
+ }
+ PyMem_Free(indices);
+ return 0;
}
int
PyBuffer_FromContiguous(Py_buffer *view, void *buf, Py_ssize_t len, char fort)
{
- int k;
- void (*addone)(int, Py_ssize_t *, Py_ssize_t *);
- Py_ssize_t *indices, elements;
- char *src, *ptr;
+ int k;
+ void (*addone)(int, Py_ssize_t *, Py_ssize_t *);
+ Py_ssize_t *indices, elements;
+ char *src, *ptr;
- if (len > view->len) {
- len = view->len;
- }
+ if (len > view->len) {
+ len = view->len;
+ }
- if (PyBuffer_IsContiguous(view, fort)) {
- /* simplest copy is all that is needed */
- memcpy(view->buf, buf, len);
- return 0;
- }
+ if (PyBuffer_IsContiguous(view, fort)) {
+ /* simplest copy is all that is needed */
+ memcpy(view->buf, buf, len);
+ return 0;
+ }
- /* Otherwise a more elaborate scheme is needed */
-
+ /* Otherwise a more elaborate scheme is needed */
+
/* XXX(nnorwitz): need to check for overflow! */
- indices = (Py_ssize_t *)PyMem_Malloc(sizeof(Py_ssize_t)*(view->ndim));
- if (indices == NULL) {
- PyErr_NoMemory();
- return -1;
- }
- for (k=0; k<view->ndim;k++) {
- indices[k] = 0;
- }
-
- if (fort == 'F') {
- addone = _add_one_to_index_F;
- }
- else {
- addone = _add_one_to_index_C;
- }
- src = buf;
- /* XXX : This is not going to be the fastest code in the world
- several optimizations are possible.
- */
- elements = len / view->itemsize;
- while (elements--) {
- addone(view->ndim, indices, view->shape);
- ptr = PyBuffer_GetPointer(view, indices);
- memcpy(ptr, src, view->itemsize);
- src += view->itemsize;
- }
-
- PyMem_Free(indices);
- return 0;
+ indices = (Py_ssize_t *)PyMem_Malloc(sizeof(Py_ssize_t)*(view->ndim));
+ if (indices == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
+ for (k=0; k<view->ndim;k++) {
+ indices[k] = 0;
+ }
+
+ if (fort == 'F') {
+ addone = _add_one_to_index_F;
+ }
+ else {
+ addone = _add_one_to_index_C;
+ }
+ src = buf;
+ /* XXX : This is not going to be the fastest code in the world
+ several optimizations are possible.
+ */
+ elements = len / view->itemsize;
+ while (elements--) {
+ addone(view->ndim, indices, view->shape);
+ ptr = PyBuffer_GetPointer(view, indices);
+ memcpy(ptr, src, view->itemsize);
+ src += view->itemsize;
+ }
+
+ PyMem_Free(indices);
+ return 0;
}
int PyObject_CopyData(PyObject *dest, PyObject *src)
{
- Py_buffer view_dest, view_src;
- int k;
- Py_ssize_t *indices, elements;
- char *dptr, *sptr;
-
- if (!PyObject_CheckBuffer(dest) ||
- !PyObject_CheckBuffer(src)) {
- PyErr_SetString(PyExc_TypeError,
- "both destination and source must have the "\
- "buffer interface");
- return -1;
- }
-
- if (PyObject_GetBuffer(dest, &view_dest, PyBUF_FULL) != 0) return -1;
- if (PyObject_GetBuffer(src, &view_src, PyBUF_FULL_RO) != 0) {
- PyObject_ReleaseBuffer(dest, &view_dest);
- return -1;
- }
-
- if (view_dest.len < view_src.len) {
- PyErr_SetString(PyExc_BufferError,
- "destination is too small to receive data from source");
- PyObject_ReleaseBuffer(dest, &view_dest);
- PyObject_ReleaseBuffer(src, &view_src);
- return -1;
- }
-
- if ((PyBuffer_IsContiguous(&view_dest, 'C') &&
- PyBuffer_IsContiguous(&view_src, 'C')) ||
- (PyBuffer_IsContiguous(&view_dest, 'F') &&
- PyBuffer_IsContiguous(&view_src, 'F'))) {
- /* simplest copy is all that is needed */
- memcpy(view_dest.buf, view_src.buf, view_src.len);
- PyObject_ReleaseBuffer(dest, &view_dest);
- PyObject_ReleaseBuffer(src, &view_src);
- return 0;
- }
-
- /* Otherwise a more elaborate copy scheme is needed */
-
+ Py_buffer view_dest, view_src;
+ int k;
+ Py_ssize_t *indices, elements;
+ char *dptr, *sptr;
+
+ if (!PyObject_CheckBuffer(dest) ||
+ !PyObject_CheckBuffer(src)) {
+ PyErr_SetString(PyExc_TypeError,
+ "both destination and source must have the "\
+ "buffer interface");
+ return -1;
+ }
+
+ if (PyObject_GetBuffer(dest, &view_dest, PyBUF_FULL) != 0) return -1;
+ if (PyObject_GetBuffer(src, &view_src, PyBUF_FULL_RO) != 0) {
+ PyObject_ReleaseBuffer(dest, &view_dest);
+ return -1;
+ }
+
+ if (view_dest.len < view_src.len) {
+ PyErr_SetString(PyExc_BufferError,
+ "destination is too small to receive data from source");
+ PyObject_ReleaseBuffer(dest, &view_dest);
+ PyObject_ReleaseBuffer(src, &view_src);
+ return -1;
+ }
+
+ if ((PyBuffer_IsContiguous(&view_dest, 'C') &&
+ PyBuffer_IsContiguous(&view_src, 'C')) ||
+ (PyBuffer_IsContiguous(&view_dest, 'F') &&
+ PyBuffer_IsContiguous(&view_src, 'F'))) {
+ /* simplest copy is all that is needed */
+ memcpy(view_dest.buf, view_src.buf, view_src.len);
+ PyObject_ReleaseBuffer(dest, &view_dest);
+ PyObject_ReleaseBuffer(src, &view_src);
+ return 0;
+ }
+
+ /* Otherwise a more elaborate copy scheme is needed */
+
/* XXX(nnorwitz): need to check for overflow! */
- indices = (Py_ssize_t *)PyMem_Malloc(sizeof(Py_ssize_t)*view_src.ndim);
- if (indices == NULL) {
- PyErr_NoMemory();
- PyObject_ReleaseBuffer(dest, &view_dest);
- PyObject_ReleaseBuffer(src, &view_src);
- return -1;
- }
- for (k=0; k<view_src.ndim;k++) {
- indices[k] = 0;
- }
- elements = 1;
- for (k=0; k<view_src.ndim; k++) {
+ indices = (Py_ssize_t *)PyMem_Malloc(sizeof(Py_ssize_t)*view_src.ndim);
+ if (indices == NULL) {
+ PyErr_NoMemory();
+ PyObject_ReleaseBuffer(dest, &view_dest);
+ PyObject_ReleaseBuffer(src, &view_src);
+ return -1;
+ }
+ for (k=0; k<view_src.ndim;k++) {
+ indices[k] = 0;
+ }
+ elements = 1;
+ for (k=0; k<view_src.ndim; k++) {
/* XXX(nnorwitz): can this overflow? */
- elements *= view_src.shape[k];
- }
- while (elements--) {
- _add_one_to_index_C(view_src.ndim, indices, view_src.shape);
- dptr = PyBuffer_GetPointer(&view_dest, indices);
- sptr = PyBuffer_GetPointer(&view_src, indices);
- memcpy(dptr, sptr, view_src.itemsize);
- }
- PyMem_Free(indices);
- PyObject_ReleaseBuffer(dest, &view_dest);
- PyObject_ReleaseBuffer(src, &view_src);
- return 0;
+ elements *= view_src.shape[k];
+ }
+ while (elements--) {
+ _add_one_to_index_C(view_src.ndim, indices, view_src.shape);
+ dptr = PyBuffer_GetPointer(&view_dest, indices);
+ sptr = PyBuffer_GetPointer(&view_src, indices);
+ memcpy(dptr, sptr, view_src.itemsize);
+ }
+ PyMem_Free(indices);
+ PyObject_ReleaseBuffer(dest, &view_dest);
+ PyObject_ReleaseBuffer(src, &view_src);
+ return 0;
}
void
PyBuffer_FillContiguousStrides(int nd, Py_ssize_t *shape,
- Py_ssize_t *strides, int itemsize,
- char fort)
-{
- int k;
- Py_ssize_t sd;
-
- sd = itemsize;
- if (fort == 'F') {
- for (k=0; k<nd; k++) {
- strides[k] = sd;
- sd *= shape[k];
- }
- }
- else {
- for (k=nd-1; k>=0; k--) {
- strides[k] = sd;
- sd *= shape[k];
- }
- }
- return;
+ Py_ssize_t *strides, int itemsize,
+ char fort)
+{
+ int k;
+ Py_ssize_t sd;
+
+ sd = itemsize;
+ if (fort == 'F') {
+ for (k=0; k<nd; k++) {
+ strides[k] = sd;
+ sd *= shape[k];
+ }
+ }
+ else {
+ for (k=nd-1; k>=0; k--) {
+ strides[k] = sd;
+ sd *= shape[k];
+ }
+ }
+ return;
}
int
PyBuffer_FillInfo(Py_buffer *view, void *buf, Py_ssize_t len,
- int readonly, int flags)
-{
- if (view == NULL) return 0;
- if (((flags & PyBUF_LOCKDATA) == PyBUF_LOCKDATA) &&
- readonly != -1) {
- PyErr_SetString(PyExc_BufferError,
- "Cannot make this object read-only.");
- return -1;
- }
- if (((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE) &&
- readonly == 1) {
- PyErr_SetString(PyExc_BufferError,
- "Object is not writable.");
- return -1;
- }
-
- view->buf = buf;
- view->len = len;
- view->readonly = readonly;
- view->itemsize = 1;
- view->format = NULL;
- if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT)
- view->format = "B";
- view->ndim = 1;
- view->shape = NULL;
- if ((flags & PyBUF_ND) == PyBUF_ND)
- view->shape = &(view->len);
- view->strides = NULL;
- if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES)
- view->strides = &(view->itemsize);
- view->suboffsets = NULL;
- view->internal = NULL;
- return 0;
+ int readonly, int flags)
+{
+ if (view == NULL) return 0;
+ if (((flags & PyBUF_LOCK) == PyBUF_LOCK) &&
+ readonly >= 0) {
+ PyErr_SetString(PyExc_BufferError,
+ "Cannot lock this object.");
+ return -1;
+ }
+ if (((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE) &&
+ (readonly == 1)) {
+ PyErr_SetString(PyExc_BufferError,
+ "Object is not writable.");
+ return -1;
+ }
+
+ view->buf = buf;
+ view->len = len;
+ view->readonly = readonly;
+ view->itemsize = 1;
+ view->format = NULL;
+ if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT)
+ view->format = "B";
+ view->ndim = 1;
+ view->shape = NULL;
+ if ((flags & PyBUF_ND) == PyBUF_ND)
+ view->shape = &(view->len);
+ view->strides = NULL;
+ if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES)
+ view->strides = &(view->itemsize);
+ view->suboffsets = NULL;
+ view->internal = NULL;
+ return 0;
}
/* Operations on numbers */
@@ -1025,7 +1025,7 @@ PyNumber_InPlaceAdd(PyObject *v, PyObject *w)
Py_DECREF(result);
if (m != NULL) {
binaryfunc f = NULL;
- f = m->sq_inplace_concat;
+ f = m->sq_inplace_concat;
if (f == NULL)
f = m->sq_concat;
if (f != NULL)
@@ -1304,8 +1304,8 @@ PyNumber_Float(PyObject *o)
PyObject *res = m->nb_float(o);
if (res && !PyFloat_Check(res)) {
PyErr_Format(PyExc_TypeError,
- "__float__ returned non-float (type %.200s)",
- res->ob_type->tp_name);
+ "__float__ returned non-float (type %.200s)",
+ res->ob_type->tp_name);
Py_DECREF(res);
return NULL;
}
@@ -1384,7 +1384,7 @@ PySequence_Concat(PyObject *s, PyObject *o)
return m->sq_concat(s, o);
/* Instances of user classes defining an __add__() method only
- have an nb_add slot, not an sq_concat slot. So we fall back
+ have an nb_add slot, not an sq_concat slot. So we fall back
to nb_add if both arguments appear to be sequences. */
if (PySequence_Check(s) && PySequence_Check(o)) {
PyObject *result = binary_op1(s, o, NB_SLOT(nb_add));
@@ -1629,7 +1629,7 @@ PyObject *
PySequence_Tuple(PyObject *v)
{
PyObject *it; /* iter(v) */
- Py_ssize_t n; /* guess for result tuple size */
+ Py_ssize_t n; /* guess for result tuple size */
PyObject *result;
Py_ssize_t j;
@@ -1662,7 +1662,7 @@ PySequence_Tuple(PyObject *v)
return NULL;
}
PyErr_Clear();
- n = 10; /* arbitrary */
+ n = 10; /* arbitrary */
}
result = PyTuple_New(n);
if (result == NULL)
@@ -1718,7 +1718,7 @@ PyObject *
PySequence_List(PyObject *v)
{
PyObject *result; /* result list */
- PyObject *rv; /* return value from PyList_Extend */
+ PyObject *rv; /* return value from PyList_Extend */
if (v == NULL)
return null_error();
@@ -1749,7 +1749,7 @@ PySequence_Fast(PyObject *v, const char *m)
return v;
}
- it = PyObject_GetIter(v);
+ it = PyObject_GetIter(v);
if (it == NULL) {
if (PyErr_ExceptionMatches(PyExc_TypeError))
PyErr_SetString(PyExc_TypeError, m);
@@ -1763,9 +1763,9 @@ PySequence_Fast(PyObject *v, const char *m)
}
/* Iterate over seq. Result depends on the operation:
- PY_ITERSEARCH_COUNT: -1 if error, else # of times obj appears in seq.
- PY_ITERSEARCH_INDEX: 0-based index of first occurence of obj in seq;
- set ValueError and return -1 if none found; also return -1 on error.
+ PY_ITERSEARCH_COUNT: -1 if error, else # of times obj appears in seq.
+ PY_ITERSEARCH_INDEX: 0-based index of first occurence of obj in seq;
+ set ValueError and return -1 if none found; also return -1 on error.
Py_ITERSEARCH_CONTAINS: return 1 if obj in seq, else 0; -1 on error.
*/
Py_ssize_t
@@ -1839,7 +1839,7 @@ _PySequence_IterSearch(PyObject *seq, PyObject *obj, int operation)
goto Done;
PyErr_SetString(PyExc_ValueError,
- "sequence.index(x): x not in sequence");
+ "sequence.index(x): x not in sequence");
/* fall into failure code */
Fail:
n = -1;
@@ -1865,7 +1865,7 @@ PySequence_Contains(PyObject *seq, PyObject *ob)
{
Py_ssize_t result;
PySequenceMethods *sqm = seq->ob_type->tp_as_sequence;
- if (sqm != NULL && sqm->sq_contains != NULL)
+ if (sqm != NULL && sqm->sq_contains != NULL)
return (*sqm->sq_contains)(seq, ob);
result = _PySequence_IterSearch(seq, ob, PY_ITERSEARCH_CONTAINS);
return Py_SAFE_DOWNCAST(result, Py_ssize_t, int);
@@ -1890,7 +1890,7 @@ PySequence_Index(PyObject *s, PyObject *o)
int
PyMapping_Check(PyObject *o)
{
- return o && o->ob_type->tp_as_mapping &&
+ return o && o->ob_type->tp_as_mapping &&
o->ob_type->tp_as_mapping->mp_subscript;
}
@@ -2044,7 +2044,7 @@ PyObject_CallObject(PyObject *o, PyObject *a)
PyObject *
PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw)
{
- ternaryfunc call;
+ ternaryfunc call;
if ((call = func->ob_type->tp_call) != NULL) {
PyObject *result;
@@ -2308,7 +2308,7 @@ PyObject_CallFunctionObjArgs(PyObject *callable, ...)
* produce exactly the same results: NULL is returned and no error is set.
*
* If some exception other than AttributeError is raised, then NULL is also
- * returned, but the exception is not cleared. That's because we want the
+ * returned, but the exception is not cleared. That's because we want the
* exception to be propagated along.
*
* Callers are expected to test for PyErr_Occurred() when the return value
@@ -2336,7 +2336,7 @@ abstract_get_bases(PyObject *cls)
return NULL;
}
if (!PyTuple_Check(bases)) {
- Py_DECREF(bases);
+ Py_DECREF(bases);
return NULL;
}
return bases;
@@ -2428,18 +2428,18 @@ recursive_isinstance(PyObject *inst, PyObject *cls, int recursion_depth)
else if (PyTuple_Check(cls)) {
Py_ssize_t i, n;
- if (!recursion_depth) {
- PyErr_SetString(PyExc_RuntimeError,
- "nest level of tuple too deep");
- return -1;
- }
+ if (!recursion_depth) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "nest level of tuple too deep");
+ return -1;
+ }
n = PyTuple_GET_SIZE(cls);
for (i = 0; i < n; i++) {
retval = recursive_isinstance(
- inst,
- PyTuple_GET_ITEM(cls, i),
- recursion_depth-1);
+ inst,
+ PyTuple_GET_ITEM(cls, i),
+ recursion_depth-1);
if (retval != 0)
break;
}
@@ -2490,12 +2490,12 @@ PyObject_IsInstance(PyObject *inst, PyObject *cls)
return recursive_isinstance(inst, cls, Py_GetRecursionLimit());
}
-static int
+static int
recursive_issubclass(PyObject *derived, PyObject *cls, int recursion_depth)
{
int retval;
- {
+ {
if (!check_class(derived,
"issubclass() arg 1 must be a class"))
return -1;
@@ -2504,16 +2504,16 @@ recursive_issubclass(PyObject *derived, PyObject *cls, int recursion_depth)
Py_ssize_t i;
Py_ssize_t n = PyTuple_GET_SIZE(cls);
- if (!recursion_depth) {
- PyErr_SetString(PyExc_RuntimeError,
- "nest level of tuple too deep");
- return -1;
- }
+ if (!recursion_depth) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "nest level of tuple too deep");
+ return -1;
+ }
for (i = 0; i < n; ++i) {
retval = recursive_issubclass(
- derived,
- PyTuple_GET_ITEM(cls, i),
- recursion_depth-1);
+ derived,
+ PyTuple_GET_ITEM(cls, i),
+ recursion_depth-1);
if (retval != 0) {
/* either found it, or got an error */
return retval;
@@ -2590,7 +2590,7 @@ PyObject_GetIter(PyObject *o)
* If the iteration terminates normally, return NULL and clear the
* PyExc_StopIteration exception (if it was set). PyErr_Occurred()
* will be false.
- * Else return the next object. PyErr_Occurred() will be false.
+ * Else return the next object. PyErr_Occurred() will be false.
*/
PyObject *
PyIter_Next(PyObject *iter)
diff --git a/Objects/memoryobject.c b/Objects/memoryobject.c
index f2f7505..02f21ce 100644
--- a/Objects/memoryobject.c
+++ b/Objects/memoryobject.c
@@ -196,7 +196,9 @@ _indirect_copy_nd(char *dest, Py_buffer *view, char fort)
a contiguous buffer if it is not. The view will point to
the shadow buffer which can be written to and then
will be copied back into the other buffer when the memory
- view is de-allocated.
+ view is de-allocated. While the shadow buffer is
+ being used, it will have an exclusive write lock on
+ the original buffer.
*/
PyObject *
@@ -224,7 +226,7 @@ PyMemoryView_GetContiguous(PyObject *obj, int buffertype, char fort)
flags = PyBUF_FULL;
break;
case PyBUF_SHADOW:
- flags = PyBUF_FULL_LCK;
+ flags = PyBUF_FULL_XLCK;
break;
}
@@ -431,11 +433,7 @@ memory_dealloc(PyMemoryViewObject *self)
static PyObject *
memory_repr(PyMemoryViewObject *self)
{
- /* XXX(nnorwitz): the code should be different or remove condition. */
- if (self->base == NULL)
- return PyUnicode_FromFormat("<memory at %p>", self);
- else
- return PyUnicode_FromFormat("<memory at %p>", self);
+ return PyUnicode_FromFormat("<memory at %p>", self);
}
@@ -502,6 +500,14 @@ memory_subscript(PyMemoryViewObject *self, PyObject *key)
/* Return a bytes object */
char *ptr;
ptr = (char *)view->buf;
+ if (result < 0) {
+ result += view->shape[0];
+ }
+ if ((result < 0) || (result > view->shape[0])) {
+ PyErr_SetString(PyExc_IndexError,
+ "index out of bounds");
+ return NULL;
+ }
if (view->strides == NULL)
ptr += view->itemsize * result;
else
@@ -517,14 +523,20 @@ memory_subscript(PyMemoryViewObject *self, PyObject *key)
/* Return a new memory-view object */
Py_buffer newview;
memset(&newview, 0, sizeof(newview));
+ /* XXX: This needs to be fixed so it
+ actually returns a sub-view
+ */
return PyMemoryView_FromMemory(&newview);
}
}
+ /* Need to support getting a sliced view */
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
}
+
+/* Need to support assigning memory if we can */
static int
memory_ass_sub(PyMemoryViewObject *self, PyObject *key, PyObject *value)
{