author     Raymond Lu <songyulu@hdfgroup.org>    2003-02-04 18:50:56 (GMT)
committer  Raymond Lu <songyulu@hdfgroup.org>    2003-02-04 18:50:56 (GMT)
commit     3879dcce1b831fd553ff15256661ca7cc23ca70d (patch)
tree       f4a6ed3b08b7ac2926b396f497fdd62faf35bb6c
parent     092a41fe51ea58ab388d5f5d577f7d4a16a1f5b8 (diff)
[svn-r6375]

Purpose:
    New feature

Description:
    Added Adler32 checksum as a filter in the I/O pipeline.

Platforms tested:
    arabica (fortran), eirene (C++), modi4 (parallel, fortran)

Misc. update:
    Updated release_docs/RELEASE.
-rwxr-xr-x  configure                   9
-rw-r--r--  configure.in                6
-rw-r--r--  release_docs/RELEASE.txt    2
-rw-r--r--  src/H5D.c                  12
-rw-r--r--  src/H5Distore.c            94
-rw-r--r--  src/H5Dprivate.h            8
-rw-r--r--  src/H5Fistore.c            94
-rw-r--r--  src/H5Pdcpl.c              42
-rw-r--r--  src/H5Pdxpl.c             125
-rw-r--r--  src/H5Ppublic.h             5
-rw-r--r--  src/H5Tconv.c               2
-rw-r--r--  src/H5Tvlen.c              10
-rw-r--r--  src/H5Z.c                  71
-rw-r--r--  src/H5Zadler32.c          139
-rw-r--r--  src/H5Zdeflate.c           10
-rw-r--r--  src/H5Zprivate.h           11
-rw-r--r--  src/H5Zpublic.h            31
-rw-r--r--  src/H5Zshuffle.c           23
-rw-r--r--  src/H5config.h.in           3
-rw-r--r--  src/Makefile.in             2
-rw-r--r--  test/dsets.c              508
21 files changed, 987 insertions(+), 220 deletions(-)
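For orientation, here is a minimal usage sketch of the public API added by this patch; the file name, dataset name, shape, and the callback are made up for illustration and error checking is omitted. H5Pset_adler32() attaches the checksum filter to a chunked dataset's creation property list, while H5Pset_edc_check() and H5Pset_filter_callback() control what happens when verification fails on read.

    static H5Z_cb_return_t
    my_filter_cb(H5Z_filter_t filter, void *buf, size_t buf_size, void *op_data)
    {
        (void)buf; (void)buf_size; (void)op_data;   /* unused in this sketch */
        /* Continue reading if the Adler32 check fails; fail for any other filter */
        return (H5Z_FILTER_ADLER32 == filter) ? H5Z_CB_CONT : H5Z_CB_FAIL;
    }

    void example(void)
    {
        hsize_t    dims[2]  = {100, 200};
        hsize_t    chunk[2] = {10, 20};
        static int data[100][200];
        hid_t      file, space, dcpl, dset, dxpl;

        file  = H5Fcreate("checksum.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        space = H5Screate_simple(2, dims, NULL);

        /* The checksum filter only works on chunked datasets */
        dcpl = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 2, chunk);
        H5Pset_adler32(dcpl);                      /* append Adler32 to the pipeline */

        dset = H5Dcreate(file, "dset", H5T_NATIVE_INT, space, dcpl);
        H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

        /* Read-side behavior is controlled through the transfer property list */
        dxpl = H5Pcreate(H5P_DATASET_XFER);
        H5Pset_edc_check(dxpl, H5Z_ENABLE_EDC);            /* verify checksums on read */
        H5Pset_filter_callback(dxpl, my_filter_cb, NULL);  /* decide what a failure means */
        H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, data);

        H5Pclose(dxpl); H5Pclose(dcpl);
        H5Dclose(dset); H5Sclose(space); H5Fclose(file);
    }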
diff --git a/configure b/configure
index a9d5ed9..34ca1c4 100755
--- a/configure
+++ b/configure
@@ -30341,7 +30341,7 @@ if test "${enable_filters+set}" = set; then
fi;
-all_filters="shuffle"
+all_filters="shuffle,adler32"
case "X-$FILTERS" in
X-|X-all)
FILTERS=$all_filters
@@ -30368,6 +30368,13 @@ cat >>confdefs.h <<\_ACEOF
_ACEOF
fi
+ if test $filter = "ADLER32"; then
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_FILTER_ADLER32 1
+_ACEOF
+
+ fi
done
fi
diff --git a/configure.in b/configure.in
index 4c3b3db..af929fe 100644
--- a/configure.in
+++ b/configure.in
@@ -1871,7 +1871,7 @@ AC_ARG_ENABLE([filters],
AC_SUBST([FILTERS])
dnl Eventually: all_filters="shuffle,foo,bar,baz"
-all_filters="shuffle"
+all_filters="shuffle,adler32"
case "X-$FILTERS" in
X-|X-all)
FILTERS=$all_filters
@@ -1896,6 +1896,10 @@ dnl
AC_DEFINE(HAVE_FILTER_SHUFFLE, 1,
[Define if support for shuffle filter is enabled])
fi
+ if test $filter = "ADLER32"; then
+ AC_DEFINE(HAVE_FILTER_ADLER32, 1,
+ [Define if support for Adler32 checksum is enabled])
+ fi
done
fi
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index 34fe285..4c0115b 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -250,6 +250,8 @@ Documentation
New Features
============
+ * Added Adler32 checksum as a filter in the pipeline. It only works with
+ chunked datasets. SLU - 2003/2/4
* MPICH/MPE instrumentation feature added. Use --enable-mpe to configure
it. AKC - 2003/1/3
* New functions H5Gget_num_objs, H5Gget_objname_by_idx and H5Gget_objtype_by_idx
diff --git a/src/H5D.c b/src/H5D.c
index 7bdef97..0aa194a 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -167,6 +167,8 @@ H5D_init_interface(void)
* - Default value for file driver info
* - Default value for 'gather reads' property
* - Default value for vector size
+ * - Default value for EDC property
+ * - Default value for filter callback
*/
H5P_genclass_t *xfer_pclass;
size_t def_max_temp_buf = H5D_XFER_MAX_TEMP_BUF_DEF;
@@ -185,6 +187,8 @@ H5D_init_interface(void)
hid_t def_vfl_id = H5D_XFER_VFL_ID_DEF;
void *def_vfl_info = H5D_XFER_VFL_INFO_DEF;
size_t def_hyp_vec_size = H5D_XFER_HYPER_VECTOR_SIZE_DEF;
+ H5Z_EDC_t enable_edc = H5D_XFER_EDC_DEF;
+ H5Z_cb_t filter_cb = H5D_XFER_FILTER_CB_DEF;
/* Dataset creation property class variables. In sequence, they are,
* - Creation property list class to modify
@@ -286,6 +290,14 @@ H5D_init_interface(void)
/* Register the vector size property */
if(H5P_register(xfer_pclass,H5D_XFER_HYPER_VECTOR_SIZE_NAME,H5D_XFER_HYPER_VECTOR_SIZE_SIZE,&def_hyp_vec_size,NULL,NULL,NULL,NULL,NULL,NULL)<0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class");
+
+ /* Register the EDC property */
+ if(H5P_register(xfer_pclass,H5D_XFER_EDC_NAME,H5D_XFER_EDC_SIZE,&enable_edc,NULL,NULL,NULL,NULL,NULL,NULL)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class");
+
+ /* Register the filter callback property */
+ if(H5P_register(xfer_pclass,H5D_XFER_FILTER_CB_NAME,H5D_XFER_FILTER_CB_SIZE,&filter_cb,NULL,NULL,NULL,NULL,NULL,NULL)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class");
} /* end if */
/* Only register the default property list if it hasn't been created yet */
diff --git a/src/H5Distore.c b/src/H5Distore.c
index aa88704..eefa690 100644
--- a/src/H5Distore.c
+++ b/src/H5Distore.c
@@ -898,12 +898,15 @@ H5F_istore_flush_entry(H5F_t *f, H5F_rdcc_ent_t *ent, hbool_t reset)
void *buf=NULL; /*temporary buffer */
size_t alloc; /*bytes allocated for BUF */
hbool_t point_of_no_return = FALSE;
-
+ H5Z_cb_t cb_struct={NULL,NULL};
+ H5Z_EDC_t edc=H5Z_ENABLE_EDC;
+
FUNC_ENTER_NOINIT(H5F_istore_flush_entry);
assert(f);
assert(ent);
assert(!ent->locked);
+ HDmemset(&udata, 0, sizeof(H5F_istore_ud1_t));
buf = ent->chunk;
if (ent->dirty) {
@@ -941,8 +944,11 @@ H5F_istore_flush_entry(H5F_t *f, H5F_rdcc_ent_t *ent, hbool_t reset)
point_of_no_return = TRUE;
ent->chunk = NULL;
}
- if (H5Z_pipeline(f, ent->pline, 0, &(udata.key.filter_mask),
- &(udata.key.nbytes), &alloc, &buf)<0) {
+ /* Don't know whether we should involve the transfer property list, so
+ * just pass in H5Z_ENABLE_EDC (which only affects reads) and the default
+ * callback setting. */
+ if (H5Z_pipeline(f, ent->pline, 0, &(udata.key.filter_mask), edc,
+ cb_struct, &(udata.key.nbytes), &alloc, &buf)<0) {
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL,
"output pipeline failed");
}
@@ -1322,13 +1328,19 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
H5F_istore_ud1_t udata; /*B-tree pass-through */
size_t chunk_size=0; /*size of a chunk */
hsize_t tempchunk_size;
- size_t chunk_alloc=0; /*allocated chunk size */
herr_t status; /*func return status */
void *chunk=NULL; /*the file chunk */
void *ret_value; /*return value */
H5P_genplist_t *plist=NULL; /* Property list */
+ H5Z_EDC_t edc;
+ H5Z_cb_t cb_struct;
FUNC_ENTER_NOINIT(H5F_istore_lock);
+
+ assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
+ plist=H5I_object(dxpl_id);
+ assert(plist!=NULL);
+ HDmemset(&udata, 0, sizeof(H5F_istore_ud1_t));
if (rdcc->nslots>0) {
for (u=0, temp_idx=0; u<layout->ndims; u++) {
@@ -1371,11 +1383,11 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
for (u=0, tempchunk_size=1; u<layout->ndims; u++)
tempchunk_size *= layout->dim[u];
H5_ASSIGN_OVERFLOW(chunk_size,tempchunk_size,hsize_t,size_t);
- chunk_alloc = chunk_size;
- if (NULL==(chunk=H5MM_malloc (chunk_alloc)))
+ if (NULL==(chunk=H5MM_malloc (chunk_size)))
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk");
} else {
+
/*
* Not in the cache. Read it from the file and count this as a miss
* if it's in the file or an init if it isn't.
@@ -1385,40 +1397,54 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
tempchunk_size *= layout->dim[u];
}
H5_ASSIGN_OVERFLOW(chunk_size,tempchunk_size,hsize_t,size_t);
- chunk_alloc = chunk_size;
udata.mesg = *layout;
udata.addr = HADDR_UNDEF;
status = H5B_find (f, H5B_ISTORE, layout->addr, &udata);
H5E_clear ();
- if (NULL==(chunk = H5MM_malloc (chunk_alloc)))
- HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk");
+
if (status>=0 && H5F_addr_defined(udata.addr)) {
+ size_t chunk_alloc=0; /*allocated chunk size */
+
/*
* The chunk exists on disk.
*/
+ /* Chunk size on disk isn't [likely] the same size as the final chunk
+ * size in memory, so allocate memory big enough. */
+ chunk_alloc = udata.key.nbytes;
+ if (NULL==(chunk = H5MM_malloc (chunk_alloc)))
+ HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk");
if (H5F_block_read(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, H5P_DATASET_XFER_DEFAULT, chunk)<0)
HGOTO_ERROR (H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk");
- if (H5Z_pipeline(f, pline, H5Z_FLAG_REVERSE,
- &(udata.key.filter_mask), &(udata.key.nbytes),
- &chunk_alloc, &chunk)<0 || udata.key.nbytes!=chunk_size)
+ if(H5P_get(plist,H5D_XFER_EDC_NAME,&edc)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get edc information");
+ if(H5P_get(plist,H5D_XFER_FILTER_CB_NAME,&cb_struct)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get filter callback struct");
+ if (H5Z_pipeline(f, pline, H5Z_FLAG_REVERSE, &(udata.key.filter_mask), edc,
+ cb_struct, &(udata.key.nbytes), &chunk_alloc, &chunk)<0) {
HGOTO_ERROR(H5E_PLINE, H5E_READERROR, NULL, "data pipeline read failed");
+ }
rdcc->nmisses++;
- } else if (fill && fill->buf) {
- /*
- * The chunk doesn't exist in the file. Replicate the fill
- * value throughout the chunk.
- */
- assert(0==chunk_size % fill->size);
- H5V_array_fill(chunk, fill->buf, fill->size, chunk_size/fill->size);
- rdcc->ninits++;
} else {
- /*
- * The chunk doesn't exist in the file and no fill value was
- * specified. Assume all zeros.
- */
- HDmemset (chunk, 0, chunk_size);
+ /* The chunk doesn't exist on disk, so allocate a buffer for the
+ * full chunk size in memory. */
+ if (NULL==(chunk = H5MM_malloc (chunk_size)))
+ HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk");
+ if (fill && fill->buf) {
+ /*
+ * The chunk doesn't exist in the file. Replicate the fill
+ * value throughout the chunk.
+ */
+ assert(0==chunk_size % fill->size);
+ H5V_array_fill(chunk, fill->buf, fill->size, chunk_size/fill->size);
+ } else {
+ /*
+ * The chunk doesn't exist in the file and no fill value was
+ * specified. Assume all zeros.
+ */
+ HDmemset (chunk, 0, chunk_size);
+ }
rdcc->ninits++;
- }
+ } /* end else */
}
assert (found || chunk_size>0);
@@ -1436,6 +1462,7 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
if (H5F_istore_preempt(f, ent, TRUE)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk from cache");
}
+
if (H5F_istore_prune(f, chunk_size)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk(s) from cache");
@@ -1453,9 +1480,6 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
ent->wr_count = chunk_size;
ent->chunk = chunk;
- assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
- plist=H5I_object(dxpl_id);
- assert(plist!=NULL);
H5P_get(plist,H5D_XFER_BTREE_SPLIT_RATIO_NAME,&(ent->split_ratios));
/* Add it to the cache */
@@ -1476,7 +1500,6 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
ent->prev = NULL;
}
found = TRUE;
-
} else if (!found) {
/*
* The chunk is larger than the entire cache so we don't cache it.
@@ -1523,7 +1546,8 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
done:
if (!ret_value)
- H5MM_xfree (chunk);
+ if(chunk)
+ H5MM_xfree (chunk);
FUNC_LEAVE_NOAPI(ret_value);
}
@@ -2337,6 +2361,8 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
unsigned chunk_exists; /* Flag to indicate whether a chunk exists already */
int i; /* Local index variable */
unsigned u; /* Local index variable */
+ H5Z_EDC_t edc; /* Decide whether to enable EDC for read */
+ H5Z_cb_t cb_struct;
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5F_istore_allocate, FAIL);
@@ -2363,6 +2389,10 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list");
if(H5P_get(dx_plist,H5D_XFER_BTREE_SPLIT_RATIO_NAME,split_ratios)<0)
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "can't get B-tree split ratios");
+ if(H5P_get(dx_plist,H5D_XFER_EDC_NAME,&edc)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get edc information");
+ if(H5P_get(dx_plist,H5D_XFER_FILTER_CB_NAME,&cb_struct)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get filter callback struct");
#ifdef H5_HAVE_PARALLEL
/* Retrieve up MPI parameters */
@@ -2435,7 +2465,7 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
size_t nbytes=(size_t)chunk_size;
/* Push the chunk through the filters */
- if (H5Z_pipeline(f, &pline, 0, &filter_mask, &nbytes, &buf_size, &chunk)<0)
+ if (H5Z_pipeline(f, &pline, 0, &filter_mask, edc, cb_struct, &nbytes, &buf_size, &chunk)<0)
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed");
/* Keep the number of bytes the chunk turned in to */
diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h
index cfef073..239a187 100644
--- a/src/H5Dprivate.h
+++ b/src/H5Dprivate.h
@@ -140,6 +140,14 @@
#define H5D_XFER_HYPER_VECTOR_SIZE_NAME "vec_size"
#define H5D_XFER_HYPER_VECTOR_SIZE_SIZE sizeof(size_t)
#define H5D_XFER_HYPER_VECTOR_SIZE_DEF 1024
+/* Definitions for EDC property */
+#define H5D_XFER_EDC_NAME "error-detecting"
+#define H5D_XFER_EDC_SIZE sizeof(H5Z_EDC_t)
+#define H5D_XFER_EDC_DEF H5Z_ENABLE_EDC
+/* Definitions for filter callback function property */
+#define H5D_XFER_FILTER_CB_NAME "filter_cb"
+#define H5D_XFER_FILTER_CB_SIZE sizeof(H5Z_cb_t)
+#define H5D_XFER_FILTER_CB_DEF {NULL,NULL}
/*
* A dataset is the following struct.
diff --git a/src/H5Fistore.c b/src/H5Fistore.c
index aa88704..eefa690 100644
--- a/src/H5Fistore.c
+++ b/src/H5Fistore.c
@@ -898,12 +898,15 @@ H5F_istore_flush_entry(H5F_t *f, H5F_rdcc_ent_t *ent, hbool_t reset)
void *buf=NULL; /*temporary buffer */
size_t alloc; /*bytes allocated for BUF */
hbool_t point_of_no_return = FALSE;
-
+ H5Z_cb_t cb_struct={NULL,NULL};
+ H5Z_EDC_t edc=H5Z_ENABLE_EDC;
+
FUNC_ENTER_NOINIT(H5F_istore_flush_entry);
assert(f);
assert(ent);
assert(!ent->locked);
+ HDmemset(&udata, 0, sizeof(H5F_istore_ud1_t));
buf = ent->chunk;
if (ent->dirty) {
@@ -941,8 +944,11 @@ H5F_istore_flush_entry(H5F_t *f, H5F_rdcc_ent_t *ent, hbool_t reset)
point_of_no_return = TRUE;
ent->chunk = NULL;
}
- if (H5Z_pipeline(f, ent->pline, 0, &(udata.key.filter_mask),
- &(udata.key.nbytes), &alloc, &buf)<0) {
+ /* Don't know whether we should involve the transfer property list, so
+ * just pass in H5Z_ENABLE_EDC (which only affects reads) and the default
+ * callback setting. */
+ if (H5Z_pipeline(f, ent->pline, 0, &(udata.key.filter_mask), edc,
+ cb_struct, &(udata.key.nbytes), &alloc, &buf)<0) {
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL,
"output pipeline failed");
}
@@ -1322,13 +1328,19 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
H5F_istore_ud1_t udata; /*B-tree pass-through */
size_t chunk_size=0; /*size of a chunk */
hsize_t tempchunk_size;
- size_t chunk_alloc=0; /*allocated chunk size */
herr_t status; /*func return status */
void *chunk=NULL; /*the file chunk */
void *ret_value; /*return value */
H5P_genplist_t *plist=NULL; /* Property list */
+ H5Z_EDC_t edc;
+ H5Z_cb_t cb_struct;
FUNC_ENTER_NOINIT(H5F_istore_lock);
+
+ assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
+ plist=H5I_object(dxpl_id);
+ assert(plist!=NULL);
+ HDmemset(&udata, 0, sizeof(H5F_istore_ud1_t));
if (rdcc->nslots>0) {
for (u=0, temp_idx=0; u<layout->ndims; u++) {
@@ -1371,11 +1383,11 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
for (u=0, tempchunk_size=1; u<layout->ndims; u++)
tempchunk_size *= layout->dim[u];
H5_ASSIGN_OVERFLOW(chunk_size,tempchunk_size,hsize_t,size_t);
- chunk_alloc = chunk_size;
- if (NULL==(chunk=H5MM_malloc (chunk_alloc)))
+ if (NULL==(chunk=H5MM_malloc (chunk_size)))
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk");
} else {
+
/*
* Not in the cache. Read it from the file and count this as a miss
* if it's in the file or an init if it isn't.
@@ -1385,40 +1397,54 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
tempchunk_size *= layout->dim[u];
}
H5_ASSIGN_OVERFLOW(chunk_size,tempchunk_size,hsize_t,size_t);
- chunk_alloc = chunk_size;
udata.mesg = *layout;
udata.addr = HADDR_UNDEF;
status = H5B_find (f, H5B_ISTORE, layout->addr, &udata);
H5E_clear ();
- if (NULL==(chunk = H5MM_malloc (chunk_alloc)))
- HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk");
+
if (status>=0 && H5F_addr_defined(udata.addr)) {
+ size_t chunk_alloc=0; /*allocated chunk size */
+
/*
* The chunk exists on disk.
*/
+ /* Chunk size on disk isn't [likely] the same size as the final chunk
+ * size in memory, so allocate memory big enough. */
+ chunk_alloc = udata.key.nbytes;
+ if (NULL==(chunk = H5MM_malloc (chunk_alloc)))
+ HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk");
if (H5F_block_read(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, H5P_DATASET_XFER_DEFAULT, chunk)<0)
HGOTO_ERROR (H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk");
- if (H5Z_pipeline(f, pline, H5Z_FLAG_REVERSE,
- &(udata.key.filter_mask), &(udata.key.nbytes),
- &chunk_alloc, &chunk)<0 || udata.key.nbytes!=chunk_size)
+ if(H5P_get(plist,H5D_XFER_EDC_NAME,&edc)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get edc information");
+ if(H5P_get(plist,H5D_XFER_FILTER_CB_NAME,&cb_struct)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get filter callback struct");
+ if (H5Z_pipeline(f, pline, H5Z_FLAG_REVERSE, &(udata.key.filter_mask), edc,
+ cb_struct, &(udata.key.nbytes), &chunk_alloc, &chunk)<0) {
HGOTO_ERROR(H5E_PLINE, H5E_READERROR, NULL, "data pipeline read failed");
+ }
rdcc->nmisses++;
- } else if (fill && fill->buf) {
- /*
- * The chunk doesn't exist in the file. Replicate the fill
- * value throughout the chunk.
- */
- assert(0==chunk_size % fill->size);
- H5V_array_fill(chunk, fill->buf, fill->size, chunk_size/fill->size);
- rdcc->ninits++;
} else {
- /*
- * The chunk doesn't exist in the file and no fill value was
- * specified. Assume all zeros.
- */
- HDmemset (chunk, 0, chunk_size);
+ /* The chunk doesn't exist on disk, so allocate a buffer for the
+ * full chunk size in memory. */
+ if (NULL==(chunk = H5MM_malloc (chunk_size)))
+ HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk");
+ if (fill && fill->buf) {
+ /*
+ * The chunk doesn't exist in the file. Replicate the fill
+ * value throughout the chunk.
+ */
+ assert(0==chunk_size % fill->size);
+ H5V_array_fill(chunk, fill->buf, fill->size, chunk_size/fill->size);
+ } else {
+ /*
+ * The chunk doesn't exist in the file and no fill value was
+ * specified. Assume all zeros.
+ */
+ HDmemset (chunk, 0, chunk_size);
+ }
rdcc->ninits++;
- }
+ } /* end else */
}
assert (found || chunk_size>0);
@@ -1436,6 +1462,7 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
if (H5F_istore_preempt(f, ent, TRUE)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk from cache");
}
+
if (H5F_istore_prune(f, chunk_size)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk(s) from cache");
@@ -1453,9 +1480,6 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
ent->wr_count = chunk_size;
ent->chunk = chunk;
- assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
- plist=H5I_object(dxpl_id);
- assert(plist!=NULL);
H5P_get(plist,H5D_XFER_BTREE_SPLIT_RATIO_NAME,&(ent->split_ratios));
/* Add it to the cache */
@@ -1476,7 +1500,6 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
ent->prev = NULL;
}
found = TRUE;
-
} else if (!found) {
/*
* The chunk is larger than the entire cache so we don't cache it.
@@ -1523,7 +1546,8 @@ H5F_istore_lock(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
done:
if (!ret_value)
- H5MM_xfree (chunk);
+ if(chunk)
+ H5MM_xfree (chunk);
FUNC_LEAVE_NOAPI(ret_value);
}
@@ -2337,6 +2361,8 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
unsigned chunk_exists; /* Flag to indicate whether a chunk exists already */
int i; /* Local index variable */
unsigned u; /* Local index variable */
+ H5Z_EDC_t edc; /* Decide whether to enable EDC for read */
+ H5Z_cb_t cb_struct;
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5F_istore_allocate, FAIL);
@@ -2363,6 +2389,10 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list");
if(H5P_get(dx_plist,H5D_XFER_BTREE_SPLIT_RATIO_NAME,split_ratios)<0)
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "can't get B-tree split ratios");
+ if(H5P_get(dx_plist,H5D_XFER_EDC_NAME,&edc)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get edc information");
+ if(H5P_get(dx_plist,H5D_XFER_FILTER_CB_NAME,&cb_struct)<0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get filter callback struct");
#ifdef H5_HAVE_PARALLEL
/* Retrieve up MPI parameters */
@@ -2435,7 +2465,7 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
size_t nbytes=(size_t)chunk_size;
/* Push the chunk through the filters */
- if (H5Z_pipeline(f, &pline, 0, &filter_mask, &nbytes, &buf_size, &chunk)<0)
+ if (H5Z_pipeline(f, &pline, 0, &filter_mask, edc, cb_struct, &nbytes, &buf_size, &chunk)<0)
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed");
/* Keep the number of bytes the chunk turned in to */
diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c
index 7aafe92..e299c56 100644
--- a/src/H5Pdcpl.c
+++ b/src/H5Pdcpl.c
@@ -800,6 +800,48 @@ done:
FUNC_LEAVE_API(ret_value);
}
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pset_adler32
+ *
+ * Purpose: Adds the Adler32 checksum filter (for error detection) to a
+ * dataset creation property list.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Raymond Lu
+ * Dec 19, 2002
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_adler32(hid_t plist_id)
+{
+ H5O_pline_t pline;
+ H5P_genplist_t *plist; /* Property list pointer */
+ herr_t ret_value=SUCCEED; /* return value */
+
+ FUNC_ENTER_API(H5Pset_adler32, FAIL);
+ H5TRACE1("e","i",plist_id);
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id,H5P_DATASET_CREATE)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID");
+
+ /* Add the Adler32 checksum as a filter */
+ if(H5P_get(plist, H5D_CRT_DATA_PIPELINE_NAME, &pline) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get pipeline");
+ if(H5Z_append(&pline, H5Z_FILTER_ADLER32, H5Z_FLAG_MANDATORY, 0, NULL)<0)
+ HGOTO_ERROR(H5E_PLINE, H5E_CANTINIT, FAIL, "unable to add deflate filter to pipeline");
+ if(H5P_set(plist, H5D_CRT_DATA_PIPELINE_NAME, &pline) < 0)
+ HGOTO_ERROR(H5E_PLINE, H5E_CANTINIT, FAIL, "unable to set pipeline");
+
+done:
+ FUNC_LEAVE_API(ret_value);
+}
+
/*-------------------------------------------------------------------------
* Function: H5Pset_fill_value
diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c
index 33aa607..2f7bb82 100644
--- a/src/H5Pdxpl.c
+++ b/src/H5Pdxpl.c
@@ -421,6 +421,131 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5Pset_edc_check
+ *
+ * Purpose: Enable or disable error detection when reading a dataset.
+ * The error-detecting algorithm is whichever checksum filter
+ * the user chose earlier. This function has no effect on the
+ * writing process.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Raymond Lu
+ * Jan 3, 2003
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_edc_check(hid_t plist_id, H5Z_EDC_t check)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ herr_t ret_value=SUCCEED; /* return value */
+
+ FUNC_ENTER_API(H5Pset_edc_check, FAIL);
+
+ /* Check argument */
+ if (check != H5Z_ENABLE_EDC && check != H5Z_DISABLE_EDC)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a valid value");
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id,H5P_DATASET_XFER)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID");
+
+ /* Update property list */
+ if (H5P_set(plist,H5D_XFER_EDC_NAME,&check)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set value");
+
+done:
+ FUNC_LEAVE_API(ret_value);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_edc_check
+ *
+ * Purpose: Query whether error detection is enabled when reading a
+ * dataset. The error-detecting algorithm is whichever
+ * checksum filter the user chose earlier. Only the reading
+ * process is affected by this setting.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Raymond Lu
+ * Jan 3, 2003
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+H5Z_EDC_t
+H5Pget_edc_check(hid_t plist_id)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ H5Z_EDC_t ret_value; /* return value */
+
+ FUNC_ENTER_API(H5Pget_edc_check, FAIL);
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id,H5P_DATASET_XFER)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID");
+
+ /* Get the property */
+ if (H5P_get(plist,H5D_XFER_EDC_NAME,&ret_value)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get value");
+
+ /* check valid value */
+ if (ret_value != H5Z_ENABLE_EDC && ret_value != H5Z_DISABLE_EDC)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a valid value");
+
+done:
+ FUNC_LEAVE_API(ret_value);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pset_filter_callback
+ *
+ * Purpose: Sets a user-defined callback function in a dataset transfer
+ * property list. The callback decides whether I/O continues or
+ * fails when a filter fails.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Raymond Lu
+ * Jan 14, 2003
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_filter_callback(hid_t plist_id, H5Z_filter_func_t func, void* op_data)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ herr_t ret_value=SUCCEED; /* return value */
+ H5Z_cb_t cb_struct;
+
+ FUNC_ENTER_API(H5Pset_filter_callback, FAIL);
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id,H5P_DATASET_XFER)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID");
+
+ /* Update property list */
+ cb_struct.func = func;
+ cb_struct.op_data = op_data;
+
+ if (H5P_set(plist,H5D_XFER_FILTER_CB_NAME,&cb_struct)<0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set value");
+
+done:
+ FUNC_LEAVE_API(ret_value);
+}
+
+
+/*-------------------------------------------------------------------------
* Function: H5Pget_btree_ratios
*
* Purpose: Queries B-tree split ratios. See H5Pset_btree_ratios().
diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h
index abe8e63..fbe1aea 100644
--- a/src/H5Ppublic.h
+++ b/src/H5Ppublic.h
@@ -219,6 +219,11 @@ H5_DLL H5Z_filter_t H5Pget_filter(hid_t plist_id, int filter,
size_t namelen, char name[]);
H5_DLL herr_t H5Pset_deflate(hid_t plist_id, unsigned aggression);
H5_DLL herr_t H5Pset_shuffle(hid_t plist_id, unsigned bytespertype);
+H5_DLL herr_t H5Pset_adler32(hid_t plist_id);
+H5_DLL herr_t H5Pset_edc_check(hid_t plist_id, H5Z_EDC_t check);
+H5_DLL H5Z_EDC_t H5Pget_edc_check(hid_t plist_id);
+H5_DLL herr_t H5Pset_filter_callback(hid_t plist_id, H5Z_filter_func_t func,
+ void* op_data);
#ifdef H5_WANT_H5_V1_4_COMPAT
H5_DLL herr_t H5Pset_cache(hid_t plist_id, int mdc_nelmts, int rdcc_nelmts,
size_t rdcc_nbytes, double rdcc_w0);
diff --git a/src/H5Tconv.c b/src/H5Tconv.c
index b855d88..a6f8dbb 100644
--- a/src/H5Tconv.c
+++ b/src/H5Tconv.c
@@ -2239,7 +2239,7 @@ H5T_conv_vlen(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, hsize_t nelmts,
/* Get length of element sequences */
if((seq_len=(*(src->u.vlen.getlen))(src->u.vlen.f,s))<0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "null pointer");
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "incorrect length");
H5_CHECK_OVERFLOW(seq_len,hssize_t,size_t);
src_size=(size_t)seq_len*src_base_size;
dst_size=(size_t)seq_len*dst_base_size;
diff --git a/src/H5Tvlen.c b/src/H5Tvlen.c
index 42e249a..fa65056 100644
--- a/src/H5Tvlen.c
+++ b/src/H5Tvlen.c
@@ -291,7 +291,10 @@ H5T_vlen_str_mem_getlen(H5F_t UNUSED *f, void *vl_addr)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "null pointer");
/* Set return value */
- ret_value=(hssize_t)HDstrlen(s);
+ if(s)
+ ret_value=(hssize_t)HDstrlen(s);
+ else
+ ret_value = 0;
done:
FUNC_LEAVE_NOAPI(ret_value);
@@ -324,7 +327,10 @@ H5T_vlen_str_mem_read(H5F_t UNUSED *f, void *vl_addr, void *buf, size_t len)
assert(s);
assert(buf);
- HDmemcpy(buf,s,len);
+ if(s && buf && len>0)
+ HDmemcpy(buf,s,len);
+ if(!s && len==-1)
+ buf = NULL;
done:
FUNC_LEAVE_NOAPI(ret_value);
diff --git a/src/H5Z.c b/src/H5Z.c
index 77ca9b6..c37be31 100644
--- a/src/H5Z.c
+++ b/src/H5Z.c
@@ -52,6 +52,9 @@ H5Z_init_interface (void)
#ifdef H5_HAVE_FILTER_SHUFFLE
H5Z_register (H5Z_FILTER_SHUFFLE, "shuffle", H5Z_filter_shuffle);
#endif /* H5_HAVE_FILTER_SHUFFLE */
+#ifdef H5_HAVE_FILTER_ADLER32
+ H5Z_register (H5Z_FILTER_ADLER32, "adler32", H5Z_filter_adler32);
+#endif /* H5_HAVE_FILTER_ADLER32 */
FUNC_LEAVE_NOAPI(SUCCEED);
}
@@ -492,12 +495,14 @@ done:
*/
herr_t
H5Z_pipeline(H5F_t UNUSED *f, const H5O_pline_t *pline, unsigned flags,
- unsigned *filter_mask/*in,out*/, size_t *nbytes/*in,out*/,
- size_t *buf_size/*in,out*/, void **buf/*in,out*/)
+ unsigned *filter_mask/*in,out*/, H5Z_EDC_t edc_read,
+ H5Z_cb_t cb_struct, size_t *nbytes/*in,out*/,
+ size_t *buf_size/*in,out*/, void **buf/*in,out*/)
{
size_t i, idx, new_nbytes;
H5Z_class_t *fclass=NULL;
unsigned failed = 0;
+ unsigned tmp_flags;
#ifdef H5Z_DEBUG
H5_timer_t timer;
#endif
@@ -513,7 +518,7 @@ H5Z_pipeline(H5F_t UNUSED *f, const H5O_pline_t *pline, unsigned flags,
assert(buf && *buf);
assert(!pline || pline->nfilters<32);
- if (pline && (flags & H5Z_FLAG_REVERSE)) {
+ if (pline && (flags & H5Z_FLAG_REVERSE)) { /* Read */
for (i=pline->nfilters; i>0; --i) {
idx = i-1;
@@ -528,22 +533,30 @@ H5Z_pipeline(H5F_t UNUSED *f, const H5O_pline_t *pline, unsigned flags,
#ifdef H5Z_DEBUG
H5_timer_begin(&timer);
#endif
- new_nbytes = (fclass->func)(flags|(pline->filter[idx].flags),
- pline->filter[idx].cd_nelmts,
- pline->filter[idx].cd_values,
- *nbytes, buf_size, buf);
+ tmp_flags=flags|(pline->filter[idx].flags);
+ tmp_flags|=(edc_read== H5Z_DISABLE_EDC) ? H5Z_FLAG_SKIP_EDC : 0;
+ new_nbytes = (fclass->func)(tmp_flags, pline->filter[idx].cd_nelmts,
+ pline->filter[idx].cd_values, *nbytes, buf_size, buf);
+
#ifdef H5Z_DEBUG
H5_timer_end(&(fclass->stats[1].timer), &timer);
fclass->stats[1].total += MAX(*nbytes, new_nbytes);
if (0==new_nbytes) fclass->stats[1].errors += *nbytes;
#endif
- if (0==new_nbytes) {
- failed |= (unsigned)1 << idx;
- HGOTO_ERROR(H5E_PLINE, H5E_READERROR, FAIL, "filter returned failure");
- }
- *nbytes = new_nbytes;
+
+ if(0==new_nbytes) {
+ if((cb_struct.func && (H5Z_CB_FAIL==cb_struct.func(pline->filter[idx].id, *buf, *buf_size, cb_struct.op_data)))
+ || !cb_struct.func) {
+ failed |= (unsigned)1 << idx;
+ HGOTO_ERROR(H5E_PLINE, H5E_READERROR, FAIL, "filter returned failure during read");
+ } else {
+ H5E_clear();
+ *nbytes = *buf_size;
+ }
+ } else
+ *nbytes = new_nbytes;
}
- } else if (pline) {
+ } else if (pline) { /* Write */
for (idx=0; idx<pline->nfilters; idx++) {
if (*filter_mask & ((unsigned)1<<idx)) {
failed |= (unsigned)1 << idx;
@@ -561,25 +574,29 @@ H5Z_pipeline(H5F_t UNUSED *f, const H5O_pline_t *pline, unsigned flags,
#ifdef H5Z_DEBUG
H5_timer_begin(&timer);
#endif
- new_nbytes = (fclass->func)(flags|(pline->filter[idx].flags),
- pline->filter[idx].cd_nelmts,
- pline->filter[idx].cd_values,
- *nbytes, buf_size, buf);
+ new_nbytes = (fclass->func)(flags|(pline->filter[idx].flags), pline->filter[idx].cd_nelmts,
+ pline->filter[idx].cd_values, *nbytes, buf_size, buf);
#ifdef H5Z_DEBUG
H5_timer_end(&(fclass->stats[0].timer), &timer);
fclass->stats[0].total += MAX(*nbytes, new_nbytes);
if (0==new_nbytes) fclass->stats[0].errors += *nbytes;
#endif
- if (0==new_nbytes) {
- failed |= (unsigned)1 << idx;
- if (0==(pline->filter[idx].flags & H5Z_FLAG_OPTIONAL)) {
- HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "filter returned failure");
- } else {
- H5E_clear();
- }
- } else {
- *nbytes = new_nbytes;
- }
+ if(0==new_nbytes) {
+ if (0==(pline->filter[idx].flags & H5Z_FLAG_OPTIONAL)) {
+ if((cb_struct.func && (H5Z_CB_FAIL==cb_struct.func(pline->filter[idx].id, *buf, *nbytes, cb_struct.op_data)))
+ || !cb_struct.func) {
+ failed |= (unsigned)1 << idx;
+ HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "filter returned failure");
+ } else {
+ H5E_clear();
+ *nbytes = *buf_size;
+ }
+ } else {
+ H5E_clear();
+ }
+ } else {
+ *nbytes = new_nbytes;
+ }
}
}
diff --git a/src/H5Zadler32.c b/src/H5Zadler32.c
new file mode 100644
index 0000000..53ce12d
--- /dev/null
+++ b/src/H5Zadler32.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright © 1999-2001 NCSA
+ * All rights reserved.
+ *
+ * Programmer: Raymond Lu <slu@ncsa.uiuc.edu>
+ * Jan 3, 2003
+ */
+#include "H5private.h"
+#include "H5Eprivate.h"
+#include "H5MMprivate.h"
+#include "H5Zprivate.h"
+
+#ifdef H5_HAVE_FILTER_ADLER32
+
+#define ADLER_LEN 4
+#define ADLER_BASE 65521
+
+/* Interface initialization */
+#define PABLO_MASK H5Z_adler32_mask
+#define INTERFACE_INIT NULL
+static int interface_initialize_g = 0;
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Z_filter_adler32_compute
+ *
+ * Purpose: Implement an Adler32 Checksum
+ *
+ * Return: Success: Adler32 value
+ *
+ * Failure: Can't fail
+ *
+ * Programmer: Raymond Lu
+ * Jan 3, 2003
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static unsigned int H5Z_filter_adler32_compute(unsigned char *buf, size_t len)
+{
+ size_t i;
+ register unsigned int s1 = 1;
+ register unsigned int s2 = 0;
+
+ FUNC_ENTER_NOINIT(H5Z_filter_adler32_compute);
+
+ /* Compute checksum */
+ for(i=0; i<len; i++) {
+ s1 = (s1 + *buf++) % ADLER_BASE;
+ s2 = (s2 + s1) % ADLER_BASE;
+ }
+
+ FUNC_LEAVE_NOAPI((s2 << 16) + s1);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Z_filter_adler32
+ *
+ * Purpose: Implement an I/O filter of Adler32 Checksum
+ *
+ * Return: Success: size of data plus the size of Adler32 value
+ *
+ * Failure: 0
+ *
+ * Programmer: Raymond Lu
+ * Jan 3, 2003
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+size_t
+H5Z_filter_adler32 (unsigned flags, size_t cd_nelmts, const unsigned cd_values[],
+ size_t nbytes, size_t *buf_size, void **buf)
+{
+ size_t ret_value = 0;
+ void *outbuf = NULL;
+
+ unsigned char *src = (unsigned char*)(*buf);
+ unsigned int adler = 1;
+
+ FUNC_ENTER_NOAPI(H5Z_filter_adler32, 0);
+
+ assert(sizeof(unsigned int)==4);
+
+ if (flags & H5Z_FLAG_REVERSE) { /* Read */
+ size_t src_nbytes = nbytes;
+ unsigned int origin_adler;
+
+ /* Do the checksum if it's enabled for read; otherwise skip it
+ * for better performance. */
+ if (!(flags & H5Z_FLAG_SKIP_EDC)) { /* Read */
+ unsigned char *tmp_src;
+
+ src_nbytes -= ADLER_LEN;
+ tmp_src=src+src_nbytes;
+ UINT32DECODE(tmp_src, origin_adler);
+
+ /* Compute checksum */
+ adler = H5Z_filter_adler32_compute(src,src_nbytes);
+
+ if(origin_adler != adler)
+ HGOTO_ERROR(H5E_STORAGE, H5E_READERROR, 0, "data error detected by Adler32 checksum");
+ }
+
+ *buf_size = nbytes - ADLER_LEN;
+ ret_value = *buf_size;
+ } else { /* Write */
+ unsigned char *dst;
+
+ /* Compute checksum */
+ adler = H5Z_filter_adler32_compute(src,nbytes);
+
+ if (NULL==(dst=outbuf=H5MM_malloc(nbytes+ADLER_LEN)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "unable to allocate Adler32 checksum destination buffer");
+
+ /* Copy raw data */
+ HDmemcpy((void*)dst, (void*)(*buf), nbytes);
+
+ /* Append checksum to raw data */
+ dst += nbytes;
+ UINT32ENCODE(dst, adler);
+
+ *buf_size = nbytes + ADLER_LEN;
+ H5MM_xfree(*buf);
+ *buf = outbuf;
+ outbuf = NULL;
+ ret_value = *buf_size;
+ }
+
+done:
+ if(outbuf)
+ H5MM_xfree(outbuf);
+ FUNC_LEAVE_NOAPI(ret_value);
+}
+
+#endif /* H5_HAVE_FILTER_ADLER32 */
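The checksum above is the standard Adler-32: s1 is 1 plus the sum of all bytes, s2 is the sum of the running s1 values, both modulo 65521 (the largest prime below 2^16), and the result packs s2 into the high 16 bits. As a sanity check, here is a standalone sketch (not part of the patch) that computes the value for the two-byte input "Hi":

    #include <stdio.h>
    #include <stddef.h>

    #define ADLER_BASE 65521u   /* same modulus as H5Zadler32.c */

    static unsigned int adler32(const unsigned char *buf, size_t len)
    {
        unsigned int s1 = 1, s2 = 0;
        size_t i;

        for (i = 0; i < len; i++) {
            s1 = (s1 + buf[i]) % ADLER_BASE;
            s2 = (s2 + s1) % ADLER_BASE;
        }
        return (s2 << 16) + s1;
    }

    int main(void)
    {
        /* 'H' = 72:  s1 = 73,  s2 = 73
         * 'i' = 105: s1 = 178, s2 = 251  ->  (251 << 16) + 178 = 0x00FB00B2 */
        printf("0x%08X\n", adler32((const unsigned char *)"Hi", 2));
        return 0;
    }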
diff --git a/src/H5Zdeflate.c b/src/H5Zdeflate.c
index 3c91836..c147533 100644
--- a/src/H5Zdeflate.c
+++ b/src/H5Zdeflate.c
@@ -21,6 +21,8 @@
#define INTERFACE_INIT NULL
static int interface_initialize_g = 0;
+#define H5Z_DEFLATE_SIZE_ADJUST(s) (HDceil((double)((s)*1.001))+12)
+
/*-------------------------------------------------------------------------
* Function: H5Z_filter_deflate
@@ -103,10 +105,10 @@ H5Z_filter_deflate (unsigned flags, size_t cd_nelmts,
*/
const Bytef *z_src = (const Bytef*)(*buf);
Bytef *z_dst; /*destination buffer */
- uLongf z_dst_nbytes = (uLongf)nbytes;
+ uLongf z_dst_nbytes = (uLongf)H5Z_DEFLATE_SIZE_ADJUST(nbytes);
uLong z_src_nbytes = (uLong)nbytes;
-
- if (NULL==(z_dst=outbuf=H5MM_malloc(nbytes)))
+
+ if (NULL==(z_dst=outbuf=H5MM_malloc(z_dst_nbytes)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "unable to allocate deflate destination buffer");
status = compress2 (z_dst, &z_dst_nbytes, z_src, z_src_nbytes, aggression);
if (Z_BUF_ERROR==status) {
@@ -114,7 +116,7 @@ H5Z_filter_deflate (unsigned flags, size_t cd_nelmts,
} else if (Z_MEM_ERROR==status) {
HGOTO_ERROR (H5E_PLINE, H5E_CANTINIT, 0, "deflate memory error");
} else if (Z_OK!=status) {
- HGOTO_ERROR (H5E_PLINE, H5E_CANTINIT, 0, "deflate error");
+ HGOTO_ERROR (H5E_PLINE, H5E_CANTINIT, 0, "other deflate error");
} else {
H5MM_xfree(*buf);
*buf = outbuf;
diff --git a/src/H5Zprivate.h b/src/H5Zprivate.h
index 5f05dc8..b150d0f 100644
--- a/src/H5Zprivate.h
+++ b/src/H5Zprivate.h
@@ -10,6 +10,7 @@
#include "H5Zpublic.h"
#include "H5Fprivate.h"
+#include "H5Ppublic.h"
/*
* The filter table maps filter identification numbers to structs that
@@ -39,8 +40,9 @@ H5_DLL herr_t H5Z_append(struct H5O_pline_t *pline, H5Z_filter_t filter,
const unsigned int cd_values[]);
H5_DLL herr_t H5Z_pipeline(H5F_t *f, const struct H5O_pline_t *pline,
unsigned flags, unsigned *filter_mask/*in,out*/,
- size_t *nbytes/*in,out*/,
- size_t *buf_size/*in,out*/, void **buf/*in,out*/);
+ H5Z_EDC_t edc_read, H5Z_cb_t cb_struct,
+ size_t *nbytes/*in,out*/, size_t *buf_size/*in,out*/,
+ void **buf/*in,out*/);
H5_DLL H5Z_class_t *H5Z_find(H5Z_filter_t id);
@@ -52,4 +54,9 @@ H5_DLL size_t H5Z_filter_deflate(unsigned flags, size_t cd_nelmts,
H5_DLL size_t H5Z_filter_shuffle(unsigned flags, size_t cd_nelmts,
const unsigned cd_values[], size_t nbytes,
size_t *buf_size, void **buf);
+
+H5_DLL size_t H5Z_filter_adler32(unsigned flags, size_t cd_nelmts,
+ const unsigned cd_values[], size_t nbytes,
+ size_t *buf_size, void **buf);
+
#endif
diff --git a/src/H5Zpublic.h b/src/H5Zpublic.h
index 67b1fe9..f45c96c 100644
--- a/src/H5Zpublic.h
+++ b/src/H5Zpublic.h
@@ -19,17 +19,46 @@ typedef int H5Z_filter_t;
#define H5Z_FILTER_ERROR (-1) /*no filter */
#define H5Z_FILTER_NONE 0 /*reserved indefinitely */
#define H5Z_FILTER_DEFLATE 1 /*deflation like gzip */
-#define H5Z_FILTER_SHUFFLE 2 /* shuffle the data */
+#define H5Z_FILTER_SHUFFLE 2 /*shuffle the data */
+#define H5Z_FILTER_ADLER32 3 /*adler32 checksum of EDC */
#define H5Z_FILTER_RESERVED 256 /*filter ids below this value are reserved */
#define H5Z_FILTER_MAX 65535 /*maximum filter id */
/* Flags for filter definition */
#define H5Z_FLAG_DEFMASK 0x00ff /*definition flag mask */
+#define H5Z_FLAG_MANDATORY 0x0000 /*filter is mandatory */
#define H5Z_FLAG_OPTIONAL 0x0001 /*filter is optional */
/* Additional flags for filter invocation */
#define H5Z_FLAG_INVMASK 0xff00 /*invocation flag mask */
#define H5Z_FLAG_REVERSE 0x0100 /*reverse direction; read */
+#define H5Z_FLAG_SKIP_EDC 0x0200 /*skip EDC filters for read */
+
+/* Values to decide if EDC is enabled for reading data */
+typedef enum H5Z_EDC_t {
+ H5Z_ERROR_EDC = -1, /* error value */
+ H5Z_DISABLE_EDC = 0,
+ H5Z_ENABLE_EDC = 1,
+ H5Z_NO_EDC = 2 /* must be the last */
+} H5Z_EDC_t;
+
+/* Return values for filter callback function */
+typedef enum H5Z_cb_return_t {
+ H5Z_CB_ERROR = -1,
+ H5Z_CB_FAIL = 0, /* I/O should fail if filter fails. */
+ H5Z_CB_CONT = 1, /* I/O continues if filter fails. */
+ H5Z_CB_NO = 2
+} H5Z_cb_return_t;
+
+/* Filter callback function definition */
+typedef H5Z_cb_return_t (*H5Z_filter_func_t)(H5Z_filter_t filter, void* buf,
+ size_t buf_size, void* op_data);
+
+/* Structure for filter callback property */
+typedef struct H5Z_cb_t {
+ H5Z_filter_func_t func;
+ void* op_data;
+} H5Z_cb_t;
#ifdef __cplusplus
extern "C" {
diff --git a/src/H5Zshuffle.c b/src/H5Zshuffle.c
index f25b5ff..a33d3cd 100644
--- a/src/H5Zshuffle.c
+++ b/src/H5Zshuffle.c
@@ -42,9 +42,8 @@ static int interface_initialize_g = 0;
*-------------------------------------------------------------------------
*/
size_t
-H5Z_filter_shuffle(unsigned flags, size_t cd_nelmts,
- const unsigned cd_values[], size_t nbytes,
- size_t *buf_size, void **buf)
+H5Z_filter_shuffle(unsigned flags, size_t cd_nelmts, const unsigned cd_values[],
+ size_t nbytes, size_t *buf_size, void **buf)
{
void *dest = NULL; /* Buffer to deposit [un]shuffled bytes into */
unsigned char *_src; /* Alias for source buffer */
@@ -52,6 +51,7 @@ H5Z_filter_shuffle(unsigned flags, size_t cd_nelmts,
unsigned bytesoftype; /* Number of bytes per element */
size_t numofelements; /* Number of elements in buffer */
size_t i,j; /* Local index variables */
+ size_t leftover; /* Extra bytes at end of buffer */
size_t ret_value; /* Return value */
FUNC_ENTER_NOAPI(H5Z_filter_shuffle, 0);
@@ -68,6 +68,9 @@ H5Z_filter_shuffle(unsigned flags, size_t cd_nelmts,
/* Compute the number of elements in buffer */
numofelements=nbytes/bytesoftype;
+ /* Compute the leftover bytes if there are any */
+ leftover = nbytes%bytesoftype;
+
/* Allocate the destination buffer */
if (NULL==(dest = H5MM_malloc(nbytes)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "memory allocation failed for shuffle buffer");
@@ -84,6 +87,13 @@ H5Z_filter_shuffle(unsigned flags, size_t cd_nelmts,
_dest+=bytesoftype;
} /* end for */
} /* end for */
+
+ /* Add leftover to the end of data */
+ if(leftover>0) {
+ /* Adjust back to end of shuffled bytes */
+ _dest -= (bytesoftype - 1);
+ HDmemcpy((void*)_dest, (void*)_src, leftover);
+ }
} /* end if */
else {
/* Get the pointer to the destination buffer */
@@ -97,6 +107,13 @@ H5Z_filter_shuffle(unsigned flags, size_t cd_nelmts,
_src+=bytesoftype;
} /* end for */
} /* end for */
+
+ /* Add leftover to the end of data */
+ if(leftover>0) {
+ /* Adjust back to end of shuffled bytes */
+ _src -= (bytesoftype - 1);
+ HDmemcpy((void*)_dest, (void*)_src, leftover);
+ }
} /* end else */
/* Set the buffer information to return */
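To illustrate the new leftover handling: with 4-byte elements and a 10-byte buffer there are two full elements plus two trailing bytes, and the trailing bytes are copied unchanged to the end of the shuffled output. A minimal standalone sketch of the same byte shuffle (independent of the HDF5 internals; the buffer contents are made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* 10 bytes, element size 4: 2 full elements, 2 leftover bytes */
        unsigned char src[10] = {0,1,2,3, 4,5,6,7, 8,9};
        unsigned char dst[10];
        size_t bytesoftype = 4, nbytes = sizeof(src);
        size_t numofelements = nbytes / bytesoftype;   /* 2 */
        size_t leftover = nbytes % bytesoftype;        /* 2 */
        size_t i, j;

        /* Gather byte 0 of every element, then byte 1, and so on */
        for (i = 0; i < bytesoftype; i++)
            for (j = 0; j < numofelements; j++)
                dst[i * numofelements + j] = src[j * bytesoftype + i];

        /* Leftover bytes are appended unshuffled */
        memcpy(dst + numofelements * bytesoftype,
               src + numofelements * bytesoftype, leftover);

        for (i = 0; i < nbytes; i++)
            printf("%u ", (unsigned)dst[i]);   /* prints: 0 4 1 5 2 6 3 7 8 9 */
        printf("\n");
        return 0;
    }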
diff --git a/src/H5config.h.in b/src/H5config.h.in
index 09d84e7..1b7f58c 100644
--- a/src/H5config.h.in
+++ b/src/H5config.h.in
@@ -27,6 +27,9 @@
/* Define if support for shuffle filter is enabled */
#undef HAVE_FILTER_SHUFFLE
+/* Define if support for Adler32 checksum filter is enabled */
+#undef HAVE_FILTER_ADLER32
+
/* Define to 1 if you have the `fork' function. */
#undef HAVE_FORK
diff --git a/src/Makefile.in b/src/Makefile.in
index 95a0fc5..6b28099 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -39,7 +39,7 @@ LIB_SRC=H5.c H5A.c H5AC.c H5B.c H5D.c H5E.c H5F.c H5Farray.c H5Fcontig.c \
H5P.c H5Pdcpl.c H5Pdxpl.c H5Pfapl.c H5Pfcpl.c H5R.c H5RS.c H5S.c \
H5Sall.c H5Shyper.c H5Smpio.c H5Snone.c H5Spoint.c H5Sselect.c H5ST.c \
H5T.c H5Tbit.c H5Tconv.c H5Tinit.c H5Tvlen.c H5TB.c H5TS.c H5V.c \
- H5Z.c H5Zdeflate.c H5Zshuffle.c
+ H5Z.c H5Zdeflate.c H5Zshuffle.c H5Zadler32.c
LIB_OBJ=$(LIB_SRC:.c=.lo)
diff --git a/test/dsets.c b/test/dsets.c
index e41748e..db51344 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -28,13 +28,23 @@ const char *FILENAME[] = {
#define DSET_TCONV_NAME "tconv"
#define DSET_DEFLATE_NAME "deflate"
#define DSET_SHUFFLE_NAME "shuffle"
-#define DSET_SHUFFLE_DEFLATE_NAME "shuffle+deflate"
+#define DSET_ADLER32_NAME "adler32"
+#define DSET_ADLER32_NAME_2 "adler32_2"
+#define DSET_ADLER32_NAME_3 "adler32_3"
+#define DSET_SHUF_DEF_FLET_NAME "shuffle+deflate+adler32"
+#define DSET_SHUF_DEF_FLET_NAME_2 "shuffle+deflate+adler32_2"
#define DSET_BOGUS_NAME "bogus"
#define DSET_MISSING_NAME "missing"
#define DSET_ONEBYTE_SHUF_NAME "onebyte_shuffle"
#define USER_BLOCK 512
#define H5Z_BOGUS 305
+#define H5Z_CORRUPT 306
+
+#define DISABLE_ADLER32 0
+#define ENABLE_ADLER32 1
+#define DATA_CORRUPTED 1
+#define DATA_NOT_CORRUPTED 0
/* Shared global arrays */
int points[100][200], check[100][200];
@@ -594,7 +604,7 @@ test_tconv(hid_t file)
*-------------------------------------------------------------------------
*/
static size_t
-bogus(unsigned int UNUSED flags, size_t UNUSED cd_nelmts,
+bogus(unsigned int UNUSED flags, size_t UNUSED cd_nelmts,
const unsigned int UNUSED *cd_values, size_t nbytes,
size_t UNUSED *buf_size, void UNUSED **buf)
{
@@ -603,7 +613,114 @@ bogus(unsigned int UNUSED flags, size_t UNUSED cd_nelmts,
/*-------------------------------------------------------------------------
- * Function: test_compression_internal
+ * Function: corrupt_data
+ *
+ * Purpose: For testing the Adler32 checksum: modify the data slightly
+ * during writing so that the checksum fails when the data is
+ * read back.
+ *
+ * Return: Success: Data chunk size
+ *
+ * Failure: 0
+ *
+ * Programmer: Raymond Lu
+ * Jan 14, 2003
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static size_t
+corrupt_data(unsigned int flags, size_t cd_nelmts,
+ const unsigned int *cd_values, size_t nbytes,
+ size_t *buf_size, void **buf)
+{
+ size_t ret_value = 0;
+ unsigned char *dst = (unsigned char*)(*buf);
+ unsigned int offset;
+ unsigned int length;
+ unsigned int value;
+ unsigned char *corrupt_data;
+ int i;
+
+ if (cd_nelmts!=3 || !cd_values)
+ return 0;
+ offset = cd_values[0];
+ length = cd_values[1];
+ value = cd_values[2];
+ if(offset>nbytes || (offset+length)>nbytes || length<sizeof(unsigned int))
+ return 0;
+
+ corrupt_data = (unsigned char*)HDmalloc(length);
+ HDmemset((void*)corrupt_data, value, length);
+
+ if (flags & H5Z_FLAG_REVERSE) { /* Varify data is actually corrupted during read */
+ dst += offset;
+ if(HDmemcmp(corrupt_data, dst, length)!=0) return 0;
+ *buf_size = nbytes;
+ ret_value = nbytes;
+ } else { /* Write corrupted data */
+ dst += offset;
+ HDmemcpy(dst, corrupt_data, length);
+ *buf_size = nbytes;
+ ret_value = *buf_size;
+ }
+
+ if(corrupt_data)
+ HDfree(corrupt_data);
+
+ return ret_value;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: filter_cb_cont
+ *
+ * Purpose: Callback function to handle checksum failure. Let it continue.
+ *
+ * Return: continue
+ *
+ * Programmer: Raymond Lu
+ * Jan 14, 2003
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static H5Z_cb_return_t
+filter_cb_cont(H5Z_filter_t filter, void* UNUSED buf, size_t UNUSED buf_size,
+ void* UNUSED op_data)
+{
+ if(H5Z_FILTER_ADLER32==filter) return H5Z_CB_CONT;
+ return H5Z_CB_FAIL;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: filter_cb_fail
+ *
+ * Purpose: Callback function to handle checksum failure. Let it fail.
+ *
+ * Return: fail
+ *
+ * Programmer: Raymond Lu
+ * Jan 14, 2003
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static H5Z_cb_return_t
+filter_cb_fail(H5Z_filter_t filter, void* UNUSED buf, size_t UNUSED buf_size,
+ void* UNUSED op_data)
+{
+ if(H5Z_FILTER_ADLER32==filter) return H5Z_CB_FAIL;
+ return H5Z_CB_CONT;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_filter_internal
*
* Purpose: Tests dataset compression. If compression is requested when
* it hasn't been compiled into the library (such as when
@@ -623,7 +740,8 @@ bogus(unsigned int UNUSED flags, size_t UNUSED cd_nelmts,
*-------------------------------------------------------------------------
*/
static herr_t
-test_compression_internal(hid_t fid, const char *name, hid_t dcpl, hsize_t *dset_size)
+test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_adler32,
+ int corrupted, hsize_t *dset_size)
{
hid_t dataset; /* Dataset ID */
hid_t dxpl; /* Dataset xfer property list ID */
@@ -633,6 +751,7 @@ test_compression_internal(hid_t fid, const char *name, hid_t dcpl, hsize_t *dset
const hsize_t hs_size[2] = {4, 50}; /* Hyperslab size */
void *tconv_buf = NULL; /* Temporary conversion buffer */
hsize_t i, j, n; /* Local index variables */
+ herr_t status; /* Error status */
/* Create the data space */
if ((sid = H5Screate_simple(2, size, NULL))<0) goto error;
@@ -644,8 +763,13 @@ test_compression_internal(hid_t fid, const char *name, hid_t dcpl, hsize_t *dset
if ((dxpl = H5Pcreate (H5P_DATASET_XFER))<0) goto error;
tconv_buf = malloc (1000);
if (H5Pset_buffer (dxpl, 1000, tconv_buf, NULL)<0) goto error;
-
- TESTING("compression (setup)");
+ if (if_adler32==DISABLE_ADLER32) {
+ if(H5Pset_edc_check(dxpl, H5Z_DISABLE_EDC)<0)
+ goto error;
+ if(H5Z_DISABLE_EDC != H5Pget_edc_check(dxpl))
+ goto error;
+ }
+ TESTING(" filters (setup)");
/* Create the dataset */
if ((dataset = H5Dcreate(fid, name, H5T_NATIVE_INT, sid,
@@ -656,7 +780,7 @@ test_compression_internal(hid_t fid, const char *name, hid_t dcpl, hsize_t *dset
* STEP 1: Read uninitialized data. It should be zero.
*----------------------------------------------------------------------
*/
- TESTING("compression (uninitialized read)");
+ TESTING(" filters (uninitialized read)");
if (H5Dread (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
goto error;
@@ -675,11 +799,11 @@ test_compression_internal(hid_t fid, const char *name, hid_t dcpl, hsize_t *dset
PASSED();
/*----------------------------------------------------------------------
- * STEP 2: Test compression by setting up a chunked dataset and writing
+ * STEP 2: Test filters by setting up a chunked dataset and writing
* to it.
*----------------------------------------------------------------------
*/
- TESTING("compression (write)");
+ TESTING(" filters (write)");
for (i=n=0; i<size[0]; i++) {
for (j=0; j<size[1]; j++) {
@@ -689,30 +813,54 @@ test_compression_internal(hid_t fid, const char *name, hid_t dcpl, hsize_t *dset
if (H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, points)<0)
goto error;
+
+ if((*dset_size=H5Dget_storage_size(dataset))==0) goto error;
+
PASSED();
/*----------------------------------------------------------------------
* STEP 3: Try to read the data we just wrote.
*----------------------------------------------------------------------
*/
- TESTING("compression (read)");
+ TESTING(" filters (read)");
/* Read the dataset back */
- if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
- goto error;
-
- /* Check that the values read are the same as the values written */
- for (i=0; i<size[0]; i++) {
- for (j=0; j<size[1]; j++) {
- if (points[i][j] != check[i][j]) {
- H5_FAILED();
- printf(" Read different values than written.\n");
- printf(" At index %lu,%lu\n",
- (unsigned long)i, (unsigned long)j);
- goto error;
- }
- }
+ if(corrupted) {
+ /* Default behavior is failure when data is corrupted. */
+ H5E_BEGIN_TRY {
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ } H5E_END_TRY;
+ if(status>=0) goto error;
+
+ /* Callback decides to continue even though the data is corrupted. */
+ if(H5Pset_filter_callback(dxpl, filter_cb_cont, NULL)<0) goto error;
+ if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
+ goto error;
+
+ /* Callback decides to fail when data is corrupted. */
+ if(H5Pset_filter_callback(dxpl, filter_cb_fail, NULL)<0) goto error;
+ H5E_BEGIN_TRY {
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ } H5E_END_TRY;
+ if(status>=0) goto error;
+ } else {
+ if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
+ goto error;
+
+ /* Check that the values read are the same as the values written */
+ for (i=0; i<size[0]; i++) {
+ for (j=0; j<size[1]; j++) {
+ if (points[i][j] != check[i][j]) {
+ H5_FAILED();
+ printf(" Read different values than written.\n");
+ printf(" At index %lu,%lu\n",
+ (unsigned long)i, (unsigned long)j);
+ goto error;
+ }
+ }
+ }
}
+
PASSED();
/*----------------------------------------------------------------------
@@ -722,7 +870,7 @@ test_compression_internal(hid_t fid, const char *name, hid_t dcpl, hsize_t *dset
* dataset although we rewrite the whole thing.
*----------------------------------------------------------------------
*/
- TESTING("compression (modify)");
+ TESTING(" filters (modify)");
for (i=0; i<size[0]; i++) {
for (j=0; j<size[1]/2; j++) {
@@ -731,50 +879,95 @@ test_compression_internal(hid_t fid, const char *name, hid_t dcpl, hsize_t *dset
}
if (H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, points)<0)
goto error;
-
- /* Read the dataset back and check it */
- if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
- goto error;
-
- /* Check that the values read are the same as the values written */
- for (i=0; i<size[0]; i++) {
- for (j=0; j<size[1]; j++) {
- if (points[i][j] != check[i][j]) {
- H5_FAILED();
- printf(" Read different values than written.\n");
- printf(" At index %lu,%lu\n",
- (unsigned long)i, (unsigned long)j);
- goto error;
- }
- }
+
+ if(corrupted) {
+ /* Default behavior is failure when data is corrupted. */
+ H5E_BEGIN_TRY {
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ } H5E_END_TRY;
+ if(status>=0) goto error;
+
+ /* Callback decides to continue even though the data is corrupted. */
+ if(H5Pset_filter_callback(dxpl, filter_cb_cont, NULL)<0) goto error;
+ if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
+ goto error;
+
+ /* Callback decides to fail when data is corrupted. */
+ if(H5Pset_filter_callback(dxpl, filter_cb_fail, NULL)<0) goto error;
+ H5E_BEGIN_TRY {
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ } H5E_END_TRY;
+ if(status>=0) goto error;
+ } else {
+ /* Read the dataset back and check it */
+ if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
+ goto error;
+
+ /* Check that the values read are the same as the values written */
+ for (i=0; i<size[0]; i++) {
+ for (j=0; j<size[1]; j++) {
+ if (points[i][j] != check[i][j]) {
+ H5_FAILED();
+ printf(" Read different values than written.\n");
+ printf(" At index %lu,%lu\n",
+ (unsigned long)i, (unsigned long)j);
+ goto error;
+ }
+ }
+ }
}
+
+ if((*dset_size=H5Dget_storage_size(dataset))==0) goto error;
+
PASSED();
/*----------------------------------------------------------------------
* STEP 5: Close the dataset and then open it and read it again. This
- * insures that the compression message is picked up properly from the
+ * insures that the filters message is picked up properly from the
* object header.
*----------------------------------------------------------------------
*/
- TESTING("compression (re-open)");
+ TESTING(" filters (re-open)");
if (H5Dclose (dataset)<0) goto error;
if ((dataset = H5Dopen (fid, name))<0) goto error;
- if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
- goto error;
-
- /* Check that the values read are the same as the values written */
- for (i=0; i<size[0]; i++) {
- for (j=0; j<size[1]; j++) {
- if (points[i][j] != check[i][j]) {
- H5_FAILED();
- printf(" Read different values than written.\n");
- printf(" At index %lu,%lu\n",
- (unsigned long)i, (unsigned long)j);
- goto error;
- }
- }
+
+ if(corrupted) {
+ /* Default behavior is failure when data is corrupted. */
+ H5E_BEGIN_TRY {
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ } H5E_END_TRY;
+ if(status>=0) goto error;
+
+ /* Callback decides to continue even though the data is corrupted. */
+ if(H5Pset_filter_callback(dxpl, filter_cb_cont, NULL)<0) goto error;
+ if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
+ goto error;
+
+ /* Callback decides to fail when data is corrupted. */
+ if(H5Pset_filter_callback(dxpl, filter_cb_fail, NULL)<0) goto error;
+ H5E_BEGIN_TRY {
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ } H5E_END_TRY;
+ if(status>=0) goto error;
+ } else {
+ if (H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
+ goto error;
+
+ /* Check that the values read are the same as the values written */
+ for (i=0; i<size[0]; i++) {
+ for (j=0; j<size[1]; j++) {
+ if (points[i][j] != check[i][j]) {
+ H5_FAILED();
+ printf(" Read different values than written.\n");
+ printf(" At index %lu,%lu\n",
+ (unsigned long)i, (unsigned long)j);
+ goto error;
+ }
+ }
+ }
}
+
PASSED();
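
For reference, a minimal sketch of the read-side properties exercised here. Only H5Pset_filter_callback() appears in this excerpt; H5Pset_edc_check() and the H5Z_ENABLE_EDC/H5Z_DISABLE_EDC values are assumed names for the error-detection switch that ENABLE_ADLER32/DISABLE_ADLER32 presumably toggle inside test_filter_internal():

    hid_t dxpl;

    /* Error detection and the filter callback both live on the dataset
     * transfer property list (sketch only; error handling abbreviated). */
    if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) goto error;

    /* Verify the Adler32 checksum while reading ... (assumed API name) */
    if (H5Pset_edc_check(dxpl, H5Z_ENABLE_EDC) < 0) goto error;

    /* ... or skip the verification entirely for this read (assumed API name) */
    if (H5Pset_edc_check(dxpl, H5Z_DISABLE_EDC) < 0) goto error;

    /* Decide at run time whether a failing filter aborts the read */
    if (H5Pset_filter_callback(dxpl, filter_cb_cont, NULL) < 0) goto error;
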
@@ -784,7 +977,7 @@ test_compression_internal(hid_t fid, const char *name, hid_t dcpl, hsize_t *dset
* boundaries (we know that case already works from above tests).
*----------------------------------------------------------------------
*/
- TESTING("compression (partial I/O)");
+ TESTING(" filters (partial I/O)");
for (i=0; i<hs_size[0]; i++) {
for (j=0; j<hs_size[1]; j++) {
@@ -795,32 +988,53 @@ test_compression_internal(hid_t fid, const char *name, hid_t dcpl, hsize_t *dset
NULL)<0) goto error;
if (H5Dwrite (dataset, H5T_NATIVE_INT, sid, sid, dxpl, points)<0)
goto error;
- if (H5Dread (dataset, H5T_NATIVE_INT, sid, sid, dxpl, check)<0)
- goto error;
+
+ if(corrupted) {
+ /* Default behavior is failure when data is corrupted. */
+ H5E_BEGIN_TRY {
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ } H5E_END_TRY;
+ if(status>=0) goto error;
+
+ /* Callback decides to continue even though the data is corrupted. */
+ if(H5Pset_filter_callback(dxpl, filter_cb_cont, NULL)<0) goto error;
+ if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check)<0)
+ goto error;
+
+ /* Callback decides to fail when data is corrupted. */
+ if(H5Pset_filter_callback(dxpl, filter_cb_fail, NULL)<0) goto error;
+ H5E_BEGIN_TRY {
+ status=H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check);
+ } H5E_END_TRY;
+ if(status>=0) goto error;
+ } else {
+ if (H5Dread (dataset, H5T_NATIVE_INT, sid, sid, dxpl, check)<0)
+ goto error;
- /* Check that the values read are the same as the values written */
- for (i=0; i<hs_size[0]; i++) {
- for (j=0; j<hs_size[1]; j++) {
- if (points[hs_offset[0]+i][hs_offset[1]+j] !=
- check[hs_offset[0]+i][hs_offset[1]+j]) {
- H5_FAILED();
- printf(" Read different values than written.\n");
- printf(" At index %lu,%lu\n",
- (unsigned long)(hs_offset[0]+i),
- (unsigned long)(hs_offset[1]+j));
- printf(" At original: %d\n",
- (int)points[hs_offset[0]+i][hs_offset[1]+j]);
- printf(" At returned: %d\n",
- (int)check[hs_offset[0]+i][hs_offset[1]+j]);
- goto error;
- }
- }
+ /* Check that the values read are the same as the values written */
+ for (i=0; i<hs_size[0]; i++) {
+ for (j=0; j<hs_size[1]; j++) {
+ if (points[hs_offset[0]+i][hs_offset[1]+j] !=
+ check[hs_offset[0]+i][hs_offset[1]+j]) {
+ H5_FAILED();
+ printf(" Read different values than written.\n");
+ printf(" At index %lu,%lu\n",
+ (unsigned long)(hs_offset[0]+i),
+ (unsigned long)(hs_offset[1]+j));
+ printf(" At original: %d\n",
+ (int)points[hs_offset[0]+i][hs_offset[1]+j]);
+ printf(" At returned: %d\n",
+ (int)check[hs_offset[0]+i][hs_offset[1]+j]);
+ goto error;
+ }
+ }
+ }
}
+
PASSED();
/* Get the storage size of the dataset */
if((*dset_size=H5Dget_storage_size(dataset))==0) goto error;
-
/* Clean up objects used for this test */
if (H5Dclose (dataset)<0) goto error;
if (H5Sclose (sid)<0) goto error;
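
The filter_cb_cont() and filter_cb_fail() callbacks used above are defined earlier in dsets.c, outside this excerpt. A minimal sketch of what such callbacks could look like, assuming the H5Z_filter_func_t signature and the H5Z_CB_CONT/H5Z_CB_FAIL return codes:

    /* Sketch only; the real callbacks in dsets.c may differ. */
    static H5Z_cb_return_t
    filter_cb_cont(H5Z_filter_t filter, void *buf, size_t buf_size, void *op_data)
    {
        /* Tolerate an Adler32 mismatch; fail for any other filter error */
        if (H5Z_FILTER_ADLER32 == filter)
            return H5Z_CB_CONT;
        return H5Z_CB_FAIL;
    }

    static H5Z_cb_return_t
    filter_cb_fail(H5Z_filter_t filter, void *buf, size_t buf_size, void *op_data)
    {
        /* Abort the I/O as soon as any filter reports failure */
        return H5Z_CB_FAIL;
    }
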
@@ -835,9 +1049,9 @@ error:
/*-------------------------------------------------------------------------
- * Function: test_compression
+ * Function: test_filters
*
- * Purpose: Tests dataset compression.
+ * Purpose: Tests dataset filters.
*
* Return: Success: 0
* Failure: -1
@@ -846,27 +1060,34 @@ error:
* Wednesday, April 15, 1998
*
* Modifications:
- * Moved guts of compression testing out of main routine.
- * Also added tests for shuffle filter.
+ * Moved guts of filter testing out of main routine.
+ * Tests the shuffle, deflate, and Adler32 checksum filters.
* Quincey Koziol, November 14, 2002
*
+ * Added Adler32 checksum filter testing.
+ * Raymond Lu, Jan 22, 2003
+ *
*-------------------------------------------------------------------------
*/
static herr_t
-test_compression(hid_t file)
+test_filters(hid_t file)
{
hid_t dc; /* Dataset creation property list ID */
const hsize_t chunk_size[2] = {2, 25}; /* Chunk dimensions */
hsize_t null_size; /* Size of dataset with null filter */
+#ifdef H5_HAVE_FILTER_ADLER32
+ hsize_t adler32_size; /* Size of dataset with Adler32 checksum */
+ unsigned data_corrupt[3]; /* position and length of data to be corrupted */
+#endif /* H5_HAVE_FILTER_ADLER32 */
#ifdef H5_HAVE_FILTER_DEFLATE
hsize_t deflate_size; /* Size of dataset with deflate filter */
#endif /* H5_HAVE_FILTER_DEFLATE */
#ifdef H5_HAVE_FILTER_SHUFFLE
hsize_t shuffle_size; /* Size of dataset with shuffle filter */
#endif /* H5_HAVE_FILTER_SHUFFLE */
-#if defined H5_HAVE_FILTER_DEFLATE && defined H5_HAVE_FILTER_SHUFFLE
- hsize_t shuff_def_size; /* Size of dataset with shuffle+deflate filter */
-#endif /* H5_HAVE_FILTER_DEFLATE && H5_HAVE_FILTER_SHUFFLE */
+#if defined H5_HAVE_FILTER_DEFLATE && defined H5_HAVE_FILTER_SHUFFLE && defined H5_HAVE_FILTER_ADLER32
+ hsize_t combo_size; /* Size of dataset with shuffle+deflate+checksum filters */
+#endif /* H5_HAVE_FILTER_DEFLATE && H5_HAVE_FILTER_SHUFFLE && H5_HAVE_FILTER_ADLER32 */
/* Test null I/O filter (by itself) */
puts("Testing 'null' filter");
@@ -875,44 +1096,95 @@ test_compression(hid_t file)
if (H5Zregister (H5Z_BOGUS, "bogus", bogus)<0) goto error;
if (H5Pset_filter (dc, H5Z_BOGUS, 0, 0, NULL)<0) goto error;
- if(test_compression_internal(file,DSET_BOGUS_NAME,dc,&null_size)<0) goto error;
+ if(test_filter_internal(file,DSET_BOGUS_NAME,dc,DISABLE_ADLER32,DATA_NOT_CORRUPTED,&null_size)<0) goto error;
/* Clean up objects used for this test */
if (H5Pclose (dc)<0) goto error;
-#ifdef H5_HAVE_FILTER_DEFLATE
- /* Test deflate I/O filter (by itself) */
- puts("Testing deflate filter");
+ /*----------------------------------------------------------
+ * STEP 1: Test Adler32 Checksum by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_ADLER32
+ puts("Testing Adler32 checksum(enabled for read)");
if((dc = H5Pcreate(H5P_DATASET_CREATE))<0) goto error;
if (H5Pset_chunk (dc, 2, chunk_size)<0) goto error;
- if (H5Pset_deflate (dc, 6)<0) goto error;
+ if (H5Pset_filter (dc,H5Z_FILTER_ADLER32,0,0,NULL)<0) goto error;
- if(test_compression_internal(file,DSET_DEFLATE_NAME,dc,&deflate_size)<0) goto error;
- if(deflate_size>=null_size) {
+ /* Enable checksum during read */
+ if(test_filter_internal(file,DSET_ADLER32_NAME,dc,ENABLE_ADLER32,DATA_NOT_CORRUPTED,&adler32_size)<0) goto error;
+ if(adler32_size<=null_size) {
H5_FAILED();
- puts(" Deflated size greater than uncompressed size.");
+ puts(" Size after checksumming is incorrect.");
+ goto error;
+ } /* end if */
+
+ /* Disable checksum during read */
+ puts("Testing Adler32 checksum(disabled for read)");
+ if(test_filter_internal(file,DSET_ADLER32_NAME_2,dc,DISABLE_ADLER32,DATA_NOT_CORRUPTED,&adler32_size)<0) goto error;
+ if(adler32_size<=null_size) {
+ H5_FAILED();
+ puts(" Size after checksumming is incorrect.");
+ goto error;
+ } /* end if */
+
+ /* Try to corrupt data and see if checksum fails */
+ puts("Testing Adler32 checksum(when data is corrupted)");
+ data_corrupt[0] = 52;
+ data_corrupt[1] = 33;
+ data_corrupt[2] = 27;
+
+ if (H5Zregister (H5Z_CORRUPT, "corrupt", corrupt_data)<0) goto error;
+ if (H5Pset_filter (dc, H5Z_CORRUPT, 0, 3, data_corrupt)<0) goto error;
+ if(test_filter_internal(file,DSET_ADLER32_NAME_3,dc,ENABLE_ADLER32,DATA_CORRUPTED,&adler32_size)<0) goto error;
+ if(adler32_size<=null_size) {
+ H5_FAILED();
+ puts(" Size after checksumming is incorrect.");
goto error;
} /* end if */
/* Clean up objects used for this test */
if (H5Pclose (dc)<0) goto error;
+#else /* H5_HAVE_FILTER_ADLER32 */
+ TESTING("adler32 checksum");
+ SKIPPED();
+ puts("adler32 checksum not enabled");
+#endif /* H5_HAVE_FILTER_ADLER32 */
+
+ /*----------------------------------------------------------
+ * STEP 2: Test deflation by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_DEFLATE
+ puts("Testing deflate filter");
+ if((dc = H5Pcreate(H5P_DATASET_CREATE))<0) goto error;
+ if (H5Pset_chunk (dc, 2, chunk_size)<0) goto error;
+ if (H5Pset_deflate (dc, 6)<0) goto error;
+
+ if(test_filter_internal(file,DSET_DEFLATE_NAME,dc,DISABLE_ADLER32,DATA_NOT_CORRUPTED,&deflate_size)<0) goto error;
+ /* Clean up objects used for this test */
+ if (H5Pclose (dc)<0) goto error;
#else /* H5_HAVE_FILTER_DEFLATE */
TESTING("deflate filter");
SKIPPED();
puts("Deflate filter not enabled");
#endif /* H5_HAVE_FILTER_DEFLATE */
+ /*----------------------------------------------------------
+ * STEP 3: Test shuffling by itself.
+ *----------------------------------------------------------
+ */
#ifdef H5_HAVE_FILTER_SHUFFLE
- /* Test shuffle I/O filter (by itself) */
puts("Testing shuffle filter");
if((dc = H5Pcreate(H5P_DATASET_CREATE))<0) goto error;
if (H5Pset_chunk (dc, 2, chunk_size)<0) goto error;
if (H5Pset_shuffle (dc, sizeof(int))<0) goto error;
- if(test_compression_internal(file,DSET_SHUFFLE_NAME,dc,&shuffle_size)<0) goto error;
+ if(test_filter_internal(file,DSET_SHUFFLE_NAME,dc,DISABLE_ADLER32,DATA_NOT_CORRUPTED,&shuffle_size)<0) goto error;
if(shuffle_size!=null_size) {
H5_FAILED();
puts(" Shuffled size not the same as uncompressed size.");
+printf("shuffle_size=%ld, null_size=%ld\n", shuffle_size, null_size);
goto error;
} /* end if */
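
The H5Z_CORRUPT "corrupt" filter registered in STEP 1 above is defined earlier in dsets.c and is not shown in this excerpt. A minimal sketch of such a corrupting filter, assuming the H5Z_func_t signature and that cd_values[] carries the position and length of the bytes to clobber (the real corrupt_data() may interpret its three values differently):

    static size_t
    corrupt_data(unsigned int flags, size_t cd_nelmts, const unsigned int cd_values[],
                 size_t nbytes, size_t *buf_size, void **buf)
    {
        unsigned char *data = (unsigned char *)(*buf);
        unsigned int   offset, length, i;

        if (cd_nelmts < 2 || data == NULL)
            return 0;                      /* zero signals filter failure */
        offset = cd_values[0];
        length = cd_values[1];
        if ((size_t)offset + length > nbytes)
            return 0;

        if (!(flags & H5Z_FLAG_REVERSE)) {
            /* Write path only: clobber some bytes so the Adler32 check
             * fails when the chunk is read back. */
            for (i = 0; i < length; i++)
                data[offset + i] ^= 0xff;
        }

        *buf_size = nbytes;
        return nbytes;                     /* bytes now present in *buf */
    }
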
@@ -924,29 +1196,39 @@ test_compression(hid_t file)
puts("Shuffle filter not enabled");
#endif /* H5_HAVE_FILTER_SHUFFLE */
-#if defined H5_HAVE_FILTER_DEFLATE && defined H5_HAVE_FILTER_SHUFFLE
- /* Test combination of deflate & shuffle I/O filters */
- puts("Testing shuffle+deflate filters");
+ /*----------------------------------------------------------
+ * STEP 4: Test shuffle + deflate + checksum in any order.
+ *----------------------------------------------------------
+ */
+#if defined H5_HAVE_FILTER_DEFLATE && defined H5_HAVE_FILTER_SHUFFLE && defined H5_HAVE_FILTER_ADLER32
+ puts("Testing shuffle+deflate+checksum filters(checksum first)");
if((dc = H5Pcreate(H5P_DATASET_CREATE))<0) goto error;
if (H5Pset_chunk (dc, 2, chunk_size)<0) goto error;
+ if (H5Pset_adler32 (dc)<0) goto error;
if (H5Pset_shuffle (dc, sizeof(int))<0) goto error;
if (H5Pset_deflate (dc, 6)<0) goto error;
- if(test_compression_internal(file,DSET_SHUFFLE_DEFLATE_NAME,dc,&shuff_def_size)<0) goto error;
- if(shuff_def_size>=deflate_size) {
- H5_FAILED();
- puts(" Shuffle+deflate size greater than plain deflated size.");
- goto error;
- } /* end if */
+ if(test_filter_internal(file,DSET_SHUF_DEF_FLET_NAME,dc,ENABLE_ADLER32,DATA_NOT_CORRUPTED,&combo_size)<0) goto error;
/* Clean up objects used for this test */
if (H5Pclose (dc)<0) goto error;
-#else /* H5_HAVE_FILTER_DEFLATE && H5_HAVE_FILTER_SHUFFLE */
- TESTING("shuffle+deflate filters");
- SKIPPED();
- puts("Deflate or shuffle filter not enabled");
-#endif /* H5_HAVE_FILTER_DEFLATE && H5_HAVE_FILTER_SHUFFLE */
+ puts("Testing shuffle+deflate+checksum filters(checksum last)");
+ if((dc = H5Pcreate(H5P_DATASET_CREATE))<0) goto error;
+ if (H5Pset_chunk (dc, 2, chunk_size)<0) goto error;
+ if (H5Pset_shuffle (dc, sizeof(int))<0) goto error;
+ if (H5Pset_deflate (dc, 6)<0) goto error;
+ if (H5Pset_adler32 (dc)<0) goto error;
+
+ if(test_filter_internal(file,DSET_SHUF_DEF_FLET_NAME_2,dc,ENABLE_ADLER32,DATA_NOT_CORRUPTED,&combo_size)<0) goto error;
+
+ /* Clean up objects used for this test */
+ if (H5Pclose (dc)<0) goto error;
+#else /* H5_HAVE_FILTER_DEFLATE && H5_HAVE_FILTER_SHUFFLE && H5_HAVE_FILTER_ADLER32 */
+ TESTING("shuffle+deflate+adler32 filters");
+ SKIPPED();
+ puts("Deflate, shuffle, or Adler32 checksum filter not enabled");
+#endif /* H5_HAVE_FILTER_DEFLATE && H5_HAVE_FILTER_SHUFFLE && H5_HAVE_FILTER_ADLER32 */
return 0;
error:
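
Taken together, the cases above reduce to a short recipe for enabling the new checksum on a dataset; a usage sketch restating only calls that appear in this patch (error checking omitted):

    hid_t   dc;
    hsize_t chunk_size[2] = {2, 25};

    /* Filters, including the checksum, apply only to chunked layout */
    dc = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dc, 2, chunk_size);

    /* Add the Adler32 checksum to the pipeline; H5Pset_adler32(dc) is the
     * equivalent convenience call used above.  The checksum's place in the
     * pipeline is simply the order of these H5Pset_* calls, so it may come
     * before or after shuffle and deflate. */
    H5Pset_filter(dc, H5Z_FILTER_ADLER32, 0, 0, NULL);
    H5Pset_shuffle(dc, sizeof(int));
    H5Pset_deflate(dc, 6);
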
@@ -1532,13 +1814,13 @@ main(void)
nerrors += test_simple_io(file, filename)<0 ?1:0;
nerrors += test_compact_io(fapl)<0 ?1:0;
nerrors += test_tconv(file)<0 ?1:0;
- nerrors += test_compression(file)<0 ?1:0;
- nerrors += test_missing_filter(file)<0 ?1:0;
+ nerrors += test_filters(file)<0 ?1:0;
nerrors += test_onebyte_shuffle(file)<0 ?1:0;
nerrors += test_multiopen (file)<0 ?1:0;
nerrors += test_types(file)<0 ?1:0;
nerrors += test_userblock_offset(fapl)<0 ?1:0;
-
+ nerrors += test_missing_filter(file)<0 ?1:0;
+
if (H5Fclose(file)<0) goto error;
if (nerrors) goto error;
printf("All dataset tests passed.\n");