author     Raymond Lu <songyulu@hdfgroup.org>        2012-08-08 16:08:27 (GMT)
committer  Raymond Lu <songyulu@hdfgroup.org>        2012-08-08 16:08:27 (GMT)
commit     faba610060cb168d97a7b9c01d95688542e2cf28 (patch)
tree       e37bba310e7129ae110f7f0e4ec5eb653601e8da /test
parent     8a0b4729cdc7b9edb18e84b2b9182228bd6eaa2e (diff)
[svn-r22641] Dectris project: I revised the code per Quincey's and Neil's comments. I added a performance benchmark program dectris_perf.c in the test/ directory.
Tested on koala and jam.
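
For readers skimming the patch: the core pattern the new dectris_perf.c benchmark times is sketched below — compress a chunk in application code with zlib, then hand the compressed bytes directly to the chunked dataset through H5PSIdirect_write (the experimental direct chunk write entry point on this branch, which now takes a filter_mask argument), bypassing datatype conversion and the filter pipeline. This is only a condensed sketch under assumptions: write_one_chunk and the CHUNK_* sizes are illustrative, compressBound() stands in for the benchmark's DEFLATE_SIZE_ADJUST macro, and the dataset is assumed to already exist with a deflate filter and matching chunk dimensions, as create_file() sets up in the new test.

#include "hdf5.h"      /* H5PSIdirect_write() is assumed visible via this branch's headers */
#include <zlib.h>
#include <stdlib.h>

#define CHUNK_NY 100
#define CHUNK_NZ 25

/* Illustrative helper: compress one CHUNK_NY x CHUNK_NZ chunk of unsigned ints
 * and write it straight into the chunk at logical index 'chunk_index' along
 * the first dimension of a rank-3, deflate-filtered dataset. */
static int
write_one_chunk(hid_t dset, hid_t dxpl, hsize_t chunk_index, const unsigned int *raw)
{
    size_t    src_nbytes  = CHUNK_NY * CHUNK_NZ * sizeof(unsigned int);
    uLongf    dst_nbytes  = compressBound((uLong)src_nbytes);
    Bytef    *dst         = (Bytef *)malloc((size_t)dst_nbytes);
    unsigned  filter_mask = 0;                     /* all filters considered applied */
    hsize_t   offset[3]   = {chunk_index, 0, 0};   /* chunk origin in dataset coordinates */

    if(dst == NULL)
        return -1;

    /* Compress in the application, as the benchmark does with compress2();
     * level 9 matches the aggression used for the dataset's deflate filter. */
    if(compress2(dst, &dst_nbytes, (const Bytef *)raw, (uLong)src_nbytes, 9) != Z_OK) {
        free(dst);
        return -1;
    }

    /* Direct chunk write: the compressed bytes land in the file as-is,
     * skipping the filter pipeline on the way down. */
    if(H5PSIdirect_write(dset, dxpl, filter_mask, offset, (size_t)dst_nbytes, dst) < 0) {
        free(dst);
        return -1;
    }

    free(dst);
    return 0;
}

In the benchmark, test_direct_write() loops a call like this over all NX chunks and reports MBytes/second via reportTime(), so the result can be compared against H5Dwrite with the deflate filter, H5Dwrite without compression, and a raw write(2) of the same compressed buffers to a Unix file.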
Diffstat (limited to 'test')
-rw-r--r--  test/Makefile.am    |   4
-rw-r--r--  test/Makefile.in    |  47
-rw-r--r--  test/dectris_perf.c | 535
-rw-r--r--  test/dectris_tst.c  |  79

4 files changed, 640 insertions, 25 deletions
diff --git a/test/Makefile.am b/test/Makefile.am index 0197e99..006e24b 100644 --- a/test/Makefile.am +++ b/test/Makefile.am @@ -40,7 +40,7 @@ TEST_PROG= testhdf5 lheap ohdr stab gheap cache cache_api cache_tagging \ pool accum hyperslab istore bittests dt_arith \ dtypes dsets cmpd_dset filter_fail extend external efc objcopy links unlink \ big mtime fillval mount flush1 flush2 app_ref enum \ - set_extent ttsafe dectris_tst \ + set_extent ttsafe dectris_tst dectris_perf \ getname vfd ntypes dangle dtransform reserved cross_read \ freespace mf farray earray btree2 fheap file_image @@ -128,7 +128,7 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse earray.h5 efc[0-5].h5 log_vfd_out.log \ new_multi_file_v16-r.h5 new_multi_file_v16-s.h5 \ split_get_file_image_test-m.h5 split_get_file_image_test-r.h5 \ - file_image_core_test.h5.copy dectris.h5 + file_image_core_test.h5.copy dectris.h5 dectris_perf.h5 # Sources for testhdf5 executable testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \ diff --git a/test/Makefile.in b/test/Makefile.in index ddc481e..21706d7 100644 --- a/test/Makefile.in +++ b/test/Makefile.in @@ -89,8 +89,8 @@ am__EXEEXT_1 = testhdf5$(EXEEXT) lheap$(EXEEXT) ohdr$(EXEEXT) \ unlink$(EXEEXT) big$(EXEEXT) mtime$(EXEEXT) fillval$(EXEEXT) \ mount$(EXEEXT) flush1$(EXEEXT) flush2$(EXEEXT) \ app_ref$(EXEEXT) enum$(EXEEXT) set_extent$(EXEEXT) \ - ttsafe$(EXEEXT) dectris_tst$(EXEEXT) getname$(EXEEXT) \ - vfd$(EXEEXT) ntypes$(EXEEXT) dangle$(EXEEXT) \ + ttsafe$(EXEEXT) dectris_tst$(EXEEXT) dectris_perf$(EXEEXT) \ + getname$(EXEEXT) vfd$(EXEEXT) ntypes$(EXEEXT) dangle$(EXEEXT) \ dtransform$(EXEEXT) reserved$(EXEEXT) cross_read$(EXEEXT) \ freespace$(EXEEXT) mf$(EXEEXT) farray$(EXEEXT) earray$(EXEEXT) \ btree2$(EXEEXT) fheap$(EXEEXT) file_image$(EXEEXT) @@ -148,6 +148,10 @@ dangle_SOURCES = dangle.c dangle_OBJECTS = dangle.$(OBJEXT) dangle_LDADD = $(LDADD) dangle_DEPENDENCIES = libh5test.la $(LIBHDF5) +dectris_perf_SOURCES = dectris_perf.c +dectris_perf_OBJECTS = dectris_perf.$(OBJEXT) +dectris_perf_LDADD = $(LDADD) +dectris_perf_DEPENDENCIES = libh5test.la $(LIBHDF5) dectris_tst_SOURCES = dectris_tst.c dectris_tst_OBJECTS = dectris_tst.$(OBJEXT) dectris_tst_LDADD = $(LDADD) @@ -430,23 +434,24 @@ am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) am__v_GEN_0 = @echo " GEN " $@; SOURCES = $(libh5test_la_SOURCES) accum.c app_ref.c big.c bittests.c \ btree2.c cache.c cache_api.c cache_tagging.c cmpd_dset.c \ - cross_read.c dangle.c dectris_tst.c dsets.c dt_arith.c \ - dtransform.c dtypes.c earray.c efc.c enum.c err_compat.c \ - error_test.c extend.c external.c farray.c fheap.c file_image.c \ - fillval.c filter_fail.c flush1.c flush2.c freespace.c \ - gen_bad_ohdr.c gen_bogus.c gen_cross.c gen_deflate.c \ - gen_file_image.c gen_filespace.c gen_filters.c gen_new_array.c \ - gen_new_fill.c gen_new_group.c gen_new_mtime.c gen_new_super.c \ - gen_noencoder.c gen_nullspace.c gen_sizes_lheap.c \ - gen_specmetaread.c gen_udlinks.c getname.c gheap.c hyperslab.c \ - istore.c lheap.c links.c links_env.c mf.c mount.c mtime.c \ - ntypes.c objcopy.c ohdr.c pool.c reserved.c set_extent.c \ - space_overflow.c stab.c tcheck_version.c $(testhdf5_SOURCES) \ - testmeta.c $(ttsafe_SOURCES) unlink.c vfd.c + cross_read.c dangle.c dectris_perf.c dectris_tst.c dsets.c \ + dt_arith.c dtransform.c dtypes.c earray.c efc.c enum.c \ + err_compat.c error_test.c extend.c external.c farray.c fheap.c \ + file_image.c fillval.c filter_fail.c flush1.c flush2.c \ + freespace.c 
gen_bad_ohdr.c gen_bogus.c gen_cross.c \ + gen_deflate.c gen_file_image.c gen_filespace.c gen_filters.c \ + gen_new_array.c gen_new_fill.c gen_new_group.c gen_new_mtime.c \ + gen_new_super.c gen_noencoder.c gen_nullspace.c \ + gen_sizes_lheap.c gen_specmetaread.c gen_udlinks.c getname.c \ + gheap.c hyperslab.c istore.c lheap.c links.c links_env.c mf.c \ + mount.c mtime.c ntypes.c objcopy.c ohdr.c pool.c reserved.c \ + set_extent.c space_overflow.c stab.c tcheck_version.c \ + $(testhdf5_SOURCES) testmeta.c $(ttsafe_SOURCES) unlink.c \ + vfd.c DIST_SOURCES = $(libh5test_la_SOURCES) accum.c app_ref.c big.c \ bittests.c btree2.c cache.c cache_api.c cache_tagging.c \ - cmpd_dset.c cross_read.c dangle.c dectris_tst.c dsets.c \ - dt_arith.c dtransform.c dtypes.c earray.c efc.c enum.c \ + cmpd_dset.c cross_read.c dangle.c dectris_perf.c dectris_tst.c \ + dsets.c dt_arith.c dtransform.c dtypes.c earray.c efc.c enum.c \ err_compat.c error_test.c extend.c external.c farray.c fheap.c \ file_image.c fillval.c filter_fail.c flush1.c flush2.c \ freespace.c gen_bad_ohdr.c gen_bogus.c gen_cross.c \ @@ -776,7 +781,7 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog accum.h5 cmpd_dset.h5 \ earray.h5 efc[0-5].h5 log_vfd_out.log new_multi_file_v16-r.h5 \ new_multi_file_v16-s.h5 split_get_file_image_test-m.h5 \ split_get_file_image_test-r.h5 file_image_core_test.h5.copy \ - dectris.h5 + dectris.h5 dectris_perf.h5 INCLUDES = -I$(top_srcdir)/src -I$(top_builddir)/src # Test script for error_test and err_compat @@ -795,7 +800,7 @@ TEST_PROG = testhdf5 lheap ohdr stab gheap cache cache_api cache_tagging \ pool accum hyperslab istore bittests dt_arith \ dtypes dsets cmpd_dset filter_fail extend external efc objcopy links unlink \ big mtime fillval mount flush1 flush2 app_ref enum \ - set_extent ttsafe dectris_tst \ + set_extent ttsafe dectris_tst dectris_perf \ getname vfd ntypes dangle dtransform reserved cross_read \ freespace mf farray earray btree2 fheap file_image @@ -958,6 +963,9 @@ cross_read$(EXEEXT): $(cross_read_OBJECTS) $(cross_read_DEPENDENCIES) dangle$(EXEEXT): $(dangle_OBJECTS) $(dangle_DEPENDENCIES) @rm -f dangle$(EXEEXT) $(AM_V_CCLD)$(LINK) $(dangle_OBJECTS) $(dangle_LDADD) $(LIBS) +dectris_perf$(EXEEXT): $(dectris_perf_OBJECTS) $(dectris_perf_DEPENDENCIES) + @rm -f dectris_perf$(EXEEXT) + $(AM_V_CCLD)$(LINK) $(dectris_perf_OBJECTS) $(dectris_perf_LDADD) $(LIBS) dectris_tst$(EXEEXT): $(dectris_tst_OBJECTS) $(dectris_tst_DEPENDENCIES) @rm -f dectris_tst$(EXEEXT) $(AM_V_CCLD)$(LINK) $(dectris_tst_OBJECTS) $(dectris_tst_LDADD) $(LIBS) @@ -1160,6 +1168,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cmpd_dset.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cross_read.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dangle.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dectris_perf.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dectris_tst.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dsets.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dt_arith.Po@am__quote@ diff --git a/test/dectris_perf.c b/test/dectris_perf.c new file mode 100644 index 0000000..e743a19 --- /dev/null +++ b/test/dectris_perf.c @@ -0,0 +1,535 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. 
The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * This test is for the DECTRIS project to the H5PSIdirect_write function + * + */ + +#include "h5test.h" +#include <zlib.h> +#include <math.h> +#include <stdlib.h> +#include <sys/time.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> + +const char *FILENAME[] = { + "dectris_perf", + "unix.raw", + NULL +}; + +#define DIRECT_DSET "direct_dset" +#define COMPRESSED_DSET "compressed_dset" +#define NO_COMPRESS_DSET "no_compress_dset" +#define RANK 3 +#define NX 100 +#define NY 100 +#define NZ 25 +#define CHUNK_NX 1 +#define CHUNK_NY 100 +#define CHUNK_NZ 25 + +#define DEFLATE_SIZE_ADJUST(s) (ceil(((double)(s))*1.001)+12) +char filename[1024]; +unsigned int *outbuf[NX]; +size_t data_size[NX]; +double total_size = 0.0; +unsigned int *direct_buf[NX]; +double MB = 1048576.0; + +/*-------------------------------------------------- + * Function to report IO rate + *-------------------------------------------------- + */ +void reportTime(struct timeval start, double mbytes) +{ + struct timeval timeval_stop,timeval_diff; + + /*end timing*/ + gettimeofday(&timeval_stop,NULL); + + /* Calculate the elapsed gettimeofday time */ + timeval_diff.tv_usec=timeval_stop.tv_usec-start.tv_usec; + timeval_diff.tv_sec=timeval_stop.tv_sec-start.tv_sec; + + if(timeval_diff.tv_usec<0) { + timeval_diff.tv_usec+=1000000; + timeval_diff.tv_sec--; + } /* end if */ + +/*printf("mbytes=%lf, sec=%lf, usec=%lf\n", mbytes, (double)timeval_diff.tv_sec, (double)timeval_diff.tv_usec);*/ + printf("MBytes/second: %lf\n", (double)mbytes/((double)timeval_diff.tv_sec+((double)timeval_diff.tv_usec/(double)1000000.0))); +} + +/*-------------------------------------------------- + * Create file, datasets, and initialize data + *-------------------------------------------------- + */ +int create_file(hid_t fapl_id) +{ + hid_t file; /* handles */ + hid_t fapl; + hid_t cparms; + hid_t dataspace, dataset; + hsize_t dims[RANK] = {NX, NY, NZ}; + hsize_t chunk_dims[RANK] ={CHUNK_NX, CHUNK_NY, CHUNK_NZ}; + unsigned int aggression = 9; /* Compression aggression setting */ + int ret; + int i, j, n; + + unsigned int *p; + size_t buf_size = CHUNK_NY*CHUNK_NZ*sizeof(unsigned int); + + const Bytef *z_src; + Bytef *z_dst; /*destination buffer */ + uLongf z_dst_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size); + uLong z_src_nbytes = (uLong)buf_size; + + TESTING("Create a file and dataset"); + + /* + * Create the data space with unlimited dimensions. + */ + if((dataspace = H5Screate_simple(RANK, dims, NULL)) < 0) + TEST_ERROR; + + /* + * Create a new file. If file exists its contents will be overwritten. + */ + if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) + TEST_ERROR; + + /* + * Modify dataset creation properties, i.e. 
enable chunking and compression + */ + if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR; + + if(H5Pset_chunk( cparms, RANK, chunk_dims) < 0) + TEST_ERROR; + + /* + * Create a new dataset within the file using cparms + * creation properties. + */ + if((dataset = H5Dcreate2(file, NO_COMPRESS_DSET, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, + cparms, H5P_DEFAULT)) < 0) + TEST_ERROR; + + if(H5Dclose(dataset) < 0) + TEST_ERROR; + + /* Set compression */ + if(H5Pset_deflate( cparms, aggression) < 0) + TEST_ERROR; + + if((dataset = H5Dcreate2(file, DIRECT_DSET, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, + cparms, H5P_DEFAULT)) < 0) + TEST_ERROR; + + if(H5Dclose(dataset) < 0) + TEST_ERROR; + + + if((dataset = H5Dcreate2(file, COMPRESSED_DSET, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, + cparms, H5P_DEFAULT)) < 0) + TEST_ERROR; + + if(H5Dclose(dataset) < 0) + TEST_ERROR; + + if(H5Fclose(file) < 0) + TEST_ERROR; + + if(H5Sclose(dataspace) < 0) + TEST_ERROR; + + if(H5Pclose(cparms) < 0) + TEST_ERROR; + + /* Initialize data for chunks */ + for(i = 0; i < NX; i++) { + p = direct_buf[i] = (unsigned int*)malloc(CHUNK_NY*CHUNK_NZ*sizeof(unsigned int)); + + for(j=0; j < CHUNK_NY*CHUNK_NZ; j++, p++) + *p = rand() % 65000; + + z_src = (const Bytef*)direct_buf[i]; + + z_dst_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size); + /* Allocate output (compressed) buffer */ + outbuf[i] = (unsigned int*)malloc((size_t)z_dst_nbytes); + z_dst = (Bytef *)outbuf[i]; + + /* Perform compression from the source to the destination buffer */ + ret = compress2(z_dst, &z_dst_nbytes, z_src, z_src_nbytes, aggression); + + data_size[i] = (size_t)z_dst_nbytes; + total_size += data_size[i]; + + /* Check for various zlib errors */ + if(Z_BUF_ERROR == ret) { + fprintf(stderr, "overflow"); + TEST_ERROR; + } else if(Z_MEM_ERROR == ret) { + fprintf(stderr, "deflate memory error"); + TEST_ERROR; + } else if(Z_OK != ret) { + fprintf(stderr, "other deflate error"); + TEST_ERROR; + } + } + + + PASSED(); + +error: + H5E_BEGIN_TRY { + H5Dclose(dataset); + H5Sclose(dataspace); + H5Pclose(cparms); + H5Fclose(file); + } H5E_END_TRY; + return 1; +} + +/*-------------------------------------------------- + * Benchmark the performance of the new function + *-------------------------------------------------- + */ +int +test_direct_write(hid_t fapl_id) +{ + hid_t file; /* handles */ + hid_t dataspace, dataset; + hid_t dxpl; + herr_t status; + int i; + + unsigned filter_mask = 0; + hsize_t offset[RANK+1] = {0, 0, 0, 0}; + + struct timeval timeval_start; + + TESTING("H5PSIdirect_write for DECTRIS project"); + + if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + TEST_ERROR; + + /* Start the timer */ + gettimeofday(&timeval_start,NULL); + + /* Reopen the file and dataset */ + if((file = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SYNC, fapl_id)) < 0) + TEST_ERROR; + + if((dataset = H5Dopen(file, DIRECT_DSET, H5P_DEFAULT)) < 0) + TEST_ERROR; + + + /* Write the compressed chunk data repeatedly to cover all the chunks in the + * dataset, using the direct writing function. */ + for(i=0; i<NX; i++) { + status = H5PSIdirect_write(dataset, dxpl, filter_mask, offset, data_size[i], outbuf[i]); + (offset[0])++; + } + + /* + * Close/release resources. 
+ */ + H5Dclose(dataset); + H5Pclose(dxpl); + H5Fclose(file); + + /* Report the performance */ + reportTime(timeval_start, (double)(total_size/MB)); + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Dclose(dataset); + H5Pclose(dxpl); + H5Fclose(file); + } H5E_END_TRY; + return 1; +} + +/*-------------------------------------------------- + * Benchmark the performance of the regular H5Dwrite + * with compression filter enabled. + *-------------------------------------------------- + */ +int +test_compressed_write(hid_t fapl_id) +{ + hid_t file; /* handles */ + hid_t dataspace, dataset; + hid_t mem_space; + hsize_t chunk_dims[RANK] ={CHUNK_NX, CHUNK_NY, CHUNK_NZ}; + hid_t dxpl; + herr_t status; + int i; + + hsize_t start[RANK]; /* Start of hyperslab */ + hsize_t stride[RANK]; /* Stride of hyperslab */ + hsize_t count[RANK]; /* Block count */ + hsize_t block[RANK]; /* Block sizes */ + + struct timeval timeval_start; + + TESTING("H5Dwrite with compression enabled"); + + if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + TEST_ERROR; + + if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0) + TEST_ERROR; + + /* Start the timer */ + gettimeofday(&timeval_start,NULL); + + /* Reopen the file and dataset */ + if((file = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SYNC, fapl_id)) < 0) + TEST_ERROR; + + if((dataset = H5Dopen(file, COMPRESSED_DSET, H5P_DEFAULT)) < 0) + TEST_ERROR; + + if((dataspace = H5Dget_space(dataset)) < 0) + TEST_ERROR; + + start[0] = start[1] = start[2] = 0; + stride[0] = stride[1] = stride[2] = 1; + count[0] = count[1] = count[2] = 1; + block[0] = CHUNK_NX; block[1] = CHUNK_NY; block[2] = CHUNK_NZ; + + for(i=0; i<NX; i++) { + /* + * Select hyperslab for one chunk in the file + */ + if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0) + TEST_ERROR; + (start[0])++; + + if((status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_space, dataspace, + H5P_DEFAULT, direct_buf[i])) < 0) + TEST_ERROR; + } + + /* + * Close/release resources. 
+ */ + H5Dclose(dataset); + H5Sclose(dataspace); + H5Sclose(mem_space); + H5Pclose(dxpl); + H5Fclose(file); + + /* Report the performance */ + reportTime(timeval_start, (double)(NX*NY*NZ*sizeof(unsigned int)/MB)); + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Dclose(dataset); + H5Sclose(dataspace); + H5Sclose(mem_space); + H5Pclose(dxpl); + H5Fclose(file); + } H5E_END_TRY; + return 1; +} + +/*-------------------------------------------------- + * Benchmark the performance of the regular H5Dwrite + * with compression + *-------------------------------------------------- + */ +int +test_no_compress_write(hid_t fapl_id) +{ + hid_t file; /* handles */ + hid_t dataspace, dataset; + hid_t mem_space; + hsize_t chunk_dims[RANK] ={CHUNK_NX, CHUNK_NY, CHUNK_NZ}; + hid_t dxpl; + herr_t status; + int i; + + hsize_t start[RANK]; /* Start of hyperslab */ + hsize_t stride[RANK]; /* Stride of hyperslab */ + hsize_t count[RANK]; /* Block count */ + hsize_t block[RANK]; /* Block sizes */ + + struct timeval timeval_start; + + TESTING("H5Dwrite without compression"); + + if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) + TEST_ERROR; + + if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0) + TEST_ERROR; + + /* Start the timer */ + gettimeofday(&timeval_start,NULL); + + /* Reopen the file and dataset */ + if((file = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SYNC, fapl_id)) < 0) + TEST_ERROR; + + if((dataset = H5Dopen(file, NO_COMPRESS_DSET, H5P_DEFAULT)) < 0) + TEST_ERROR; + + if((dataspace = H5Dget_space(dataset)) < 0) + TEST_ERROR; + + start[0] = start[1] = start[2] = 0; + stride[0] = stride[1] = stride[2] = 1; + count[0] = count[1] = count[2] = 1; + block[0] = CHUNK_NX; block[1] = CHUNK_NY; block[2] = CHUNK_NZ; + + for(i=0; i<NX; i++) { + /* + * Select hyperslab for one chunk in the file + */ + if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0) + TEST_ERROR; + (start[0])++; + + if((status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_space, dataspace, + H5P_DEFAULT, direct_buf[i])) < 0) + TEST_ERROR; + } + + /* + * Close/release resources. + */ + H5Dclose(dataset); + H5Sclose(dataspace); + H5Sclose(mem_space); + H5Pclose(dxpl); + H5Fclose(file); + + /* Report the performance */ + reportTime(timeval_start, (double)(NX*NY*NZ*sizeof(unsigned int)/MB)); + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Dclose(dataset); + H5Sclose(dataspace); + H5Sclose(mem_space); + H5Pclose(dxpl); + H5Fclose(file); + } H5E_END_TRY; + return 1; +} + +/*-------------------------------------------------- + * Benchmark the performance for writing compressed + * data to a Unix file + *-------------------------------------------------- + */ +int +test_unix_write(void) +{ + int file, flag; + ssize_t op_size; + int i; + struct timeval timeval_start; + + TESTING("Write compressed data to a Unix file"); + + /* create file*/ + flag = O_CREAT|O_TRUNC|O_WRONLY|O_SYNC; + + /* Start the timer */ + gettimeofday(&timeval_start,NULL); + + if ((file=open(FILENAME[1],flag,S_IRWXU))== -1) + TEST_ERROR; + + /* Write the compressed chunk data repeatedly to cover all the chunks in the + * dataset, using the direct writing function. 
*/ + for(i=0; i<NX; i++) { + op_size = write(file, outbuf[i],data_size[i]); + if (op_size < 0) + { + printf(" Error in writing data to file because %s \n", strerror(errno)); + TEST_ERROR; + } + else if (op_size == 0) + { + printf(" unable to write sufficent data to file because %s \n", strerror(errno)); + TEST_ERROR; + } + } + + if (close(file) < 0) + { + printf(" unable to close the file\n"); + TEST_ERROR; + } + + /* Report the performance */ + reportTime(timeval_start, (double)(total_size/MB)); + + PASSED(); + return 0; + +error: + return 1; +} + +/*-------------------------------------------------- + * Main function + *-------------------------------------------------- + */ +int +main (void) +{ + hid_t fapl; + int i; + + /* Testing setup */ + h5_reset(); + fapl = h5_fileaccess(); + + h5_fixname(FILENAME[0], fapl, filename, sizeof filename); + + create_file(fapl); + test_direct_write(fapl); + test_no_compress_write(fapl); + test_compressed_write(fapl); + test_unix_write(); + + for(i=0; i<NX; i++) { + free(outbuf[i]); + free(direct_buf[i]); + } + + h5_cleanup(FILENAME, fapl); + return 0; +} diff --git a/test/dectris_tst.c b/test/dectris_tst.c index f2cc356..a6bbaab 100644 --- a/test/dectris_tst.c +++ b/test/dectris_tst.c @@ -55,6 +55,7 @@ main (void) int check[NX][NY]; int i, j, n; + unsigned filter_mask = 0; int direct_buf[CHUNK_NX][CHUNK_NY]; int check_chunk[CHUNK_NX][CHUNK_NY]; hsize_t offset[3] = {0, 0, 0}; @@ -159,13 +160,15 @@ main (void) * dataset, using the direct writing function. */ for(i=0; i<NX/CHUNK_NX; i++) { for(j=0; j<NY/CHUNK_NY; j++) { - status = H5PSIdirect_write(dataset, dxpl, offset, z_dst_nbytes, outbuf); + status = H5PSIdirect_write(dataset, dxpl, filter_mask, offset, z_dst_nbytes, outbuf); offset[1] += CHUNK_NY; } offset[0] += CHUNK_NX; offset[1] = 0; } + free(outbuf); + if(H5Dclose(dataset) < 0) TEST_ERROR; @@ -197,13 +200,83 @@ main (void) for(i = 0; i < CHUNK_NX; i++) { for(j = 0; j < CHUNK_NY; j++) { if(direct_buf[i][j] != check_chunk[i][j]) { - printf(" Read different values than written."); + printf(" 1. Read different values than written."); printf(" At index %d,%d\n", i, j); printf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]); + goto error; } } } + /* Reinitialize different data for one chunk */ + for(i = 0; i < CHUNK_NX; i++) + for(j = 0; j < CHUNK_NY; j++) + direct_buf[i][j] = i + j; + + /* Allocate output (compressed) buffer */ + outbuf = malloc(z_dst_nbytes); + z_dst = (Bytef *)outbuf; + + /* Perform compression from the source to the destination buffer */ + ret = compress2(z_dst, &z_dst_nbytes, z_src, z_src_nbytes, aggression); + + /* Check for various zlib errors */ + if(Z_BUF_ERROR == ret) { + fprintf(stderr, "overflow"); + TEST_ERROR; + } else if(Z_MEM_ERROR == ret) { + fprintf(stderr, "deflate memory error"); + TEST_ERROR; + } else if(Z_OK != ret) { + fprintf(stderr, "other deflate error"); + TEST_ERROR; + } + + /* Rewrite the compressed chunk data repeatedly to cover all the chunks in the + * dataset, using the direct writing function. 
*/ + offset[0] = offset[1] = 0; + for(i=0; i<NX/CHUNK_NX; i++) { + for(j=0; j<NY/CHUNK_NY; j++) { + status = H5PSIdirect_write(dataset, dxpl, filter_mask, offset, z_dst_nbytes, outbuf); + offset[1] += CHUNK_NY; + } + offset[0] += CHUNK_NX; + offset[1] = 0; + } + + free(outbuf); + + if(H5Dclose(dataset) < 0) + TEST_ERROR; + + if(H5Fclose(file) < 0) + TEST_ERROR; + + /* Reopen the file and dataset */ + if((file = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) + TEST_ERROR; + + if((dataset = H5Dopen(file, DATASETNAME, H5P_DEFAULT)) < 0) + TEST_ERROR; + + /* Read the chunk back */ + if((status = H5Dread(dataset, H5T_NATIVE_INT, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0) + TEST_ERROR; + + /* Check that the values read are the same as the values written */ + for(i = 0; i < CHUNK_NX; i++) { + for(j = 0; j < CHUNK_NY; j++) { + if(direct_buf[i][j] != check_chunk[i][j]) { + printf(" 2. Read different values than written."); + printf(" At index %d,%d\n", i, j); + printf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]); + goto error; + } + } + } + + + /* * Close/release resources. */ @@ -214,8 +287,6 @@ main (void) H5Pclose(dxpl); H5Fclose(file); - free(outbuf); - h5_cleanup(FILENAME, fapl); PASSED(); return 0; |