summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDana Robinson <derobins@hdfgroup.org>2015-12-15 09:48:59 (GMT)
committerDana Robinson <derobins@hdfgroup.org>2015-12-15 09:48:59 (GMT)
commit4fe9d56f786f1f43e06655fa238ac7b11a194ba1 (patch)
tree7b1d0a0d729a2fe508efb0c46360d5204edec7b1
parente9da1c3460abdb32496f9271edf2a2dbbaa4e72b (diff)
downloadhdf5-4fe9d56f786f1f43e06655fa238ac7b11a194ba1.zip
hdf5-4fe9d56f786f1f43e06655fa238ac7b11a194ba1.tar.gz
hdf5-4fe9d56f786f1f43e06655fa238ac7b11a194ba1.tar.bz2
[svn-r28658] Manual merge of revise_chunks and alpha branch.
Tested on: Unbuntu 15.10 (Linux 4.2.0 x86_64) gcc 5.2.1 Autotools serial (C++ Fortran) Autotools parallel (MPICH 3.1.4, Fortran) CMake serial (CMake 3.3.2)
-rw-r--r--MANIFEST165
-rwxr-xr-xbin/cmakehdf5266
-rw-r--r--c++/test/tfile.cpp2
-rw-r--r--c++/test/th5s.h5bin2049 -> 2049 bytes
-rw-r--r--configure.ac9
-rw-r--r--hl/src/H5DO.c184
-rw-r--r--hl/src/H5DOpublic.h13
-rw-r--r--hl/src/H5LD.c639
-rw-r--r--hl/src/H5LDprivate.h49
-rw-r--r--hl/src/H5LDpublic.h33
-rw-r--r--hl/src/H5TB.c2
-rw-r--r--hl/src/Makefile.am4
-rw-r--r--hl/src/hdf5_hl.h1
-rw-r--r--hl/test/Makefile.am8
-rw-r--r--hl/test/gen_test_ld.c379
-rw-r--r--hl/test/test_dset_append.c1196
-rw-r--r--hl/test/test_dset_opt.c2
-rw-r--r--hl/test/test_file_image.c36
-rw-r--r--hl/test/test_ld.c1430
-rw-r--r--hl/test/test_ld.h5bin0 -> 42931 bytes
-rw-r--r--hl/test/test_table_be.h5bin55912 -> 55912 bytes
-rw-r--r--hl/test/test_table_cray.h5bin55912 -> 55912 bytes
-rw-r--r--hl/test/test_table_le.h5bin53880 -> 53880 bytes
-rw-r--r--hl/tools/Makefile.am4
-rw-r--r--hl/tools/h5watch/Makefile.am44
-rw-r--r--hl/tools/h5watch/extend_dset.c489
-rw-r--r--hl/tools/h5watch/h5watch.c977
-rw-r--r--hl/tools/h5watch/h5watchgentest.c355
-rw-r--r--hl/tools/h5watch/swmr_check_compat_vfd.c59
-rw-r--r--hl/tools/h5watch/testh5watch.sh.in395
-rw-r--r--hl/tools/testfiles/w-err-cmpd1.ddl5
-rw-r--r--hl/tools/testfiles/w-err-cmpd2.ddl5
-rw-r--r--hl/tools/testfiles/w-err-cmpd3.ddl5
-rw-r--r--hl/tools/testfiles/w-err-cmpd4.ddl5
-rw-r--r--hl/tools/testfiles/w-err-cmpd5.ddl5
-rw-r--r--hl/tools/testfiles/w-err-dset-nomax.ddl5
-rw-r--r--hl/tools/testfiles/w-err-dset-none.ddl5
-rw-r--r--hl/tools/testfiles/w-err-dset1.ddl5
-rw-r--r--hl/tools/testfiles/w-err-dset2.ddl5
-rw-r--r--hl/tools/testfiles/w-err-file.ddl4
-rw-r--r--hl/tools/testfiles/w-err-poll.ddl38
-rw-r--r--hl/tools/testfiles/w-err-poll0.ddl38
-rw-r--r--hl/tools/testfiles/w-err-width.ddl38
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-esc-f1.ddl14
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-esc-f3.ddl13
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-esc-ff2.ddl14
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-esc.ddl16
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-f1.ddl14
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-f2.ddl14
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-ff3.ddl13
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-label.ddl21
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-two-f1.ddl50
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-two-f3.ddl44
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-two-ff2.ddl50
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-two.ddl70
-rw-r--r--hl/tools/testfiles/w-ext-cmpd.ddl16
-rw-r--r--hl/tools/testfiles/w-ext-early.ddl13
-rw-r--r--hl/tools/testfiles/w-ext-late.ddl13
-rw-r--r--hl/tools/testfiles/w-ext-one-d.ddl9
-rw-r--r--hl/tools/testfiles/w-ext-one-simple.ddl16
-rw-r--r--hl/tools/testfiles/w-ext-one.ddl13
-rw-r--r--hl/tools/testfiles/w-ext-two-d.ddl21
-rw-r--r--hl/tools/testfiles/w-ext-two-width.ddl44
-rw-r--r--hl/tools/testfiles/w-ext-two.ddl40
-rw-r--r--hl/tools/testfiles/w-help1.ddl38
-rw-r--r--src/CMakeLists.txt12
-rw-r--r--src/H5AC.c527
-rw-r--r--src/H5AClog.c892
-rw-r--r--src/H5ACmpio.c2
-rw-r--r--src/H5ACpkg.h60
-rw-r--r--src/H5ACprivate.h82
-rw-r--r--src/H5ACpublic.h6
-rw-r--r--src/H5Adense.c97
-rw-r--r--src/H5Aint.c40
-rw-r--r--src/H5Apkg.h25
-rw-r--r--src/H5B.c36
-rw-r--r--src/H5B2.c251
-rw-r--r--src/H5B2cache.c494
-rw-r--r--src/H5B2dbg.c7
-rw-r--r--src/H5B2hdr.c19
-rw-r--r--src/H5B2int.c1416
-rw-r--r--src/H5B2pkg.h62
-rw-r--r--src/H5B2private.h21
-rw-r--r--src/H5B2stat.c2
-rw-r--r--src/H5B2test.c42
-rw-r--r--src/H5Bcache.c26
-rw-r--r--src/H5Bdbg.c4
-rw-r--r--src/H5Bpkg.h4
-rw-r--r--src/H5Bprivate.h2
-rw-r--r--src/H5C.c3058
-rw-r--r--src/H5Cpkg.h48
-rw-r--r--src/H5Cprivate.h245
-rw-r--r--src/H5D.c148
-rw-r--r--src/H5Dbtree.c2
-rw-r--r--src/H5Dbtree2.c1640
-rw-r--r--src/H5Dchunk.c1051
-rw-r--r--src/H5Dcompact.c4
-rw-r--r--src/H5Dcontig.c5
-rw-r--r--src/H5Dearray.c1866
-rw-r--r--src/H5Dfarray.c1757
-rw-r--r--src/H5Dint.c444
-rw-r--r--src/H5Dlayout.c297
-rw-r--r--src/H5Dnone.c497
-rw-r--r--src/H5Doh.c22
-rw-r--r--src/H5Dpkg.h105
-rw-r--r--src/H5Dprivate.h10
-rw-r--r--src/H5Dpublic.h22
-rw-r--r--src/H5Dsingle.c557
-rw-r--r--src/H5Dtest.c41
-rw-r--r--src/H5Dvirtual.c222
-rw-r--r--src/H5EA.c203
-rw-r--r--src/H5EAcache.c707
-rw-r--r--src/H5EAhdr.c31
-rw-r--r--src/H5EApkg.h82
-rw-r--r--src/H5EAprivate.h31
-rw-r--r--src/H5F.c441
-rw-r--r--src/H5FA.c113
-rw-r--r--src/H5FAcache.c411
-rw-r--r--src/H5FAhdr.c3
-rw-r--r--src/H5FAint.c139
-rw-r--r--src/H5FApkg.h34
-rw-r--r--src/H5FAprivate.h14
-rw-r--r--src/H5FD.c30
-rw-r--r--src/H5FDdirect.c1
-rw-r--r--src/H5FDint.c10
-rw-r--r--src/H5FDlog.c1
-rw-r--r--src/H5FDpkg.h1
-rw-r--r--src/H5FDprivate.h1
-rw-r--r--src/H5FDpublic.h7
-rw-r--r--src/H5FDsec2.c1
-rw-r--r--src/H5FDstdio.c7
-rw-r--r--src/H5FDtest.c119
-rw-r--r--src/H5FS.c79
-rw-r--r--src/H5FScache.c191
-rw-r--r--src/H5FSint.c145
-rw-r--r--src/H5FSpkg.h7
-rw-r--r--src/H5FSprivate.h3
-rw-r--r--src/H5Faccum.c11
-rw-r--r--src/H5Fint.c347
-rw-r--r--src/H5Fio.c238
-rw-r--r--src/H5Fpkg.h51
-rw-r--r--src/H5Fprivate.h68
-rw-r--r--src/H5Fpublic.h35
-rw-r--r--src/H5Fquery.c51
-rw-r--r--src/H5Fsuper.c54
-rw-r--r--src/H5Fsuper_cache.c555
-rw-r--r--src/H5G.c68
-rw-r--r--src/H5Gcache.c22
-rw-r--r--src/H5Gdense.c32
-rw-r--r--src/H5Gint.c11
-rw-r--r--src/H5Gobj.c4
-rw-r--r--src/H5Goh.c4
-rw-r--r--src/H5Gpublic.h2
-rw-r--r--src/H5Gstab.c8
-rw-r--r--src/H5Gtest.c6
-rw-r--r--src/H5HF.c87
-rw-r--r--src/H5HFcache.c474
-rw-r--r--src/H5HFhdr.c3
-rw-r--r--src/H5HFhuge.c25
-rw-r--r--src/H5HFint.c146
-rw-r--r--src/H5HFpkg.h15
-rw-r--r--src/H5HFprivate.h2
-rw-r--r--src/H5HFstat.c2
-rw-r--r--src/H5HG.c24
-rw-r--r--src/H5HGcache.c46
-rw-r--r--src/H5HGpkg.h6
-rw-r--r--src/H5HLcache.c86
-rw-r--r--src/H5I.c69
-rw-r--r--src/H5Iprivate.h1
-rw-r--r--src/H5Lexternal.c2
-rw-r--r--src/H5MF.c2
-rw-r--r--src/H5O.c249
-rw-r--r--src/H5Oainfo.c30
-rw-r--r--src/H5Oalloc.c92
-rw-r--r--src/H5Oattribute.c103
-rw-r--r--src/H5Ocache.c500
-rw-r--r--src/H5Ochunk.c26
-rw-r--r--src/H5Ocopy.c34
-rw-r--r--src/H5Oflush.c410
-rw-r--r--src/H5Olayout.c274
-rw-r--r--src/H5Opkg.h124
-rw-r--r--src/H5Oprivate.h104
-rw-r--r--src/H5Oproxy.c952
-rw-r--r--src/H5Opublic.h5
-rw-r--r--src/H5Otest.c35
-rw-r--r--src/H5Pdapl.c132
-rw-r--r--src/H5Pdcpl.c159
-rw-r--r--src/H5Pdxpl.c8
-rw-r--r--src/H5Pfapl.c571
-rw-r--r--src/H5Plapl.c4
-rw-r--r--src/H5Ppkg.h1
-rw-r--r--src/H5Ppublic.h12
-rw-r--r--src/H5Ptest.c46
-rw-r--r--src/H5SM.c20
-rw-r--r--src/H5SMcache.c139
-rw-r--r--src/H5T.c83
-rw-r--r--src/H5Tcommit.c2
-rw-r--r--src/H5Tpublic.h2
-rw-r--r--src/H5VMprivate.h33
-rw-r--r--src/H5err.txt3
-rw-r--r--src/H5public.h4
-rw-r--r--src/H5trace.c46
-rw-r--r--src/Makefile.am33
-rw-r--r--test/AtomicWriterReader.txt48
-rw-r--r--test/CMakeLists.txt20
-rw-r--r--test/CMakeTests.cmake19
-rw-r--r--test/Makefile.am71
-rw-r--r--test/POSIX_Order_Write_Test_Report.docxbin0 -> 145445 bytes
-rw-r--r--test/POSIX_Order_Write_Test_Report.pdfbin0 -> 84166 bytes
-rw-r--r--test/SWMR_POSIX_Order_UG.txt94
-rw-r--r--test/SWMR_UseCase_UG.txt223
-rw-r--r--test/accum.c342
-rw-r--r--test/accum_swmr_reader.c99
-rw-r--r--test/atomic_reader.c347
-rw-r--r--test/atomic_writer.c230
-rw-r--r--test/bad_compound.h5bin2208 -> 2208 bytes
-rw-r--r--test/btree2.c28
-rw-r--r--test/btree_idx_1_6.h5bin0 -> 6350 bytes
-rw-r--r--test/btree_idx_1_8.h5bin0 -> 5065 bytes
-rw-r--r--test/cache.c3368
-rw-r--r--test/cache_common.c736
-rw-r--r--test/cache_common.h44
-rw-r--r--test/cache_logging.c176
-rw-r--r--test/cork.c2191
-rw-r--r--test/corrupt_stab_msg.h5bin2928 -> 2928 bytes
-rw-r--r--test/deflate.h5bin6240 -> 6240 bytes
-rw-r--r--test/dsets.c3007
-rw-r--r--test/earray.c321
-rw-r--r--test/family_v16_00000.h5bin5120 -> 5120 bytes
-rw-r--r--test/filespace_1_6.h5bin2448 -> 2448 bytes
-rw-r--r--test/fill_old.h5bin2560 -> 2560 bytes
-rw-r--r--test/flush2.c68
-rw-r--r--test/flushrefresh.c1279
-rw-r--r--test/gen_idx.c126
-rw-r--r--test/gen_plist.c4
-rw-r--r--test/group_old.h5bin1952 -> 1952 bytes
-rw-r--r--test/h5test.c67
-rw-r--r--test/h5test.h6
-rw-r--r--test/links.c121
-rw-r--r--test/mergemsg.h5bin3472 -> 3472 bytes
-rw-r--r--test/multi_file_v16-s.h5bin2048 -> 2048 bytes
-rw-r--r--test/noencoder.h5bin8088 -> 8088 bytes
-rw-r--r--test/objcopy.c1171
-rw-r--r--test/ohdr.c164
-rw-r--r--test/set_extent.c446
-rw-r--r--test/swmr.c5465
-rw-r--r--test/swmr_addrem_writer.c458
-rw-r--r--test/swmr_check_compat_vfd.c59
-rw-r--r--test/swmr_common.c292
-rw-r--r--test/swmr_common.h78
-rw-r--r--test/swmr_generator.c355
-rw-r--r--test/swmr_reader.c509
-rw-r--r--test/swmr_remove_reader.c522
-rw-r--r--test/swmr_remove_writer.c383
-rw-r--r--test/swmr_sparse_reader.c451
-rw-r--r--test/swmr_sparse_writer.c460
-rw-r--r--test/swmr_start_write.c739
-rw-r--r--test/swmr_writer.c431
-rw-r--r--test/tarrold.h5bin6032 -> 6032 bytes
-rw-r--r--test/test_filters_be.h5bin5720 -> 5720 bytes
-rw-r--r--test/test_filters_le.h5bin5720 -> 5720 bytes
-rw-r--r--test/test_usecases.sh.in169
-rw-r--r--test/testflushrefresh.sh.in196
-rw-r--r--test/testswmr.sh.in497
-rw-r--r--test/testvdsswmr.sh.in199
-rw-r--r--test/tfile.c174
-rw-r--r--test/th5s.h5bin2049 -> 2049 bytes
-rw-r--r--test/tlayouto.h5bin1576 -> 1576 bytes
-rw-r--r--test/tmtimen.h5bin1576 -> 1576 bytes
-rw-r--r--test/tmtimeo.h5bin2052 -> 2052 bytes
-rw-r--r--test/twriteorder.c438
-rw-r--r--test/use.h69
-rw-r--r--test/use_append_chunk.c214
-rw-r--r--test/use_append_mchunks.c207
-rw-r--r--test/use_common.c631
-rw-r--r--test/use_disable_mdc_flushes.c531
-rw-r--r--test/vds_swmr.h165
-rw-r--r--test/vds_swmr_gen.c178
-rw-r--r--test/vds_swmr_reader.c144
-rw-r--r--test/vds_swmr_writer.c159
-rw-r--r--testpar/t_cache.c95
-rw-r--r--testpar/t_dset.c220
-rw-r--r--testpar/t_file_image.c6
-rw-r--r--testpar/t_filter_read.c127
-rw-r--r--tools/Makefile.am3
-rw-r--r--tools/h5copy/h5copygentest.c44
-rw-r--r--tools/h5copy/testfiles/h5copytst.h5bin30448 -> 30480 bytes
-rw-r--r--tools/h5copy/testfiles/h5copytst_new.h5bin0 -> 15029 bytes
-rw-r--r--tools/h5copy/testfiles/h5copytst_new.out.ls502
-rw-r--r--tools/h5diff/h5diffgentest.c101
-rw-r--r--tools/h5diff/testfiles/h5diff_dset_idx1.h5bin0 -> 5974 bytes
-rw-r--r--tools/h5diff/testfiles/h5diff_dset_idx2.h5bin0 -> 2206 bytes
-rw-r--r--tools/h5diff/testfiles/h5diff_hyper1.h5bin1052720 -> 1052720 bytes
-rw-r--r--tools/h5diff/testfiles/h5diff_hyper2.h5bin1052720 -> 1052720 bytes
-rw-r--r--tools/h5diff/testfiles/h5diff_idx.txt14
-rw-r--r--tools/h5diff/testfiles/tmptest.he5bin4740424 -> 4740424 bytes
-rw-r--r--tools/h5diff/testfiles/tmptest2.he5bin4734280 -> 4734280 bytes
-rw-r--r--tools/h5dump/h5dump_xml.c1
-rw-r--r--tools/h5dump/h5dumpgentest.c129
-rw-r--r--tools/h5format_convert/Makefile.am49
-rw-r--r--tools/h5format_convert/h5fc_chk_idx.c101
-rw-r--r--tools/h5format_convert/h5fc_gentest.c635
-rw-r--r--tools/h5format_convert/h5format_convert.c438
-rw-r--r--tools/h5format_convert/testfiles/h5fc_d_file.ddl22
-rw-r--r--tools/h5format_convert/testfiles/h5fc_dname.ddl22
-rw-r--r--tools/h5format_convert/testfiles/h5fc_edge_v3.h5bin0 -> 2526 bytes
-rw-r--r--tools/h5format_convert/testfiles/h5fc_help.ddl21
-rw-r--r--tools/h5format_convert/testfiles/h5fc_latest_v3.h5bin0 -> 6130 bytes
-rw-r--r--tools/h5format_convert/testfiles/h5fc_non_v3.h5bin0 -> 4336 bytes
-rw-r--r--tools/h5format_convert/testfiles/h5fc_nonexistdset_file.ddl1
-rw-r--r--tools/h5format_convert/testfiles/h5fc_nonexistfile.ddl1
-rw-r--r--tools/h5format_convert/testfiles/h5fc_nooption.ddl21
-rw-r--r--tools/h5format_convert/testfiles/h5fc_v1.h5bin0 -> 8252 bytes
-rw-r--r--tools/h5format_convert/testfiles/h5fc_v_all.ddl26
-rw-r--r--tools/h5format_convert/testfiles/h5fc_v_bt1.ddl11
-rw-r--r--tools/h5format_convert/testfiles/h5fc_v_n_1d.ddl13
-rw-r--r--tools/h5format_convert/testfiles/h5fc_v_n_all.ddl47
-rw-r--r--tools/h5format_convert/testfiles/h5fc_v_ndata_bt1.ddl12
-rw-r--r--tools/h5format_convert/testfiles/h5fc_v_non_chunked.ddl9
-rw-r--r--tools/h5format_convert/testh5fc.sh.in400
-rw-r--r--tools/h5import/testfiles/binfp64.h5bin10760 -> 10760 bytes
-rw-r--r--tools/h5import/testfiles/binin16.h5bin10760 -> 10760 bytes
-rw-r--r--tools/h5import/testfiles/binin32.h5bin9472 -> 9472 bytes
-rw-r--r--tools/h5import/testfiles/binin8.h5bin10760 -> 10760 bytes
-rw-r--r--tools/h5import/testfiles/binuin16.h5bin10760 -> 10760 bytes
-rw-r--r--tools/h5import/testfiles/binuin32.h5bin6384 -> 6384 bytes
-rw-r--r--tools/h5import/testfiles/txtfp32.h5bin4192 -> 4192 bytes
-rw-r--r--tools/h5import/testfiles/txtfp64.h5bin9784 -> 9784 bytes
-rw-r--r--tools/h5import/testfiles/txtin16.h5bin9784 -> 9784 bytes
-rw-r--r--tools/h5import/testfiles/txtin32.h5bin4192 -> 4192 bytes
-rw-r--r--tools/h5import/testfiles/txtin8.h5bin9784 -> 9784 bytes
-rw-r--r--tools/h5import/testfiles/txtuin16.h5bin10240 -> 10240 bytes
-rw-r--r--tools/h5import/testfiles/txtuin32.h5bin6240 -> 6240 bytes
-rw-r--r--tools/h5ls/h5ls.c1
-rw-r--r--tools/h5ls/testh5ls.sh.in5
-rw-r--r--tools/h5repack/h5repack.c4
-rw-r--r--tools/h5repack/h5repack.h5
-rw-r--r--tools/h5repack/h5repack_main.c13
-rw-r--r--tools/h5repack/h5repacktst.c167
-rw-r--r--tools/h5repack/testfiles/h5repack_attr.h5bin20056 -> 20056 bytes
-rw-r--r--tools/h5repack/testfiles/h5repack_deflate.h5bin5962 -> 5962 bytes
-rw-r--r--tools/h5repack/testfiles/h5repack_early.h5bin2067224 -> 2067224 bytes
-rw-r--r--tools/h5repack/testfiles/h5repack_fill.h5bin2072 -> 2072 bytes
-rw-r--r--tools/h5repack/testfiles/h5repack_filters.h5bin29744 -> 29744 bytes
-rw-r--r--tools/h5repack/testfiles/h5repack_fletcher.h5bin7880 -> 7880 bytes
-rw-r--r--tools/h5repack/testfiles/h5repack_hlink.h5bin6576 -> 6576 bytes
-rw-r--r--tools/h5repack/testfiles/h5repack_layouto.h5bin1576 -> 1576 bytes
-rw-r--r--tools/h5repack/testfiles/h5repack_nbit.h5bin13776 -> 13776 bytes
-rw-r--r--tools/h5repack/testfiles/h5repack_shuffle.h5bin7864 -> 7864 bytes
-rw-r--r--tools/h5repack/testfiles/h5repack_soffset.h5bin11052 -> 11052 bytes
-rw-r--r--tools/h5repack/testfiles/h5repack_szip.h5bin5588 -> 5588 bytes
-rw-r--r--tools/h5stat/h5stat_gentest.c119
-rw-r--r--tools/h5stat/testfiles/h5stat_filters.h5bin46272 -> 46272 bytes
-rw-r--r--tools/h5stat/testfiles/h5stat_idx.ddl93
-rw-r--r--tools/h5stat/testfiles/h5stat_idx.h5bin0 -> 2206 bytes
-rw-r--r--tools/h5stat/testh5stat.sh.in4
-rw-r--r--tools/lib/h5tools.h11
-rw-r--r--tools/lib/h5tools_dump.c4
-rw-r--r--tools/lib/h5tools_str.c67
-rw-r--r--tools/misc/Makefile.am14
-rw-r--r--tools/misc/clear_open_chk.c72
-rw-r--r--tools/misc/h5clear.c137
-rw-r--r--tools/misc/h5clear_gentest.c174
-rw-r--r--tools/misc/h5debug.c52
-rw-r--r--tools/misc/testh5clear.sh.in130
-rw-r--r--tools/testfiles/family_file00000.h5bin1024 -> 1024 bytes
-rw-r--r--tools/testfiles/taindices.h5bin17160 -> 17160 bytes
-rw-r--r--tools/testfiles/tarray1.h5bin2112 -> 2112 bytes
-rw-r--r--tools/testfiles/tarray2.h5bin3008 -> 3008 bytes
-rw-r--r--tools/testfiles/tarray3.h5bin3200 -> 3200 bytes
-rw-r--r--tools/testfiles/tarray4.h5bin2176 -> 2176 bytes
-rw-r--r--tools/testfiles/tarray5.h5bin2368 -> 2368 bytes
-rw-r--r--tools/testfiles/tarray6.h5bin6400 -> 6400 bytes
-rw-r--r--tools/testfiles/tarray7.h5bin6400 -> 6400 bytes
-rw-r--r--tools/testfiles/tattr.h5bin3024 -> 3024 bytes
-rw-r--r--tools/testfiles/tattr2.h5bin33840 -> 33840 bytes
-rw-r--r--tools/testfiles/tbigdims.h5bin6192 -> 6192 bytes
-rw-r--r--tools/testfiles/tbitfields.h5bin2704 -> 2704 bytes
-rw-r--r--tools/testfiles/tchar.h5bin2356 -> 2356 bytes
-rw-r--r--tools/testfiles/tcompound.h5bin8192 -> 8192 bytes
-rw-r--r--tools/testfiles/tcompound2.h5bin13640 -> 13640 bytes
-rw-r--r--tools/testfiles/tcompound_complex.h5bin8192 -> 8192 bytes
-rw-r--r--tools/testfiles/tdatareg.h5bin5652 -> 5652 bytes
-rw-r--r--tools/testfiles/tdset.h5bin7648 -> 7648 bytes
-rw-r--r--tools/testfiles/tdset2.h5bin9936 -> 9936 bytes
-rw-r--r--tools/testfiles/tdset_idx.ddl61
-rw-r--r--tools/testfiles/tdset_idx.h5bin0 -> 10562 bytes
-rw-r--r--tools/testfiles/tdset_idx.ls36
-rw-r--r--tools/testfiles/tempty.h5bin4304 -> 4304 bytes
-rw-r--r--tools/testfiles/tenum.h5bin2128 -> 2128 bytes
-rw-r--r--tools/testfiles/tfamily00000.h5bin256 -> 256 bytes
-rw-r--r--tools/testfiles/tfcontents2.h5bin792 -> 792 bytes
-rw-r--r--tools/testfiles/tfvalues.h5bin9552 -> 9552 bytes
-rw-r--r--tools/testfiles/tgroup.h5bin11096 -> 11096 bytes
-rw-r--r--tools/testfiles/thlink.h5bin5536 -> 5536 bytes
-rw-r--r--tools/testfiles/thyperslab.h5bin1050880 -> 1050880 bytes
-rw-r--r--tools/testfiles/tlarge_objname.h5bin40008 -> 40008 bytes
-rw-r--r--tools/testfiles/tlonglinks.h5bin203168 -> 203168 bytes
-rw-r--r--tools/testfiles/tloop.h5bin3168 -> 3168 bytes
-rw-r--r--tools/testfiles/tloop2.h5bin3168 -> 3168 bytes
-rw-r--r--tools/testfiles/tmulti-s.h5bin2048 -> 2048 bytes
-rw-r--r--tools/testfiles/tname-amp.h5bin2880 -> 2880 bytes
-rw-r--r--tools/testfiles/tname-apos.h5bin2880 -> 2880 bytes
-rw-r--r--tools/testfiles/tname-gt.h5bin2880 -> 2880 bytes
-rw-r--r--tools/testfiles/tname-lt.h5bin2880 -> 2880 bytes
-rw-r--r--tools/testfiles/tname-quot.h5bin2880 -> 2880 bytes
-rw-r--r--tools/testfiles/tname-sp.h5bin2880 -> 2880 bytes
-rw-r--r--tools/testfiles/tnestedcomp.h5bin2072 -> 2072 bytes
-rw-r--r--tools/testfiles/tnodata.h5bin1412 -> 1412 bytes
-rw-r--r--tools/testfiles/tnullspace.h5bin3624 -> 3624 bytes
-rw-r--r--tools/testfiles/tobjref.h5bin2900 -> 2900 bytes
-rw-r--r--tools/testfiles/topaque.h5bin1744 -> 1744 bytes
-rw-r--r--tools/testfiles/tref-escapes-at.h5bin5849 -> 5849 bytes
-rw-r--r--tools/testfiles/tref-escapes.h5bin5536 -> 5536 bytes
-rw-r--r--tools/testfiles/tref.h5bin3004 -> 3004 bytes
-rw-r--r--tools/testfiles/tsaf.h5bin769444 -> 769444 bytes
-rw-r--r--tools/testfiles/tslink.h5bin1168 -> 1168 bytes
-rw-r--r--tools/testfiles/tsplit_file-m.h5bin2048 -> 2048 bytes
-rw-r--r--tools/testfiles/tstr.h5bin15608 -> 15608 bytes
-rw-r--r--tools/testfiles/tstr2.h5bin11096 -> 11096 bytes
-rw-r--r--tools/testfiles/tstr3.h5bin8736 -> 8736 bytes
-rw-r--r--tools/testfiles/tstring-at.h5bin1672 -> 1672 bytes
-rw-r--r--tools/testfiles/tstring.h5bin2160 -> 2160 bytes
-rw-r--r--tools/testfiles/tvldtypes1.h5bin8336 -> 8336 bytes
-rw-r--r--tools/testfiles/tvldtypes2.h5bin6208 -> 6208 bytes
-rw-r--r--tools/testfiles/tvldtypes3.h5bin6240 -> 6240 bytes
-rw-r--r--tools/testfiles/tvldtypes4.h5bin8192 -> 8192 bytes
-rw-r--r--tools/testfiles/tvldtypes5.h5bin8192 -> 8192 bytes
-rw-r--r--tools/testfiles/tvms.h5bin2288 -> 2288 bytes
429 files changed, 61306 insertions, 5158 deletions
diff --git a/MANIFEST b/MANIFEST
index 3923060..407b87a 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -442,6 +442,7 @@
./src/H5Aprivate.h
./src/H5Apublic.h
./src/H5AC.c
+./src/H5AClog.c
./src/H5ACmodule.h
./src/H5ACmpio.c
./src/H5ACpkg.h
@@ -475,24 +476,29 @@
./src/H5CSprivate.h
./src/H5D.c
./src/H5Dbtree.c
+./src/H5Dbtree2.c
./src/H5Dchunk.c
./src/H5Dcompact.c
./src/H5Dcontig.c
./src/H5Ddbg.c
./src/H5Ddeprec.c
+./src/H5Dearray.c
./src/H5Defl.c
+./src/H5Dfarray.c
./src/H5Dfill.c
./src/H5Dint.c
./src/H5Dio.c
./src/H5Dlayout.c
./src/H5Dmodule.h
./src/H5Dmpio.c
+./src/H5Dnone.c
./src/H5Doh.c
./src/H5Dpkg.h
./src/H5Dprivate.h
./src/H5Dpublic.h
./src/H5Dscatgath.c
./src/H5Dselect.c
+./src/H5Dsingle.c
./src/H5Dtest.c
./src/H5Dvirtual.c
./src/H5E.c
@@ -542,6 +548,7 @@
./src/H5FAdblkpage.c
./src/H5FAdblock.c
./src/H5FAhdr.c
+./src/H5FAint.c
./src/H5FAmodule.h
./src/H5FApkg.h
./src/H5FAprivate.h
@@ -573,6 +580,7 @@
./src/H5FDspace.c
./src/H5FDstdio.c
./src/H5FDstdio.h
+./src/H5FDtest.c
./src/H5FDwindows.c
./src/H5FDwindows.h
./src/H5FL.c
@@ -583,6 +591,7 @@
./src/H5FS.c
./src/H5FScache.c
./src/H5FSdbg.c
+./src/H5FSint.c
./src/H5FSmodule.h
./src/H5FSpkg.h
./src/H5FSprivate.h
@@ -621,6 +630,7 @@
./src/H5HFhdr.c
./src/H5HFhuge.c
./src/H5HFiblock.c
+./src/H5HFint.c
./src/H5HFiter.c
./src/H5HFman.c
./src/H5HFmodule.h
@@ -695,6 +705,7 @@
./src/H5Odtype.c
./src/H5Oefl.c
./src/H5Ofill.c
+./src/H5Oflush.c
./src/H5Ofsinfo.c
./src/H5Oginfo.c
./src/H5Olayout.c
@@ -708,6 +719,7 @@
./src/H5Opkg.h
./src/H5Opline.c
./src/H5Oprivate.h
+./src/H5Oproxy.c
./src/H5Opublic.h
./src/H5Orefcount.c
./src/H5Osdspace.c
@@ -830,25 +842,37 @@
./src/libhdf5.settings.in
./src/H5win32defs.h
+./test/AtomicWriterReader.txt
./test/COPYING
./test/H5srcdir.h
./test/H5srcdir_str.h.in
./test/Makefile.am
+./test/POSIX_Order_Write_Test_Report.docx
+./test/POSIX_Order_Write_Test_Report.pdf
+./test/SWMR_POSIX_Order_UG.txt
+./test/SWMR_UseCase_UG.txt
./test/accum.c
+./test/accum_swmr_reader.c
./test/app_ref.c
+./test/atomic_reader.c
+./test/atomic_writer.c
./test/bad_compound.h5
./test/be_data.h5
./test/be_extlink1.h5
./test/be_extlink2.h5
./test/big.c
./test/bittests.c
+./test/btree_idx_1_6.h5
+./test/btree_idx_1_8.h5
./test/btree2.c
./test/cache.c
./test/cache_api.c
./test/cache_common.c
./test/cache_common.h
+./test/cache_logging.c
./test/cache_tagging.c
./test/cmpd_dset.c
+./test/cork.c
./test/corrupt_stab_msg.h5
./test/cross_read.c
./test/dangle.c
@@ -887,6 +911,7 @@
./test/filter_fail.c
./test/flush1.c
./test/flush2.c
+./test/flushrefresh.c
./test/gen_bad_ohdr.c
./test/gen_bad_compound.c
./test/gen_bogus.c
@@ -894,6 +919,7 @@
./test/gen_deflate.c
./test/gen_file_image.c
./test/gen_filespace.c
+./test/gen_idx.c
./test/gen_mergemsg.c
./test/gen_new_array.c
./test/gen_new_fill.c
@@ -941,6 +967,19 @@
# ====end distribute this for now. See HDFFV-8236====
./test/specmetaread.h5
./test/stab.c
+./test/swmr.c
+./test/swmr_addrem_writer.c
+./test/swmr_check_compat_vfd.c
+./test/swmr_common.c
+./test/swmr_common.h
+./test/swmr_generator.c
+./test/swmr_reader.c
+./test/swmr_remove_reader.c
+./test/swmr_remove_writer.c
+./test/swmr_sparse_reader.c
+./test/swmr_sparse_writer.c
+./test/swmr_start_write.c
+./test/swmr_writer.c
./test/tarray.c
./test/tarrold.h5
./test/tattr.c
@@ -953,12 +992,16 @@
./test/testcheck_version.sh.in
./test/testerror.sh.in
./test/testlinks_env.sh.in
+./test/testflushrefresh.sh.in
./test/testframe.c
./test/testhdf5.c
./test/testhdf5.h
./test/testlibinfo.sh.in
./test/test_plugin.sh.in
+./test/test_usecases.sh.in
./test/testmeta.c
+./test/testswmr.sh.in
+./test/testvdsswmr.sh.in
./test/tfile.c
./test/tgenprop.c
./test/th5o.c
@@ -989,14 +1032,24 @@
./test/tunicode.c
./test/tvlstr.c
./test/tvltypes.c
+./test/twriteorder.c
./test/unlink.c
./test/unregister.c
+./test/use_append_chunk.c
+./test/use_append_mchunks.c
+./test/use_common.c
+./test/use_disable_mdc_flushes.c
+./test/use.h
./test/vfd.c
./test/test_filters_le.h5
./test/test_filters_be.h5
./test/gen_filters.c
./test/chunk_info.c
./test/vds.c
+./test/vds_swmr.h
+./test/vds_swmr_gen.c
+./test/vds_swmr_reader.c
+./test/vds_swmr_writer.c
./test/testfiles/err_compat_1
./test/testfiles/err_compat_2
@@ -1210,6 +1263,29 @@
./tools/h5diff/testh5diff.sh.in
./tools/h5diff/testph5diff.sh.in
+# h5format_convert sources
+./tools/h5format_convert/Makefile.am
+./tools/h5format_convert/h5fc_chk_idx.c
+./tools/h5format_convert/h5fc_gentest.c
+./tools/h5format_convert/h5format_convert.c
+./tools/h5format_convert/testfiles/h5fc_v_n_all.ddl
+./tools/h5format_convert/testfiles/h5fc_v_bt1.ddl
+./tools/h5format_convert/testfiles/h5fc_v_non_chunked.ddl
+./tools/h5format_convert/testfiles/h5fc_d_file.ddl
+./tools/h5format_convert/testfiles/h5fc_v_ndata_bt1.ddl
+./tools/h5format_convert/testfiles/h5fc_dname.ddl
+./tools/h5format_convert/testfiles/h5fc_nonexistdset_file.ddl
+./tools/h5format_convert/testfiles/h5fc_help.ddl
+./tools/h5format_convert/testfiles/h5fc_v_all.ddl
+./tools/h5format_convert/testfiles/h5fc_nooption.ddl
+./tools/h5format_convert/testfiles/h5fc_v_n_1d.ddl
+./tools/h5format_convert/testfiles/h5fc_nonexistfile.ddl
+./tools/h5format_convert/testfiles/h5fc_non_v3.h5
+./tools/h5format_convert/testfiles/h5fc_latest_v3.h5
+./tools/h5format_convert/testfiles/h5fc_edge_v3.h5
+./tools/h5format_convert/testfiles/h5fc_v1.h5
+./tools/h5format_convert/testh5fc.sh.in
+
# h5repack sources
./tools/h5repack/Makefile.am
./tools/h5repack/dynlib_rpk.c
@@ -1269,6 +1345,10 @@
./tools/misc/Makefile.am
./tools/misc/h5cc.in
+./tools/misc/h5clear.c
+./tools/misc/h5clear_gentest.c
+./tools/misc/clear_open_chk.c
+./tools/misc/testh5clear.sh.in
./tools/misc/h5debug.c
./tools/misc/h5mkgrp.c
./tools/misc/h5redeploy.in
@@ -1318,6 +1398,8 @@
./tools/h5stat/testfiles/h5stat_filters.h5
./tools/h5stat/testfiles/h5stat_help1.ddl
./tools/h5stat/testfiles/h5stat_help2.ddl
+./tools/h5stat/testfiles/h5stat_idx.h5
+./tools/h5stat/testfiles/h5stat_idx.ddl
./tools/h5stat/testfiles/h5stat_links1.ddl
./tools/h5stat/testfiles/h5stat_links2.ddl
./tools/h5stat/testfiles/h5stat_links3.ddl
@@ -1457,6 +1539,9 @@
./tools/testfiles/tdset-3s.ddl
./tools/testfiles/tdset.h5
./tools/testfiles/tdset2.h5
+./tools/testfiles/tdset_idx.ls
+./tools/testfiles/tdset_idx.ddl
+./tools/testfiles/tdset_idx.h5
./tools/testfiles/tempty.ddl
./tools/testfiles/tempty.h5
./tools/testfiles/tenum.h5
@@ -2058,6 +2143,7 @@
./tools/h5diff/testfiles/h5diff_457.txt
./tools/h5diff/testfiles/h5diff_458.txt
./tools/h5diff/testfiles/h5diff_459.txt
+./tools/h5diff/testfiles/h5diff_idx.txt
./tools/h5diff/testfiles/h5diff_465.txt
./tools/h5diff/testfiles/h5diff_466.txt
./tools/h5diff/testfiles/h5diff_467.txt
@@ -2106,6 +2192,8 @@
./tools/h5diff/testfiles/h5diff_basic2.h5
./tools/h5diff/testfiles/h5diff_dset1.h5
./tools/h5diff/testfiles/h5diff_dset2.h5
+./tools/h5diff/testfiles/h5diff_dset_idx1.h5
+./tools/h5diff/testfiles/h5diff_dset_idx2.h5
./tools/h5diff/testfiles/h5diff_dtypes.h5
./tools/h5diff/testfiles/h5diff_empty.h5
./tools/h5diff/testfiles/h5diff_hyper1.h5
@@ -2218,6 +2306,8 @@
./tools/h5copy/testfiles/h5copy_extlinks_trg.h5
./tools/h5copy/testfiles/h5copy_extlinks_src.out.ls
./tools/h5copy/testfiles/h5copy_misc1.out
+./tools/h5copy/testfiles/h5copytst_new.h5
+./tools/h5copy/testfiles/h5copytst_new.out.ls
# test files for h5mkgrp
./tools/testfiles/h5mkgrp_nested_p.ls
@@ -2294,6 +2384,9 @@
./hl/src/H5IM.c
./hl/src/H5IMprivate.h
./hl/src/H5IMpublic.h
+./hl/src/H5LD.c
+./hl/src/H5LDprivate.h
+./hl/src/H5LDpublic.h
./hl/src/H5LT.c
./hl/src/H5LTanalyze.c
./hl/src/H5LTanalyze.l
@@ -2310,34 +2403,38 @@
./hl/src/H5TBpublic.h
./hl/src/hdf5_hl.h
./hl/test/COPYING
-./hl/test/H5srcdir_str.h.in
-./hl/test/Makefile.am
+./hl/test/dectris_hl_perf.c
./hl/test/dsdata.txt
./hl/test/dslat.txt
./hl/test/dslon.txt
-./hl/test/dtype_file.txt
./hl/test/dtype_file_readable.txt
+./hl/test/dtype_file.txt
./hl/test/earth.pal
+./hl/test/gen_test_ds.c
+./hl/test/gen_test_ld.c
./hl/test/h5hltest.h
-./hl/test/image8.txt
+./hl/test/H5srcdir_str.h.in
./hl/test/image24pixel.txt
./hl/test/image24plane.txt
+./hl/test/image8.txt
+./hl/test/Makefile.am
./hl/test/pal_rgb.h
./hl/test/sepia.pal
-./hl/test/dectris_hl_perf.c
-./hl/test/gen_test_ds.c
+./hl/test/test_ds_be.h5
./hl/test/test_ds.c
+./hl/test/test_dset_append.c
./hl/test/test_dset_opt.c
+./hl/test/test_ds_le.h5
./hl/test/test_file_image.c
./hl/test/test_image.c
+./hl/test/test_ld.c
+./hl/test/test_ld.h5
./hl/test/test_lite.c
./hl/test/test_packet.c
-./hl/test/test_table.c
-./hl/test/test_ds_le.h5
-./hl/test/test_ds_be.h5
-./hl/test/test_table_le.h5
./hl/test/test_table_be.h5
+./hl/test/test_table.c
./hl/test/test_table_cray.h5
+./hl/test/test_table_le.h5
./hl/test/usa.wri
# tools
@@ -2358,7 +2455,53 @@
./hl/tools/gif2h5/testfiles/ex_image2.h5
./hl/tools/gif2h5/testfiles/image1.gif
./hl/tools/gif2h5/testfiles/h52giftst.h5
+#
+./hl/tools/h5watch/Makefile.am
+./hl/tools/h5watch/extend_dset.c
+./hl/tools/h5watch/h5watch.c
+./hl/tools/h5watch/h5watchgentest.c
+./hl/tools/h5watch/swmr_check_compat_vfd.c
+./hl/tools/h5watch/testh5watch.sh.in
+
+# expected test output from testing h5watch
+#
+./hl/tools/testfiles/w-err-cmpd1.ddl
+./hl/tools/testfiles/w-err-cmpd2.ddl
+./hl/tools/testfiles/w-err-cmpd3.ddl
+./hl/tools/testfiles/w-err-cmpd4.ddl
+./hl/tools/testfiles/w-err-cmpd5.ddl
+./hl/tools/testfiles/w-err-dset1.ddl
+./hl/tools/testfiles/w-err-dset2.ddl
+./hl/tools/testfiles/w-err-dset-nomax.ddl
+./hl/tools/testfiles/w-err-dset-none.ddl
+./hl/tools/testfiles/w-err-file.ddl
+./hl/tools/testfiles/w-err-poll0.ddl
+./hl/tools/testfiles/w-err-poll.ddl
+./hl/tools/testfiles/w-err-width.ddl
+./hl/tools/testfiles/w-ext-cmpd.ddl
+./hl/tools/testfiles/w-ext-cmpd-esc.ddl
+./hl/tools/testfiles/w-ext-cmpd-esc-f1.ddl
+./hl/tools/testfiles/w-ext-cmpd-esc-f3.ddl
+./hl/tools/testfiles/w-ext-cmpd-esc-ff2.ddl
+./hl/tools/testfiles/w-ext-cmpd-f1.ddl
+./hl/tools/testfiles/w-ext-cmpd-f2.ddl
+./hl/tools/testfiles/w-ext-cmpd-ff3.ddl
+./hl/tools/testfiles/w-ext-cmpd-label.ddl
+./hl/tools/testfiles/w-ext-cmpd-two.ddl
+./hl/tools/testfiles/w-ext-cmpd-two-f1.ddl
+./hl/tools/testfiles/w-ext-cmpd-two-f3.ddl
+./hl/tools/testfiles/w-ext-cmpd-two-ff2.ddl
+./hl/tools/testfiles/w-ext-early.ddl
+./hl/tools/testfiles/w-ext-late.ddl
+./hl/tools/testfiles/w-ext-one-d.ddl
+./hl/tools/testfiles/w-ext-one.ddl
+./hl/tools/testfiles/w-ext-one-simple.ddl
+./hl/tools/testfiles/w-ext-two-d.ddl
+./hl/tools/testfiles/w-ext-two.ddl
+./hl/tools/testfiles/w-ext-two-width.ddl
+./hl/tools/testfiles/w-help1.ddl
+#
# hl fortran
./hl/fortran/COPYING
./hl/fortran/Makefile.am
@@ -2565,6 +2708,7 @@
./hl/src/Makefile.in
./hl/test/Makefile.in
./hl/tools/gif2h5/Makefile.in
+./hl/tools/h5watch/Makefile.in
./hl/tools/Makefile.in
./m4/libtool.m4
./m4/lt~obsolete.m4
@@ -2585,6 +2729,7 @@
./tools/h5copy/Makefile.in
./tools/h5diff/Makefile.in
./tools/h5dump/Makefile.in
+./tools/h5format_convert/Makefile.in
./tools/h5import/Makefile.in
./tools/h5jam/Makefile.in
./tools/h5ls/Makefile.in
diff --git a/bin/cmakehdf5 b/bin/cmakehdf5
index 48869cf..772e54c 100755
--- a/bin/cmakehdf5
+++ b/bin/cmakehdf5
@@ -12,6 +12,8 @@
DPRINT=:
#DPRINT=echo
+# use the ctest scripting method if --script is given
+if [ "$1" != "--script" ]; then
# variable names
# The "extra" number is the step number and easier to see all logfiles in
# the sorted order of steps
@@ -351,3 +353,267 @@ exit_code=$?
# Show a closing time stamp
TIMESTAMP
exit $exit_code
+
+else
+# ---------------
+# older version
+# ---------------
+
+# variable names
+progname=`basename $0` # program name
+cminfile="cmakemin.$$" # Cmake minimum file
+cfgfile=$progname.$$ # configure file
+ctest_log=ctest.log # output of ctest script
+install_log=install.log # output of installation
+$DPRINT $cfgfile
+
+# Remove temporary generated files if exit 0
+trap "rm -f $cminfile $cfgfile" 0
+
+#=============
+# Function definitions
+#=============
+TIMESTAMP()
+{
+ echo "=====" "`date`" "====="
+}
+
+
+#==========
+# main
+#==========
+# Show a start time stamp
+TIMESTAMP
+
+# Explain what and where log files are.
+cat <<EOF
+$ctest_log: output of ctest script.
+$install_log: output of installation
+Log files will be stored in Testing/Temporary:
+ LastConfigure_<timestamp>.log: output of configure
+ LastBuild_<timestamp>.log: output of build
+ LastTest_<timestamp>.log: output of testing
+ LastTestsFailed_<timestamp>.log: list of failed tests
+
+EOF
+
+# First generate the two needed input files, the $cimnfile and $cfgfile.
+# Then use ctest to use the two input files.
+
+#==========
+# create the configure file
+#==========
+# Create the cmake minimum required file to be used by the following
+# configure file. Though not absolutely needed, it is better to generate
+# this file before the configure file. Quote the EOF to prevent substitution
+# in the text.
+#==========
+#==========
+cat > $cfgfile <<'EOF'
+cmake_minimum_required(VERSION 3.1.0 FATAL_ERROR)
+########################################################
+# This dashboard is maintained by The HDF Group
+# For any comments please contact cdashhelp@hdfgroup.org
+#
+########################################################
+
+set (CTEST_DASHBOARD_ROOT ${CTEST_SCRIPT_DIRECTORY})
+set (CTEST_SOURCE_DIRECTORY "../hdf5")
+set (CTEST_BINARY_DIRECTORY ".")
+set (CTEST_CMAKE_GENERATOR "Unix Makefiles")
+set (CTEST_BUILD_CONFIGURATION "Release")
+set (CTEST_MAX_N 8)
+
+# -- CDash variables
+set (LOCAL_NO_SUBMIT TRUE) # No CDash submit.
+set (MODEL "Experimental")
+set (CDASH_LOCAL TRUE)
+set (SITE_BUILDNAME_SUFFIX "cmakehdf5")
+
+# -- URL set for internal check, default is to not update
+set (LOCAL_SKIP_UPDATE TRUE)
+set (REPOSITORY_URL "http://svn.${hdfgroup_url}/hdf5/branches/hdf5_1_8")
+# -- Standard build options
+set (ADD_BUILD_OPTIONS "-DCMAKE_INSTALL_PREFIX:PATH=${CTEST_BINARY_DIRECTORY} -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRING=\"SVN\" -DHDF5_PACKAGE_EXTLIBS:BOOL=ON")
+
+# Use multiple CPU cores to build
+include(ProcessorCount)
+ProcessorCount(N)
+if(NOT N EQUAL 0)
+ if(N GREATER ${CTEST_MAX_N})
+ set(N ${CTEST_MAX_N})
+ endif(N GREATER ${CTEST_MAX_N})
+ set(CTEST_BUILD_FLAGS -j${N})
+ set(ctest_test_args ${ctest_test_args} PARALLEL_LEVEL ${N})
+endif()
+
+# -----------------------------------------------------------
+# -- Get environment
+# -----------------------------------------------------------
+ ## -- set hostname
+ ## --------------------------
+ find_program (HOSTNAME_CMD NAMES hostname)
+ exec_program (${HOSTNAME_CMD} ARGS OUTPUT_VARIABLE HOSTNAME)
+ set (CTEST_SITE "${HOSTNAME}${CTEST_SITE_EXT}")
+ find_program (UNAME NAMES uname)
+ macro (getuname name flag)
+ exec_program ("${UNAME}" ARGS "${flag}" OUTPUT_VARIABLE "${name}")
+ endmacro (getuname)
+
+ getuname (osname -s)
+ getuname (osrel -r)
+ getuname (cpu -m)
+
+ if (SITE_BUILDNAME_SUFFIX)
+ set (CTEST_BUILD_NAME "${osname}-${osrel}-${cpu}-${SITE_BUILDNAME_SUFFIX}")
+ else (SITE_BUILDNAME_SUFFIX)
+ set (CTEST_BUILD_NAME "${osname}-${osrel}-${cpu}")
+ endif (SITE_BUILDNAME_SUFFIX)
+# -----------------------------------------------------------
+
+set (BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDNAME:STRING=${CTEST_BUILD_NAME}")
+
+#-----------------------------------------------------------------------------
+# MAC machines need special option
+#-----------------------------------------------------------------------------
+if (APPLE)
+ # Compiler choice
+ execute_process(COMMAND xcrun --find cc OUTPUT_VARIABLE XCODE_CC OUTPUT_STRIP_TRAILING_WHITESPACE)
+ execute_process(COMMAND xcrun --find c++ OUTPUT_VARIABLE XCODE_CXX OUTPUT_STRIP_TRAILING_WHITESPACE)
+ set(ENV{CC} "${XCODE_CC}")
+ set(ENV{CXX} "${XCODE_CXX}")
+ # Shared fortran is not supported, build static
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_ANSI_CFLAGS:STRING=-fPIC")
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=OFF")
+endif (APPLE)
+
+
+# -----------------------------------------------------------
+find_package (Subversion)
+set (CTEST_UPDATE_COMMAND "${Subversion_SVN_EXECUTABLE}")
+# -- Only clean build folder if LOCAL_CLEAR_BUILD is set
+if (LOCAL_CLEAR_BUILD)
+ set (CTEST_START_WITH_EMPTY_BINARY_DIRECTORY TRUE)
+ if(NOT EXISTS "${CTEST_BINARY_DIRECTORY}")
+ file(MAKE_DIRECTORY "${CTEST_BINARY_DIRECTORY}")
+ else()
+ ctest_empty_binary_directory(${CTEST_BINARY_DIRECTORY})
+ endif()
+endif (LOCAL_CLEAR_BUILD)
+
+#-----------------------------------------------------------------------------
+# Send the main script as a note.
+list (APPEND CTEST_NOTES_FILES
+ "${CMAKE_CURRENT_LIST_FILE}"
+ "${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake"
+ )
+
+# Check for required variables.
+foreach (req
+ CTEST_CMAKE_GENERATOR
+ CTEST_SITE
+ CTEST_BUILD_NAME
+ )
+ if (NOT DEFINED ${req})
+ message(FATAL_ERROR "The containing script must set ${req}")
+ endif (NOT DEFINED ${req})
+endforeach (req)
+
+## -- set output to english
+set($ENV{LC_MESSAGES} "en_EN")
+
+#-----------------------------------------------------------------------------
+# Initialize the CTEST commands
+#------------------------------
+set (CTEST_CMAKE_COMMAND "\"${CMAKE_COMMAND}\"")
+set (CTEST_CONFIGURE_COMMAND
+ "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_BUILD_CONFIGURATION} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_SOURCE_DIRECTORY}\"")
+
+# Print summary information.
+foreach (v
+ CTEST_SITE
+ CTEST_BUILD_NAME
+ CTEST_SOURCE_DIRECTORY
+ CTEST_BINARY_DIRECTORY
+ CTEST_CMAKE_GENERATOR
+ CTEST_BUILD_CONFIGURATION
+ CTEST_CONFIGURE_COMMAND
+ CTEST_SCRIPT_DIRECTORY
+ )
+ set (vars "${vars} ${v}=[${${v}}]\n")
+endforeach (v)
+message ("Dashboard script configuration:\n${vars}\n")
+
+ctest_start (${MODEL} TRACK ${MODEL})
+if (NOT LOCAL_SKIP_UPDATE)
+ ctest_update (SOURCE "${CTEST_SOURCE_DIRECTORY}")
+endif (NOT LOCAL_SKIP_UPDATE)
+if(NOT res STREQUAL "0")
+ message (FATAL_ERROR "Configure FAILED")
+endif()
+message ("Configure DONE")
+configure_file(${CTEST_SOURCE_DIRECTORY}/config/cmake/CTestCustom.cmake ${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake)
+ctest_read_custom_files ("${CTEST_BINARY_DIRECTORY}")
+ctest_configure (BUILD "${CTEST_BINARY_DIRECTORY}" RETURN_VALUE res)
+if (NOT LOCAL_NO_SUBMIT)
+ ctest_submit (PARTS Update Configure Notes)
+endif (NOT LOCAL_NO_SUBMIT)
+ctest_build (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND RETURN_VALUE res)
+if (NOT LOCAL_NO_SUBMIT)
+ ctest_submit (PARTS Build)
+endif (NOT LOCAL_NO_SUBMIT)
+if(NOT res STREQUAL "0")
+ message (FATAL_ERROR "Build FAILED")
+endif()
+message ("build DONE")
+if (NOT LOCAL_SKIP_TEST)
+ ctest_build (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND ${ctest_test_args} RETURN_VALUE res)
+ if (NOT LOCAL_NO_SUBMIT)
+ ctest_submit (PARTS Test)
+ endif (NOT LOCAL_NO_SUBMIT)
+ if(NOT res STREQUAL "0")
+ message (FATAL_ERROR "Test FAILED")
+ endif()
+ message ("test DONE")
+endif (NOT LOCAL_SKIP_TEST)
+if(NOT LOCAL_MEMCHECK_TEST)
+ ##-----------------------------------------------
+ ## Package the product
+ ##-----------------------------------------------
+ execute_process(COMMAND cpack -C ${CTEST_BUILD_CONFIGURATION} -V
+ WORKING_DIRECTORY ${CTEST_BINARY_DIRECTORY}
+ RESULT_VARIABLE cpackResult
+ OUTPUT_VARIABLE cpackLog
+ ERROR_VARIABLE cpackLog.err
+ )
+ file(WRITE ${CTEST_BINARY_DIRECTORY}/cpack.log "${cpackLog.err}" "${cpackLog}")
+endif(NOT LOCAL_MEMCHECK_TEST)
+#-----------------------------------------------------------------------------
+
+message ("DONE")
+EOF
+
+
+# Run ctest
+ctest -S $cfgfile -C Release -V -O $ctest_log
+exit_code=$?
+if [ $exit_code = 0 ]; then
+ echo CTest script completed without error
+else
+ echo Error encountered CTest script
+fi
+# Using HDF5-*.sh because actual name is unavailable
+install_sh=HDF5-*.sh
+echo installing with $install_sh ...
+./$install_sh --skip-license > $install_log
+exit_code=$?
+if [ $exit_code = 0 ]; then
+ echo Complete without error
+else
+ echo Error encountered
+fi
+TIMESTAMP
+exit $exit_code
+
+fi
+
diff --git a/c++/test/tfile.cpp b/c++/test/tfile.cpp
index f84ce3a..6871f06 100644
--- a/c++/test/tfile.cpp
+++ b/c++/test/tfile.cpp
@@ -732,7 +732,7 @@ static void test_libver_bounds()
/* Run the tests */
test_libver_bounds_real(H5F_LIBVER_EARLIEST, H5O_VERSION_1, H5F_LIBVER_LATEST, H5O_VERSION_2);
- test_libver_bounds_real(H5F_LIBVER_LATEST, H5O_VERSION_2, H5F_LIBVER_EARLIEST, H5O_VERSION_1);
+ test_libver_bounds_real(H5F_LIBVER_LATEST, H5O_VERSION_2, H5F_LIBVER_EARLIEST, H5O_VERSION_2);
PASSED();
} /* end test_libver_bounds() */
diff --git a/c++/test/th5s.h5 b/c++/test/th5s.h5
index 7a0bfb3..bc2b666 100644
--- a/c++/test/th5s.h5
+++ b/c++/test/th5s.h5
Binary files differ
diff --git a/configure.ac b/configure.ac
index 0c3514c..e14ae15 100644
--- a/configure.ac
+++ b/configure.ac
@@ -3058,10 +3058,14 @@ AC_CONFIG_FILES([src/libhdf5.settings
test/Makefile
test/testcheck_version.sh
test/testerror.sh
+ test/testflushrefresh.sh
test/H5srcdir_str.h
test/testlibinfo.sh
test/testlinks_env.sh
+ test/testswmr.sh
test/test_plugin.sh
+ test/test_usecases.sh
+ test/testvdsswmr.sh
testpar/Makefile
tools/Makefile
tools/h5dump/Makefile
@@ -3087,11 +3091,14 @@ AC_CONFIG_FILES([src/libhdf5.settings
tools/lib/Makefile
tools/misc/Makefile
tools/misc/h5cc
+ tools/misc/testh5clear.sh
tools/misc/testh5mkgrp.sh
tools/misc/testh5repart.sh
tools/misc/vds/Makefile
tools/h5stat/testh5stat.sh
tools/h5stat/Makefile
+ tools/h5format_convert/Makefile
+ tools/h5format_convert/testh5fc.sh
tools/perform/Makefile
examples/Makefile
examples/run-c-ex.sh
@@ -3120,6 +3127,8 @@ AC_CONFIG_FILES([src/libhdf5.settings
hl/tools/Makefile
hl/tools/gif2h5/Makefile
hl/tools/gif2h5/h52giftest.sh
+ hl/tools/h5watch/Makefile
+ hl/tools/h5watch/testh5watch.sh
hl/examples/Makefile
hl/examples/run-hlc-ex.sh
hl/c++/Makefile
diff --git a/hl/src/H5DO.c b/hl/src/H5DO.c
index 99dbd93..3b80eba 100644
--- a/hl/src/H5DO.c
+++ b/hl/src/H5DO.c
@@ -97,3 +97,187 @@ done:
return(ret_value);
} /* end H5DOwrite_chunk() */
+
+/*
+ * Function: H5DOappend()
+ *
+ * Purpose: To append elements to a dataset.
+ * axis: the dataset dimension (zero-based) for the append
+ * extension: the # of elements to append for the axis-th dimension
+ * memtype: the datatype
+ * buf: buffer with data for the append
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Jan 2014
+ *
+ * Note:
+ * This routine is copied from the fast forward feature branch: features/hdf5_ff
+ * src/H5FF.c:H5DOappend() with the following modifications:
+ * 1) Remove and replace macro calls such as
+ * FUNC_ENTER_API, H5TRACE, HGOTO_ERROR
+ * accordingly because hl does not have these macros
+ * 2) Replace H5I_get_type() by H5Iget_type()
+ * 3) Replace H5P_isa_class() by H5Pisa_class()
+ * 4) Fix a bug in the following: replace extension by size[axis]
+ * if(extension < old_size) {
+ * ret_value = FAIL;
+ * goto done;
+ * }
+ */
+herr_t
+H5DOappend(hid_t dset_id, hid_t dxpl_id, unsigned axis, size_t extension,
+ hid_t memtype, const void *buf)
+{
+
+    hsize_t size[H5S_MAX_RANK];	/* The new size (after extension) */
+ hsize_t old_size = 0; /* The size of the dimension to be extended */
+ int sndims; /* Number of dimensions in dataspace (signed) */
+ unsigned ndims; /* Number of dimensions in dataspace */
+ hid_t space_id = FAIL; /* Old file space */
+ hid_t new_space_id = FAIL; /* New file space (after extension) */
+ hid_t mem_space_id = FAIL; /* Memory space for data buffer */
+ hssize_t snelmts; /* Number of elements in selection (signed) */
+ hsize_t nelmts; /* Number of elements in selection */
+ hid_t dapl = FAIL; /* Dataset access property list */
+
+    hsize_t start[H5S_MAX_RANK];	/* H5Sselect_hyperslab: starting offset */
+ hsize_t count[H5S_MAX_RANK]; /* H5Sselect_hyperslab: # of blocks to select */
+ hsize_t stride[H5S_MAX_RANK]; /* H5Sselect_hyperslab: # of elements to move when selecting */
+ hsize_t block[H5S_MAX_RANK]; /* H5Sselect_hyperslab: # of elements in a block */
+
+ hsize_t *boundary = NULL; /* Boundary set in append flush property */
+ H5D_append_cb_t append_cb; /* Callback function set in append flush property */
+ void *udata; /* User data set in append flush property */
+ hbool_t hit = FALSE; /* Boundary is hit or not */
+ hsize_t k; /* Local index variable */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = FAIL; /* Return value */
+
+ /* check arguments */
+ if(H5I_DATASET != H5Iget_type(dset_id))
+ goto done;
+
+ /* Get the default dataset transfer property list if the user didn't provide one */
+ if(H5P_DEFAULT == dxpl_id)
+ dxpl_id = H5P_DATASET_XFER_DEFAULT;
+ else
+ if(TRUE != H5Pisa_class(dxpl_id, H5P_DATASET_XFER))
+ goto done;
+
+ /* Get the dataspace of the dataset */
+ if(FAIL == (space_id = H5Dget_space(dset_id)))
+ goto done;
+
+ /* Get the rank of this dataspace */
+ if((sndims = H5Sget_simple_extent_ndims(space_id)) < 0)
+ goto done;
+ ndims = (unsigned)sndims;
+
+ /* Verify correct axis */
+ if(axis >= ndims)
+ goto done;
+
+ /* Get the dimensions sizes of the dataspace */
+ if(H5Sget_simple_extent_dims(space_id, size, NULL) < 0)
+ goto done;
+
+ /* Adjust the dimension size of the requested dimension,
+ but first record the old dimension size */
+ old_size = size[axis];
+ size[axis] += extension;
+ if(size[axis] < old_size)
+ goto done;
+
+ /* Set the extent of the dataset to the new dimension */
+ if(H5Dset_extent(dset_id, size) < 0)
+ goto done;
+
+ /* Get the new dataspace of the dataset */
+ if(FAIL == (new_space_id = H5Dget_space(dset_id)))
+ goto done;
+
+ /* Select a hyperslab corresponding to the append operation */
+ for(u = 0 ; u < ndims ; u++) {
+ start[u] = 0;
+ stride[u] = 1;
+ count[u] = size[u];
+ block[u] = 1;
+ if(u == axis) {
+ count[u] = extension;
+ start[u] = old_size;
+ } /* end if */
+ } /* end for */
+ if(FAIL == H5Sselect_hyperslab(new_space_id, H5S_SELECT_SET, start, stride, count, block))
+ goto done;
+
+    /* The # of elements in the new extended dataspace */
+ if((snelmts = H5Sget_select_npoints(new_space_id)) < 0)
+ goto done;
+ nelmts = (hsize_t)snelmts;
+
+ /* create a memory space */
+ mem_space_id = H5Screate_simple(1, &nelmts, NULL);
+
+ /* Write the data */
+ if(H5Dwrite(dset_id, memtype, mem_space_id, new_space_id, dxpl_id, buf) < 0)
+ goto done;
+
+ /* Obtain the dataset's access property list */
+ if((dapl = H5Dget_access_plist(dset_id)) < 0)
+ goto done;
+
+ /* Allocate the boundary array */
+ boundary = (hsize_t *)HDmalloc(ndims * sizeof(hsize_t));
+
+ /* Retrieve the append flush property */
+ if(H5Pget_append_flush(dapl, ndims, boundary, &append_cb, &udata) < 0)
+ goto done;
+
+ /* No boundary for this axis */
+ if(boundary[axis] == 0)
+ goto done;
+
+ /* Determine whether a boundary is hit or not */
+ for(k = start[axis]; k < size[axis]; k++)
+ if(!((k + 1) % boundary[axis])) {
+ hit = TRUE;
+ break;
+ }
+
+ if(hit) { /* Hit the boundary */
+ /* Invoke callback if there is one */
+ if(append_cb && append_cb(dset_id, size, udata) < 0)
+ goto done;
+
+ /* Do a dataset flush */
+ if(H5Dflush(dset_id) < 0)
+ goto done;
+ } /* end if */
+
+ /* Indicate success */
+ ret_value = SUCCEED;
+
+done:
+ /* Close old dataspace */
+ if(space_id != FAIL && H5Sclose(space_id) < 0)
+ ret_value = FAIL;
+
+ /* Close new dataspace */
+ if(new_space_id != FAIL && H5Sclose(new_space_id) < 0)
+ ret_value = FAIL;
+
+ /* Close memory dataspace */
+ if(mem_space_id != FAIL && H5Sclose(mem_space_id) < 0)
+ ret_value = FAIL;
+
+ /* Close the dataset access property list */
+ if(dapl != FAIL && H5Pclose(dapl) < 0)
+ ret_value = FAIL;
+
+ if(boundary)
+ HDfree(boundary);
+
+ return ret_value;
+} /* H5DOappend() */
+
diff --git a/hl/src/H5DOpublic.h b/hl/src/H5DOpublic.h
index 774709e..1e5eb7a 100644
--- a/hl/src/H5DOpublic.h
+++ b/hl/src/H5DOpublic.h
@@ -22,17 +22,16 @@ extern "C" {
/*-------------------------------------------------------------------------
*
- * Direct chunk write function
+ * "Optimized dataset" routines.
*
*-------------------------------------------------------------------------
*/
-H5_HLDLL herr_t H5DOwrite_chunk(hid_t dset_id,
- hid_t dxpl_id,
- uint32_t filters,
- const hsize_t *offset,
- size_t data_size,
- const void *buf);
+H5_HLDLL herr_t H5DOwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters,
+ const hsize_t *offset, size_t data_size, const void *buf);
+
+H5_HLDLL herr_t H5DOappend(hid_t dset_id, hid_t dxpl_id, unsigned axis,
+ size_t extension, hid_t memtype, const void *buf);
#ifdef __cplusplus
}
diff --git a/hl/src/H5LD.c b/hl/src/H5LD.c
new file mode 100644
index 0000000..4abd740
--- /dev/null
+++ b/hl/src/H5LD.c
@@ -0,0 +1,639 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+* Copyright by The HDF Group. *
+* Copyright by the Board of Trustees of the University of Illinois. *
+* All rights reserved. *
+* *
+* This file is part of HDF5. The full HDF5 copyright notice, including *
+* terms governing use, modification, and redistribution, is contained in *
+* the files COPYING and Copyright.html. COPYING can be found at the root *
+* of the source code distribution tree; Copyright.html can be found at the *
+* root level of an installed copy of the electronic HDF5 document set and *
+* is linked from the top-level documents page. It can also be found at *
+* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+* access to either file, you may request a copy from help@hdfgroup.org. *
+* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <stdio.h>
+#include "H5LDprivate.h"
+
+/*-------------------------------------------------------------------------
+ *
+ * internal functions
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t H5LD_construct_info(H5LD_memb_t *memb, hid_t par_tid);
+static herr_t H5LD_get_dset_dims(hid_t did, hsize_t *cur_dims);
+static size_t H5LD_get_dset_type_size(hid_t did, const char *fields);
+static herr_t H5LD_get_dset_elmts(hid_t did, const hsize_t *prev_dims,
+ const hsize_t *cur_dims, const char *fields, void *buf);
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5LD_clean_vector
+ *
+ * Purpose: Process the vector of info:
+ * 1) free the array of pointers to member names in listv[n]
+ * 2) close the type id of the last member in listv[n]
+ * 3) free the H5LD_memb_t structure itself as pointed to by listv[n]
+ *
+ * Return: void
+ *
+ * Programmer: Vailin Choi; Aug 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+H5LD_clean_vector(H5LD_memb_t *listv[])
+{
+ unsigned n; /* Local index variable */
+
+ HDassert(listv);
+
+ /* Go through info for each field stored in listv[] */
+ for(n = 0; listv[n] != NULL; n++) {
+ if(listv[n]->names) {
+ HDfree(listv[n]->names);
+ listv[n]->names = NULL;
+ } /* end if */
+
+ /* Close the type id of the last member in the field */
+ if(!(listv[n]->last_tid < 0)) {
+ H5Tclose(listv[n]->last_tid);
+ listv[n]->last_tid = -1;
+ } /* end if */
+
+ /* Free the H5LD_memb_t structure for the field */
+ HDfree(listv[n]);
+ listv[n] = NULL;
+ } /* end for */
+} /* H5LD_clean_vector() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5LD_construct_info()
+ *
+ * Purpose: Get the remaining info for a field:
+ * 1) Get the type id of the last member in the field
+ * 2) Get the total offset of all the members in the field
+ * 3) Get the type size of the last member in the field
+ *
+ * Return: Success: 0
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Aug 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5LD_construct_info(H5LD_memb_t *memb, hid_t par_tid)
+{
+ hid_t tmp_tid = -1; /* Dataset type id */
+ unsigned i; /* Local index variable */
+ herr_t ret_value = FAIL; /* Return value */
+
+ /* Make a copy of the incoming datatype */
+ tmp_tid = H5Tcopy(par_tid);
+
+ /* Validate all the members in a field */
+ for(i = 0; memb->names[i] != NULL; i++) {
+ hid_t memb_tid; /* Type id for a member in a field */
+ int idx; /* Index # of a member in a compound datatype */
+
+ /* Get the member index and member type id */
+ if((idx = H5Tget_member_index(tmp_tid, memb->names[i])) < 0)
+ goto done;
+ if((memb_tid = H5Tget_member_type(tmp_tid, (unsigned)idx)) < 0)
+ goto done;
+
+ /* Sum up the offset of all the members in the field */
+ memb->tot_offset += H5Tget_member_offset(tmp_tid, (unsigned)idx);
+ if(H5Tclose(tmp_tid) < 0)
+ goto done;
+ tmp_tid = memb_tid;
+ } /* end for */
+
+ /* Get the type size of the last member in the field */
+ memb->last_tsize = H5Tget_size(tmp_tid);
+
+ /* Save the type id of the last member in the field */
+ memb->last_tid = H5Tcopy(tmp_tid);
+
+ /* Indicate success */
+ ret_value = SUCCEED;
+
+done:
+ H5E_BEGIN_TRY
+ H5Tclose(tmp_tid);
+ H5E_END_TRY
+
+ return(ret_value);
+} /* H5LD_construct_info() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5LD_construct_vector
+ *
+ * Purpose: Process the comma-separated list of fields in "fields" as follows:
+ * Example:
+ * "fields": "a.b.c,d"
+ * listv[0]->tot_offset = total offset of "a" & "b" & "c"
+ * listv[0]->last_tid = type id of "c"
+ * listv[0]->last_tsize = type size of "c"
+ * listv[0]->names[0] = "a"
+ * listv[0]->names[1] = "b"
+ * listv[0]->names[2] = "c"
+ * listv[0]->names[3] = NULL
+ *
+ * listv[1]->tot_offset = offset of "d"
+ * listv[1]->last_tid = type id of "d"
+ * listv[1]->last_tsize = type size of "d"
+ * listv[1]->names[0] = "d"
+ * listv[1]->names[1] = NULL
+ *
+ * Return: Success: # of comma-separated fields in "fields"
+ * Failure: negative value
+ *
+ * Programmer: Vailin Choi; Aug 2010
+ *
+*-------------------------------------------------------------------------
+*/
+int
+H5LD_construct_vector(char *fields, H5LD_memb_t *listv[]/*OUT*/, hid_t par_tid)
+{
+ int nfields; /* The # of comma-separated fields in "fields" */
+ hbool_t end_of_fields = FALSE; /* end of "fields" */
+ char *fields_ptr; /* Pointer to "fields" */
+ int ret_value = FAIL; /* Return value */
+
+ HDassert(listv);
+ HDassert(fields);
+
+ fields_ptr = fields;
+ nfields = 0;
+
+ /* Process till end of "fields" */
+ while(!end_of_fields) {
+ H5LD_memb_t *memb = NULL; /* Pointer to structure for storing a field's info */
+ char *cur; /* Pointer to a member in a field */
+ size_t len; /* Estimated # of members in a field */
+ hbool_t gotcomma = FALSE; /* A comma encountered */
+ hbool_t gotmember = FALSE; /* Getting member in a field */
+ hbool_t valid = TRUE; /* Whether a field being processed is valid or not */
+ int j = 0; /* The # of members in a field */
+
+ len = (HDstrlen(fields_ptr) / 2) + 2;
+
+ /* Allocate memory for an H5LD_memb_t for storing a field's info */
+ if(NULL == (memb = (H5LD_memb_t *)HDcalloc((size_t)1, sizeof(H5LD_memb_t))))
+ goto done;
+
+ /* Allocate memory for an array of pointers to member names */
+ if(NULL == (memb->names = (char **)HDcalloc(len, sizeof(char *))))
+ goto done;
+
+ memb->names[j] = fields_ptr;
+ memb->last_tid = -1;
+ cur = fields_ptr;
+
+ /* Continue processing till: not valid or comma encountered or "fields" ended */
+ while(valid && !gotcomma && !end_of_fields) {
+ switch(*fields_ptr) {
+ case '\0': /* end of list */
+ if(gotmember) { /* getting something and end of "fields" */
+ *cur++ = '\0';;
+ memb->names[++j] = NULL;
+ } /* end if */
+ else /* getting nothing but end of list */
+ valid = FALSE;
+ end_of_fields = TRUE;
+ break;
+
+ case '\\': /* escape character */
+ ++fields_ptr; /* skip it */
+ if(*fields_ptr == '\0')
+ valid = FALSE;
+ else {
+ *cur++ = *fields_ptr++;
+ gotmember = TRUE;
+ } /* end else */
+ break;
+
+ case '.': /* nested field separator */
+ *fields_ptr++ = *cur++ = '\0';;
+ if(gotmember) {
+ memb->names[++j] = cur;
+ gotmember = FALSE;
+ } /* end if */
+ else
+ valid = FALSE;
+ break;
+
+ case ',': /* field separator */
+ *fields_ptr++ = *cur++ = '\0';;
+ if(gotmember) {
+ memb->names[++j] = NULL;
+ gotmember = FALSE;
+ } /* end if */
+ else
+ valid = FALSE;
+ gotcomma = TRUE;
+ break;
+
+ default:
+ *cur++ = *fields_ptr++;
+ gotmember = TRUE;
+ break;
+ } /* end switch */
+ } /* while (valid && !gotcomma && !end_of_fields) */
+
+ /* If valid, put into listv and continue processing further info */
+ if(valid) {
+ listv[nfields++] = memb;
+ if(H5LD_construct_info(memb, par_tid) < 0)
+ goto done;
+ } /* end if */
+ else {
+ if(memb)
+ HDfree(memb);
+ goto done;
+ } /* end else */
+ } /* while !end_of_fields */
+
+ /* Indicate success */
+ ret_value = nfields;
+
+done:
+ listv[nfields] = NULL;
+ if(ret_value == FAIL)
+ H5LD_clean_vector(listv);
+
+ return(ret_value);
+} /* H5LD_construct_vector() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5LD_get_dset_dims
+ *
+ * Purpose: To return the current size for each dimension of the
+ * dataset's dataspace
+ *
+ * Return: Success: 0
+ * Failure: negative value
+ *
+ * Programmer: Vailin Choi; March 2010
+ *
+*-------------------------------------------------------------------------
+*/
+static herr_t
+H5LD_get_dset_dims(hid_t did, hsize_t *cur_dims)
+{
+ hid_t sid = -1; /* Dataspace ID */
+ herr_t ret_value = FAIL; /* Return Value */
+
+ /* Verify parameter */
+ if(cur_dims == NULL)
+ goto done;
+
+ /* Get the dataset's dataspace */
+ if((sid = H5Dget_space(did)) < 0)
+ goto done;
+
+ /* Get the current dimension size */
+ if(H5Sget_simple_extent_dims(sid, cur_dims, NULL) < 0)
+ goto done;
+
+ /* Indicate success */
+ ret_value = SUCCEED;
+
+done:
+ H5E_BEGIN_TRY {
+ H5Sclose(sid);
+ } H5E_END_TRY;
+
+ return(ret_value);
+} /* H5LD_get_dset_dims() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5LD_get_dset_type_size
+ *
+ * Purpose: To return the size of the dataset's datatype in bytes
+ * null "fields": return the size of the dataset's datatype
+ * non-null "fields": return the size of the dataset's datatype
+ * with respect to the selection in "fields"
+ *
+ * Return: Success: size of the dataset's datatype
+ * Failure: 0 (valid datatypes are never zero size)
+ *
+ * Programmer: Vailin Choi; March 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static size_t
+H5LD_get_dset_type_size(hid_t did, const char *fields)
+{
+ hid_t dset_tid = -1; /* Dataset's type identifier */
+ hid_t tid = -1; /* Native Type identifier */
+ H5LD_memb_t **listv = NULL; /* Vector for storing information in "fields" */
+ char *dup_fields = NULL; /* A copy of "fields" */
+ size_t ret_value = 0; /* Return value */
+
+ /* Get the datatype of the dataset */
+ if((dset_tid = H5Dget_type(did)) < 0)
+ goto done;
+ if((tid = H5Tget_native_type(dset_tid, H5T_DIR_DEFAULT)) < 0)
+ goto done;
+
+ if(fields == NULL) /* If no "fields" is specified */
+ ret_value = H5Tget_size(tid);
+ else { /* "fields" are specified */
+ size_t len; /* Estimate the number of comma-separated fields in "fields" */
+ size_t tot = 0; /* Data type size of all the fields in "fields" */
+ int n = 0, num = 0; /* Local index variables */
+
+ HDassert(fields && *fields);
+
+ /* Should be a compound datatype if "fields" exists */
+ if(H5Tget_class(dset_tid) != H5T_COMPOUND)
+ goto done;
+
+ /* Get a copy of "fields" */
+ if(NULL == (dup_fields = HDstrdup(fields)))
+ goto done;
+
+ /* Allocate memory for a list of H5LD_memb_t pointers to store "fields" info */
+ len = (HDstrlen(fields) / 2) + 2;
+ if(NULL == (listv = (H5LD_memb_t **)HDcalloc(len, sizeof(H5LD_memb_t *))))
+ goto done;
+
+ /* Process and store info for "fields" */
+ if((num = H5LD_construct_vector(dup_fields, listv/*OUT*/, tid)) < 0)
+ goto done;
+
+ /* Sum up the size of all the datatypes in "fields" */
+ for(n = 0; n < num; n++)
+ tot += listv[n]->last_tsize;
+
+ /* Clean up the vector of H5LD_memb_t structures */
+ H5LD_clean_vector(listv);
+
+ /* Return the total size */
+ ret_value = tot;
+ } /* end else */
+
+done:
+ H5E_BEGIN_TRY
+ H5Tclose(tid);
+ H5Tclose(dset_tid);
+ H5E_END_TRY
+
+ /* Free the array of H5LD_memb_t pointers */
+ if(listv)
+ HDfree(listv);
+
+ /* Free memory */
+ if(dup_fields)
+ HDfree(dup_fields);
+
+ return(ret_value);
+} /* H5LD_get_dset_type_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5LD_get_dset_elmts
+ *
+ * Purpose: To retrieve selected data from the dataset
+ *
+ * Return: Success: 0
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; August 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5LD_get_dset_elmts(hid_t did, const hsize_t *prev_dims, const hsize_t *cur_dims,
+ const char *fields, void *buf)
+{
+ hid_t dtid = -1, tid = -1; /* Dataset type id */
+ hid_t sid = -1, mid = -1; /* Dataspace and memory space id */
+ hssize_t snum_elmts; /* Number of dataset elements in the selection (signed) */
+ hsize_t num_elmts; /* Number of dataset elements in the selection */
+ hsize_t start[H5S_MAX_RANK];/* Starting offset */
+    hsize_t count[H5S_MAX_RANK];/* H5Sselect_hyperslab: # of elements to select in each dimension */
+ H5LD_memb_t **listv = NULL; /* Vector for storing information in "fields" */
+ char *dup_fields = NULL; /* A copy of "fields" */
+ char *sav_buf = NULL; /* Saved pointer temporary buffer */
+ unsigned ctr; /* Counter for # of curr_dims > prev_dims */
+ int ndims; /* Number of dimensions for the dataset */
+ int i; /* Local index variable */
+ herr_t ret_value = FAIL; /* Return value */
+
+ /* Verify parameters */
+ if(prev_dims == NULL || cur_dims == NULL || buf == NULL)
+ goto done;
+
+ /* Get dataset's dataspace */
+ if((sid = H5Dget_space(did)) < 0)
+ goto done;
+
+ /* Get the number of dimensions */
+ if((ndims = H5Sget_simple_extent_ndims(sid)) < 0)
+ goto done;
+
+ /* Verify that cur_dims must have one dimension whose size is greater than prev_dims */
+ HDmemset(start, 0, sizeof start);
+ HDmemset(count, 0, sizeof count);
+ ctr = 0;
+ for(i = 0; i < ndims; i++)
+ if(cur_dims[i] > prev_dims[i]) {
+ ++ctr;
+ count[i] = cur_dims[i] - prev_dims[i];
+ start[i] = prev_dims[i];
+ } /* end if */
+ else { /* < or = */
+ start[i] = 0;
+ count[i] = MIN(prev_dims[i], cur_dims[i]);
+ } /* end else */
+ if(!ctr)
+ goto done;
+
+ if(ctr == 1) { /* changes for only one dimension */
+ /* Make the selection in the dataset based on "cur_dims" and "prev_dims" */
+ if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ goto done;
+ } /* end if */
+ else { /* changes for more than one dimensions */
+ HDmemset(start, 0, sizeof start);
+
+ /* Make the selection in the dataset based on "cur_dims" and "prev_dims" */
+ if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, cur_dims, NULL) < 0)
+ goto done;
+ if(H5Sselect_hyperslab(sid, H5S_SELECT_NOTB, start, NULL, prev_dims, NULL) < 0)
+ goto done;
+ } /* end else */
+
+ /* Get the number of elements in the selection */
+ if(0 == (snum_elmts = H5Sget_select_npoints(sid)))
+ goto done;
+ num_elmts = (hsize_t)snum_elmts;
+
+ /* Create the memory space for the selection */
+ if((mid = H5Screate_simple(1, &num_elmts, NULL)) < 0)
+ goto done;
+
+ /* Get the native datatype size */
+ if((dtid = H5Dget_type(did)) < 0)
+ goto done;
+ if((tid = H5Tget_native_type(dtid, H5T_DIR_DEFAULT)) < 0)
+ goto done;
+
+ if(fields == NULL) { /* nothing in "fields" */
+ /* Read and store all the elements in "buf" */
+ if(H5Dread(did, tid, mid, sid, H5P_DEFAULT, buf) < 0)
+ goto done;
+ } /* end if */
+ else { /* "fields" is specified */
+ unsigned char *buf_p = (unsigned char *)buf; /* Pointer to the destination buffer */
+ char *tmp_buf; /* Temporary buffer for data read */
+ size_t tot_tsize; /* Total datatype size */
+ size_t len; /* Estimate the number of comma-separated fields in "fields" */
+
+ /* should be a compound datatype if "fields" exists */
+ if(H5Tget_class(tid) != H5T_COMPOUND)
+ goto done;
+
+ /* Get the total size of the dataset's datatypes */
+ if(0 == (tot_tsize = H5LD_get_dset_type_size(did, NULL)))
+ goto done;
+
+ /* Allocate memory for reading in the elements in the dataset selection */
+ if(NULL == (sav_buf = tmp_buf = (char *)HDcalloc((size_t)num_elmts, tot_tsize)))
+ goto done;
+
+ /* Read the dataset elements in the selection */
+ if(H5Dread(did, tid, mid, sid, H5P_DEFAULT, tmp_buf) < 0)
+ goto done;
+
+ /* Make a copy of "fields" */
+ if(NULL == (dup_fields = HDstrdup(fields)))
+ goto done;
+
+ /* Allocate memory for the vector of H5LD_memb_t pointers */
+ len = (HDstrlen(fields) / 2) + 2;
+ if(NULL == (listv = (H5LD_memb_t **)HDcalloc(len, sizeof(H5LD_memb_t *))))
+ goto done;
+
+ /* Process and store information for "fields" */
+ if(H5LD_construct_vector(dup_fields, listv, tid) < 0)
+ goto done;
+
+ /* Copy data for each dataset element in the selection */
+ for(i = 0; i < (int)num_elmts; i++) {
+ int j; /* Local index variable */
+
+ /* Copy data for "fields" to the input buffer */
+ for(j = 0; listv[j] != NULL; j++) {
+ HDmemcpy(buf_p, tmp_buf + listv[j]->tot_offset, listv[j]->last_tsize);
+ buf_p += listv[j]->last_tsize;
+ } /* end for */
+ tmp_buf += tot_tsize;
+ } /* end for */
+
+ /* Clean up the vector of H5LD_memb_t structures */
+ H5LD_clean_vector(listv);
+ } /* end else */
+
+ /* Indicate success */
+ ret_value = SUCCEED;
+
+done:
+ H5E_BEGIN_TRY
+ H5Tclose(dtid);
+ H5Tclose(tid);
+ H5Sclose(sid);
+ H5Sclose(mid);
+ H5E_END_TRY
+
+ /* Free the array of H5LD_memb_t pointers */
+ if(listv)
+ HDfree(listv);
+
+ /* Free memory */
+ if(dup_fields)
+ HDfree(dup_fields);
+ if(sav_buf)
+ HDfree(sav_buf);
+
+ return(ret_value);
+} /* H5LD_get_dset_elmts() */
+
+/*-------------------------------------------------------------------------
+ *
+ * Public functions
+ *
+ *-------------------------------------------------------------------------
+ */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5LDget_dset_dims
+ *
+ * Purpose: To retrieve the current dimension sizes for a dataset
+ *
+ * Return: Success: 0
+ * Failure: negative value
+ *
+ * Programmer: Vailin Choi; March 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5LDget_dset_dims(hid_t did, hsize_t *cur_dims)
+{
+ return(H5LD_get_dset_dims(did, cur_dims));
+} /* H5LDget_dset_dims() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5LDget_dset_type_size
+ *
+ * Purpose: To return the size in bytes of the datatype for the dataset
+ *
+ * Return: Success: size in bytes of the dataset's datatype
+ * Failure: 0 (valid datatypes are never zero size)
+ *
+ * Programmer: Vailin Choi; March 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+size_t
+H5LDget_dset_type_size(hid_t did, const char *fields)
+{
+ return(H5LD_get_dset_type_size(did, fields));
+} /* H5LDget_dset_type_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5LDget_dset_elmts
+ *
+ * Purpose: To retrieve selected data from the dataset
+ *
+ * Return: Success: 0
+ * Failure: negative value
+ *
+ * Programmer: Vailin Choi; March 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5LDget_dset_elmts(hid_t did, const hsize_t *prev_dims, const hsize_t *cur_dims, const char *fields, void *buf)
+{
+ return(H5LD_get_dset_elmts(did, prev_dims, cur_dims, fields, buf) );
+} /* H5LDget_dset_elmts() */
+
diff --git a/hl/src/H5LDprivate.h b/hl/src/H5LDprivate.h
new file mode 100644
index 0000000..13e0710
--- /dev/null
+++ b/hl/src/H5LDprivate.h
@@ -0,0 +1,49 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef _H5LDprivate_H
+#define _H5LDprivate_H
+
+/* High-level library internal header file */
+#include "H5HLprivate2.h"
+#include "H5LDpublic.h"
+
+/* Store information for a field in <list_of_fields> for a compound data type */
+/*
+ * Note: This data structure is used by both H5LD.c and hl/tools/h5watch
+ * This declaration is repeated in tools/lib/h5tools_str.c
+ */
+typedef struct H5LD_memb_t {
+ size_t tot_offset;
+ size_t last_tsize;
+ hid_t last_tid;
+ char **names;
+} H5LD_memb_t;
+
+/*
+ * Note that these two private routines are called by hl/tools/h5watch.
+ * Have considered the following options:
+ * 1) Repeat the coding in both H5LD.c and h5watch
+ * 2) Make these public routines
+ * 3) Break the rule "to avoid tools calling private routines in the library"
+ * #1: not good for maintenance
+ * #2: these two routines are too specific to be made as public routines
+ * Decide to do #3 at this point of time after some discussion.
+ */
+void H5LD_clean_vector(H5LD_memb_t *listv[]);
+int H5LD_construct_vector(char *fields, H5LD_memb_t *listv[], hid_t par_tid);
+
+#endif /* end _H5LDprivate_H */
+
diff --git a/hl/src/H5LDpublic.h b/hl/src/H5LDpublic.h
new file mode 100644
index 0000000..4844d42
--- /dev/null
+++ b/hl/src/H5LDpublic.h
@@ -0,0 +1,33 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef _H5LDpublic_H
+#define _H5LDpublic_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+H5_HLDLL herr_t H5LDget_dset_dims(hid_t did, hsize_t *cur_dims);
+H5_HLDLL size_t H5LDget_dset_type_size(hid_t did, const char *fields);
+H5_HLDLL herr_t H5LDget_dset_elmts(hid_t did, const hsize_t *prev_dims,
+ const hsize_t *cur_dims, const char *fields, void *buf);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _H5LDpublic_H */
+
diff --git a/hl/src/H5TB.c b/hl/src/H5TB.c
index 0f90393..bcd3339 100644
--- a/hl/src/H5TB.c
+++ b/hl/src/H5TB.c
@@ -3248,7 +3248,7 @@ out:
*
*-------------------------------------------------------------------------
*/
-static
+H5_ATTR_PURE static
hbool_t H5TB_find_field(const char *field, const char *field_list)
{
const char *start = field_list;
diff --git a/hl/src/Makefile.am b/hl/src/Makefile.am
index 1e781a9..c1e6810 100644
--- a/hl/src/Makefile.am
+++ b/hl/src/Makefile.am
@@ -31,12 +31,12 @@ lib_LTLIBRARIES=libhdf5_hl.la
libhdf5_hl_la_LDFLAGS= -version-info $(LT_VERS_INTERFACE):$(LT_VERS_REVISION):$(LT_VERS_AGE) $(AM_LDFLAGS)
# List sources to include in the HDF5 HL Library.
-libhdf5_hl_la_SOURCES=H5DO.c H5DS.c H5IM.c H5LT.c H5LTanalyze.c H5LTparse.c H5PT.c H5TB.c
+libhdf5_hl_la_SOURCES=H5DO.c H5DS.c H5IM.c H5LT.c H5LTanalyze.c H5LTparse.c H5PT.c H5TB.c H5LD.c
# HDF5 HL library depends on HDF5 Library.
libhdf5_hl_la_LIBADD=$(LIBHDF5)
# Public header files (to be installed)
-include_HEADERS=hdf5_hl.h H5DOpublic.h H5IMpublic.h H5LTpublic.h H5TBpublic.h H5DSpublic.h H5PTpublic.h
+include_HEADERS=hdf5_hl.h H5DOpublic.h H5IMpublic.h H5LTpublic.h H5TBpublic.h H5DSpublic.h H5PTpublic.h H5LDpublic.h
include $(top_srcdir)/config/conclude.am
diff --git a/hl/src/hdf5_hl.h b/hl/src/hdf5_hl.h
index 6c363f1..f55aa04 100644
--- a/hl/src/hdf5_hl.h
+++ b/hl/src/hdf5_hl.h
@@ -28,6 +28,7 @@
#include "H5IMpublic.h" /* image */
#include "H5TBpublic.h" /* table */
#include "H5PTpublic.h" /* packet table */
+#include "H5LDpublic.h" /* lite dataset */
#endif /*H5_INCLUDE_HL*/
diff --git a/hl/test/Makefile.am b/hl/test/Makefile.am
index 0809deb..7aa3f6b 100644
--- a/hl/test/Makefile.am
+++ b/hl/test/Makefile.am
@@ -1,4 +1,3 @@
-#
# Copyright by The HDF Group.
# Copyright by the Board of Trustees of the University of Illinois.
# All rights reserved.
@@ -29,7 +28,8 @@ LDADD=$(LIBH5_HL) $(LIBH5TEST) $(LIBHDF5)
# Test programs. These are our main targets. They should be listed in the
# order to be executed, generally most specific tests to least specific tests.
-TEST_PROG=test_lite test_image test_file_image test_table test_ds test_packet test_dset_opt
+TEST_PROG=test_lite test_image test_file_image test_table test_ds test_packet test_dset_opt \
+ test_ld test_dset_append
check_PROGRAMS=$(TEST_PROG)
# These programs generate test files for the tests. They don't need to be
@@ -37,7 +37,7 @@ check_PROGRAMS=$(TEST_PROG)
# them in a conditional causes automake to generate rules so that they
# can be built by hand. They can also be built by specifying
# --enable-build-all at configure time.
-BUILD_ALL_PROGS=gen_test_ds
+BUILD_ALL_PROGS=gen_test_ds gen_test_ld
if BUILD_ALL_CONDITIONAL
noinst_PROGRAMS=$(BUILD_ALL_PROGS)
@@ -47,6 +47,6 @@ endif
CHECK_CLEANFILES+=combine_tables[1-2].h5 test_ds[1-9].h5 test_ds10.h5 \
test_image[1-3].h5 file_img[1-2].h5 test_lite[1-4].h5 test_table.h5 \
test_packet_table.h5 test_packet_compress.h5 test_detach.h5 \
- test_dectris.h5
+ test_dectris.h5 test_append.h5
include $(top_srcdir)/config/conclude.am
diff --git a/hl/test/gen_test_ld.c b/hl/test/gen_test_ld.c
new file mode 100644
index 0000000..1313d2a
--- /dev/null
+++ b/hl/test/gen_test_ld.c
@@ -0,0 +1,379 @@
+#include "hdf5.h"
+#include "H5LDprivate.h"
+#include <time.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <stdlib.h>
+
+/*
+ * WATCH.h5: file with various types of datasets for testing--
+ *
+ * The following datasets are chunked, H5D_ALLOC_TIME_INCR, max. dimensional setting:
+ * DSET_ONE: one-dimensional dataset
+ * DSET_TWO: two-dimensional dataset
+ * DSET_CMPD: one-dimensional dataset with compound type
+ * DSET_CMPD_ESC: one-dimensional dataset with compound type and member names with
+ * escape/separator characters
+ * DSET_CMPD_TWO: two-dimensional dataset with compound type
+ *
+ * The following datasets are one-dimensional, chunked, max. dimension setting:
+ * DSET_ALLOC_EARLY: dataset with H5D_ALLOC_TIME_EARLY
+ * DSET_ALLOC_LATE: dataset H5D_ALLOC_TIME_LATE
+ *
+ * The following datasets are one-dimensional:
+ * DSET_NONE: fixed dimension setting, contiguous, H5D_ALLOC_TIME_LATE
+ * DSET_NOMAX: fixed dimension setting, chunked, H5D_ALLOC_TIME_INCR
+ */
+#define ONE_DIMS0 10
+#define MAX_ONE_DIMS0 100
+
+#define DSET_ONE "DSET_ONE"
+#define DSET_NONE "DSET_NONE"
+#define DSET_NOMAX "DSET_NOMAX"
+#define DSET_ALLOC_LATE "DSET_ALLOC_LATE"
+#define DSET_ALLOC_EARLY "DSET_ALLOC_EARLY"
+#define DSET_CMPD "DSET_CMPD"
+#define DSET_CMPD_ESC "DSET_CMPD_ESC"
+#define DSET_NULL "DSET_NULL"
+#define DSET_SCALAR "DSET_SCALAR"
+
+#define TWO_DIMS0 4
+#define TWO_DIMS1 10
+#define MAX_TWO_DIMS0 60
+#define MAX_TWO_DIMS1 100
+
+#define DSET_TWO "DSET_TWO"
+#define DSET_CMPD_TWO "DSET_CMPD_TWO"
+
+#define CHUNK_SIZE 2
+
+#define FILE "test_ld.h5"
+
+/* Data structures for datasets with compound types */
+typedef struct sub22_t {
+ unsigned int a;
+ unsigned int b;
+ unsigned int c;
+} sub22_t;
+
+typedef struct sub2_t {
+ unsigned int a;
+ sub22_t b;
+ unsigned int c;
+} sub2_t;
+
+typedef struct sub4_t {
+ unsigned int a;
+ unsigned int b;
+} sub4_t;
+
+typedef struct set_t {
+ unsigned int field1;
+ sub2_t field2;
+ double field3;
+ sub4_t field4;
+} set_t;
+
+/*
+ **************************************************************************************
+ *
+ * Create a dataset with the given input parameters
+ * Write to the dataset with the given "data"
+ *
+ **************************************************************************************
+ */
+static int
+generate_dset(hid_t fid, const char *dname, int ndims, hsize_t *dims,
+ hsize_t *maxdims, hid_t dtid, void *data)
+{
+ hid_t dcpl = -1; /* Dataset creation property */
+ hid_t did = -1; /* Dataset id */
+ hid_t sid = -1; /* Dataspace id */
+ int i; /* Local index variable */
+
+ /* Create the dataspace */
+ if((sid = H5Screate_simple(ndims, dims, maxdims)) < 0)
+ goto done;
+
+ /* Set up dataset's creation properties */
+ if(!HDstrcmp(dname, DSET_NONE))
+ dcpl = H5P_DEFAULT;
+ else {
+ hsize_t chunk_dims[H5S_MAX_RANK]; /* Dimension sizes for chunks */
+
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ goto done;
+ for(i = 0; i < ndims; i++)
+ chunk_dims[i] = CHUNK_SIZE;
+ if(H5Pset_chunk(dcpl, ndims, chunk_dims) < 0)
+ goto done;
+ } /* end else */
+
+ if(!HDstrcmp(dname, DSET_ALLOC_LATE)) {
+ if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE) < 0)
+ goto done;
+ } /* end if */
+ else if(!HDstrcmp(dname, DSET_ALLOC_EARLY)) {
+ if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0)
+ goto done;
+ } /* end if */
+
+ /* Create the dataset */
+ if((did = H5Dcreate2(fid, dname, dtid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ goto done;
+
+ /* Write to the dataset */
+ if(H5Dwrite(did, dtid, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ goto done;
+
+ /* Closing */
+ if(H5Pclose(dcpl) < 0)
+ goto done;
+ if(H5Sclose(sid) < 0)
+ goto done;
+ if(H5Dclose(did) < 0)
+ goto done;
+
+ return(SUCCEED);
+
+done:
+ H5E_BEGIN_TRY
+ H5Sclose(sid);
+ H5Pclose(dcpl);
+ H5Dclose(did);
+ H5E_END_TRY
+
+ return(FAIL);
+} /* generate_dset() */
+
+int
+main(void)
+{
+ hid_t fid; /* File id */
+ hid_t fapl; /* File access property list */
+ hsize_t cur_dims[1]; /* Dimension sizes */
+ hsize_t max_dims[1]; /* Maximum dimension sizes */
+ hsize_t cur2_dims[2]; /* Current dimension sizes */
+ hsize_t max2_dims[2]; /* Maximum dimension sizes */
+ hid_t set_tid, esc_set_tid; /* Compound type id */
+ hid_t sub22_tid; /* Compound type id */
+ hid_t sub2_tid, esc_sub2_tid; /* Compound type id */
+ hid_t sub4_tid, esc_sub4_tid; /* Compound type id */
+ hid_t null_did, null_sid; /* H5S_NULL dataset & dataspace ids */
+ hid_t scalar_did, scalar_sid; /* H5S_SCALAR dataset & dataspace ids */
+ int one_data[ONE_DIMS0]; /* Buffer for data */
+ int two_data[TWO_DIMS0*TWO_DIMS1]; /* Buffer for data */
+ set_t one_cbuf[ONE_DIMS0]; /* Buffer for data with compound type */
+ set_t two_cbuf[TWO_DIMS0*TWO_DIMS1]; /* Buffer for data with compound type */
+ int i; /* Local index variable */
+
+ /* Create a file access property list */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto done;
+
+ /* Set to use latest library format */
+ if((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST)) < 0)
+ goto done;
+
+ /* Create a file */
+ if((fid = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ goto done;
+
+ /* Initialization for one-dimensional dataset */
+ cur_dims[0] = ONE_DIMS0;
+ max_dims[0] = MAX_ONE_DIMS0;
+ for(i = 0; i < ONE_DIMS0; i++)
+ one_data[i] = i;
+
+ /* Generate DSET_ONE, DSET_NONE, DSET_NOMAX, DSET_ALLOC_LATE, DSET_EARLY */
+ if(generate_dset(fid, DSET_ONE, 1, cur_dims, max_dims, H5T_NATIVE_INT, one_data) < 0)
+ goto done;
+ if(generate_dset(fid, DSET_NONE, 1, cur_dims, NULL, H5T_NATIVE_INT, one_data) < 0)
+ goto done;
+ if(generate_dset(fid, DSET_NOMAX, 1, cur_dims, NULL, H5T_NATIVE_INT, one_data) < 0)
+ goto done;
+ if(generate_dset(fid, DSET_ALLOC_LATE, 1, cur_dims, max_dims, H5T_NATIVE_INT, one_data) < 0)
+ goto done;
+ if(generate_dset(fid, DSET_ALLOC_EARLY, 1, cur_dims, max_dims, H5T_NATIVE_INT, one_data) < 0)
+ goto done;
+
+ /* Initialization for two-dimensional dataset */
+ cur2_dims[0] = TWO_DIMS0;
+ cur2_dims[1] = TWO_DIMS1;
+ max2_dims[0] = MAX_TWO_DIMS0;
+ max2_dims[1] = MAX_TWO_DIMS1;
+
+ for(i = 0; i < (TWO_DIMS0 * TWO_DIMS1); i++)
+ two_data[i] = i;
+
+ /* Generate DSET_TWO */
+ if(generate_dset(fid, DSET_TWO, 2, cur2_dims, max2_dims, H5T_NATIVE_INT, two_data) < 0)
+ goto done;
+
+ /* Initialization for one-dimensional compound typed dataset */
+ cur_dims[0] = ONE_DIMS0;
+ max_dims[0] = MAX_ONE_DIMS0;
+
+ for (i = 0; i < ONE_DIMS0; i++) {
+ one_cbuf[i].field1 = 1;
+ one_cbuf[i].field2.a = 2;
+ one_cbuf[i].field2.c = 4;
+ one_cbuf[i].field2.b.a = 20;
+ one_cbuf[i].field2.b.b = 40;
+ one_cbuf[i].field2.b.c = 80;
+ one_cbuf[i].field3 = 3.0f;
+ one_cbuf[i].field4.a = 4;
+ one_cbuf[i].field4.b = 8;
+ } /* end for */
+
+ /* Create the compound type */
+ if((sub22_tid = H5Tcreate(H5T_COMPOUND, sizeof(sub22_t))) < 0)
+ goto done;
+ if(H5Tinsert(sub22_tid, "a", HOFFSET(sub22_t, a), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(sub22_tid, "b", HOFFSET(sub22_t, b), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(sub22_tid, "c", HOFFSET(sub22_t, c), H5T_NATIVE_INT) < 0)
+ goto done;
+
+ if((sub2_tid = H5Tcreate(H5T_COMPOUND, sizeof(sub2_t))) < 0)
+ goto done;
+ if(H5Tinsert(sub2_tid, "a", HOFFSET(sub2_t, a), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(sub2_tid, "b", HOFFSET(sub2_t, b), sub22_tid) < 0)
+ goto done;
+ if(H5Tinsert(sub2_tid, "c", HOFFSET(sub2_t, c), H5T_NATIVE_INT) < 0)
+ goto done;
+
+ if((sub4_tid = H5Tcreate(H5T_COMPOUND, sizeof(sub4_t))) < 0)
+ goto done;
+ if(H5Tinsert(sub4_tid, "a", HOFFSET(sub4_t, a), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(sub4_tid, "b", HOFFSET(sub4_t, b), H5T_NATIVE_INT) < 0)
+ goto done;
+
+ if((set_tid = H5Tcreate(H5T_COMPOUND, sizeof(set_t))) < 0)
+ goto done;
+ if(H5Tinsert(set_tid, "field1", HOFFSET(set_t, field1), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(set_tid, "field2", HOFFSET(set_t, field2), sub2_tid) < 0)
+ goto done;
+ if(H5Tinsert(set_tid, "field3", HOFFSET(set_t, field3), H5T_NATIVE_DOUBLE) < 0)
+ goto done;
+ if(H5Tinsert(set_tid, "field4", HOFFSET(set_t, field4), sub4_tid) < 0)
+ goto done;
+
+ /* Create the compound type with escape/separator characters */
+ if((esc_sub2_tid = H5Tcreate(H5T_COMPOUND, sizeof(sub2_t))) < 0)
+ goto done;
+ if(H5Tinsert(esc_sub2_tid, ".a", HOFFSET(sub2_t, a), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(esc_sub2_tid, ",b", HOFFSET(sub2_t, b), sub22_tid) < 0)
+ goto done;
+ if(H5Tinsert(esc_sub2_tid, "\\c", HOFFSET(sub2_t, c), H5T_NATIVE_INT) < 0)
+ goto done;
+
+ if((esc_sub4_tid = H5Tcreate(H5T_COMPOUND, sizeof(sub4_t))) < 0)
+ goto done;
+ if(H5Tinsert(esc_sub4_tid, "a.", HOFFSET(sub4_t, a), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(esc_sub4_tid, "b,", HOFFSET(sub4_t, b), H5T_NATIVE_INT) < 0)
+ goto done;
+
+ if((esc_set_tid = H5Tcreate(H5T_COMPOUND, sizeof(set_t))) < 0)
+ goto done;
+ if(H5Tinsert(esc_set_tid, "field,1", HOFFSET(set_t, field1), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(esc_set_tid, "field2.", HOFFSET(set_t, field2), esc_sub2_tid) < 0)
+ goto done;
+ if(H5Tinsert(esc_set_tid, "field\\3", HOFFSET(set_t, field3), H5T_NATIVE_DOUBLE) < 0)
+ goto done;
+ if(H5Tinsert(esc_set_tid, "field4,", HOFFSET(set_t, field4), esc_sub4_tid) < 0)
+ goto done;
+
+ /* Generate DSET_CMPD, DSET_CMPD_ESC */
+ if(generate_dset(fid, DSET_CMPD, 1, cur_dims, max_dims, set_tid, one_cbuf) < 0)
+ goto done;
+ if(generate_dset(fid, DSET_CMPD_ESC, 1, cur_dims, max_dims, esc_set_tid, one_cbuf) < 0)
+ goto done;
+
+ /* Initialization for two-dimensional compound typed dataset */
+ cur2_dims[0] = TWO_DIMS0;
+ cur2_dims[1] = TWO_DIMS1;
+ max2_dims[0] = MAX_TWO_DIMS0;
+ max2_dims[1] = MAX_TWO_DIMS1; /* was max2_dims[0]: copy/paste bug overwrote dim 0's max (60) with dim 1's (100) */
+
+ for (i = 0; i < (TWO_DIMS0 * TWO_DIMS1); i++) {
+ two_cbuf[i].field1 = 1;
+ two_cbuf[i].field2.a = 2;
+ two_cbuf[i].field2.c = 4;
+ two_cbuf[i].field2.b.a = 20;
+ two_cbuf[i].field2.b.b = 40;
+ two_cbuf[i].field2.b.c = 80;
+ two_cbuf[i].field3 = 3.0f;
+ two_cbuf[i].field4.a = 4;
+ two_cbuf[i].field4.b = 8;
+ } /* end for */
+
+ /* Generate DSET_CMPD_TWO */
+ if(generate_dset(fid, DSET_CMPD_TWO, 2, cur2_dims, max2_dims, set_tid, two_cbuf) < 0)
+ goto done;
+
+ /* Create NULL dataspace */
+ if((null_sid = H5Screate(H5S_NULL)) < 0)
+ goto done;
+
+ /* Create the NULL dataset */
+ if((null_did = H5Dcreate2(fid, DSET_NULL, H5T_NATIVE_UINT, null_sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto done;
+
+ /* Create SCALAR dataspace */
+ if((scalar_sid = H5Screate(H5S_SCALAR)) < 0)
+ goto done;
+
+ /* Create the SCALAR dataset */
+ if((scalar_did = H5Dcreate2(fid, DSET_SCALAR, H5T_NATIVE_INT, scalar_sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto done;
+
+ /* Closing */
+ if(H5Dclose(scalar_did) < 0) goto done;
+ if(H5Sclose(scalar_sid) < 0) goto done;
+
+ if(H5Dclose(null_did) < 0) goto done;
+ if(H5Sclose(null_sid) < 0) goto done;
+
+ if(H5Tclose(sub22_tid) < 0) goto done;
+ if(H5Tclose(sub2_tid) < 0) goto done;
+ if(H5Tclose(sub4_tid) < 0) goto done;
+ if(H5Tclose(set_tid) < 0) goto done;
+ if(H5Tclose(esc_sub2_tid) < 0) goto done;
+ if(H5Tclose(esc_sub4_tid) < 0) goto done;
+ if(H5Tclose(esc_set_tid) < 0) goto done;
+
+ if(H5Pclose(fapl) < 0) goto done;
+ if(H5Fclose(fid) < 0) goto done;
+
+ exit(EXIT_SUCCESS);
+
+done:
+ H5E_BEGIN_TRY
+ H5Tclose(sub22_tid);
+ H5Tclose(sub2_tid);
+ H5Tclose(sub4_tid);
+ H5Tclose(set_tid);
+ H5Tclose(esc_sub2_tid);
+ H5Tclose(esc_sub4_tid);
+ H5Tclose(esc_set_tid);
+
+ H5Dclose(null_did);
+ H5Sclose(null_sid);
+ H5Dclose(scalar_did);
+ H5Sclose(scalar_sid);
+
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ H5E_END_TRY
+
+ exit(EXIT_FAILURE);
+} /* main() */
+
diff --git a/hl/test/test_dset_append.c b/hl/test/test_dset_append.c
new file mode 100644
index 0000000..0f193d9
--- /dev/null
+++ b/hl/test/test_dset_append.c
@@ -0,0 +1,1196 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+* Copyright by The HDF Group. *
+* Copyright by the Board of Trustees of the University of Illinois. *
+* All rights reserved. *
+* *
+* This file is part of HDF5. The full HDF5 copyright notice, including *
+* terms governing use, modification, and redistribution, is contained in *
+* the files COPYING and Copyright.html. COPYING can be found at the root *
+* of the source code distribution tree; Copyright.html can be found at the *
+* root level of an installed copy of the electronic HDF5 document set and *
+* is linked from the top-level documents page. It can also be found at *
+* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+* access to either file, you may request a copy from help@hdfgroup.org. *
+* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <stdlib.h>
+#include <string.h>
+#include "h5hltest.h"
+#include "H5srcdir.h"
+#include "H5DOpublic.h"
+#include <math.h>
+
+#if defined(H5_HAVE_ZLIB_H) && !defined(H5_ZLIB_HEADER)
+# define H5_ZLIB_HEADER "zlib.h"
+#endif
+#if defined(H5_ZLIB_HEADER)
+# include H5_ZLIB_HEADER /* "zlib.h" */
+#endif
+
+#define FILE "test_append.h5"
+#define DNAME_UNLIM "dataset_unlim"
+#define DNAME_LESS "dataset_less"
+#define DNAME_VARY "dataset_vary"
+#define DNAME_ROW "dataset_row"
+#define DNAME_COLUMN "dataset_column"
+#define DBUGNAME1 "dataset_bug1"
+#define DBUGNAME2 "dataset_bug2"
+
+/* The callback function for the object flush property */
+static herr_t
+flush_func(hid_t H5_ATTR_UNUSED obj_id, void *_udata)
+{
+ unsigned *flush_ct = (unsigned*)_udata;
+ ++(*flush_ct);
+ return 0;
+}
+
+/* The callback function for the append flush property */
+static herr_t
+append_func(hid_t H5_ATTR_UNUSED dset_id, hsize_t H5_ATTR_UNUSED *cur_dims, void *_udata)
+{
+ unsigned *append_ct = (unsigned *)_udata;
+ ++(*append_ct);
+ return 0;
+}
+
+/*-------------------------------------------------------------------------
+ * Function: test_dataset_append_rows_columns
+ *
+ * Purpose: Verify that the object flush property and the append flush property
+ * are working properly when appending rows and columns to a dataset
+ * with 2 extendible dimensions.
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi; Jan 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_dataset_append_rows_columns(hid_t fid)
+{
+ hid_t did = -1; /* Dataset ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t dcpl = -1; /* A copy of dataset creation property */
+ hid_t dapl = -1; /* A copy of dataset access property */
+ hid_t ffapl = -1; /* The file's file access property list */
+
+ hsize_t dims[2] = {0, 10}; /* Current dimension sizes */
+ hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes */
+ hsize_t chunk_dims[2] = {2,5}; /* Chunk dimension sizes */
+ int lbuf[10], cbuf[6]; /* The data buffers */
+ int buf[6][13], rbuf[6][13]; /* The data buffers */
+
+ hsize_t boundary[2] = {1, 1}; /* Boundary sizes */
+ unsigned append_ct = 0; /* The # of appends */
+ unsigned *flush_ptr; /* Points to the flush counter */
+
+ int i, j; /* Local index variables */
+
+ TESTING("Append flush with H5DOappend()--append rows & columns");
+
+ /* Get the file's file access property list */
+ if((ffapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set to create a chunked dataset with 2 extendible dimensions */
+ if((sid = H5Screate_simple(2, dims, maxdims)) < 0)
+ FAIL_STACK_ERROR;
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set append flush property */
+ if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_append_flush(dapl, 2, boundary, append_func, &append_ct) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create the dataset */
+ if((did = H5Dcreate2(fid, DNAME_UNLIM, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl)) < 0)
+ TEST_ERROR;
+
+ /* Append 6 rows to the dataset */
+ for(i = 0; i < 6; i++) {
+ for(j = 0; j < 10; j++)
+ lbuf[j] = buf[i][j] = (i * 10) + (j + 1);
+ if(H5DOappend(did, H5P_DEFAULT, 0, (size_t)1, H5T_NATIVE_INT, lbuf) < 0)
+ TEST_ERROR;
+ } /* end for */
+
+ /* Verify the # of appends */
+ if(append_ct != 6)
+ TEST_ERROR;
+
+ /* Retrieve and verify object flush counts */
+ if(H5Pget_object_flush_cb(ffapl, NULL, (void **)&flush_ptr) < 0)
+ FAIL_STACK_ERROR;
+ if(*flush_ptr != 6)
+ TEST_ERROR;
+
+ /* Append 3 columns to the dataset */
+ for(i = 0; i < 3; i++) {
+ for(j = 0; j < 6; j++)
+ cbuf[j] = buf[j][i + 10] = ((i * 6) + (j + 1)) * -1;
+ if(H5DOappend(did, H5P_DEFAULT, 1, (size_t)1, H5T_NATIVE_INT, cbuf) < 0)
+ TEST_ERROR;
+ } /* end for */
+
+ /* Verify the # of appends */
+ if(append_ct != 9)
+ TEST_ERROR;
+
+ /* Retrieve and verify object flush counts */
+ if(H5Pget_object_flush_cb(ffapl, NULL, (void **)&flush_ptr) < 0)
+ FAIL_STACK_ERROR;
+ if(*flush_ptr != 9)
+ TEST_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 13; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+
+ /* Clear the buffer */
+ HDmemset(rbuf, 0, sizeof(rbuf));
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Open the dataset again */
+ if((did = H5Dopen2(fid, DNAME_UNLIM, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 13; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(ffapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dapl);
+ H5Pclose(dcpl);
+ H5Sclose(sid); /* was H5Pclose: sid is a dataspace ID, so H5Pclose silently failed and leaked it */
+ H5Dclose(did);
+ H5Pclose(ffapl);
+ } H5E_END_TRY;
+
+ return 1;
+} /* test_dataset_append_rows_columns() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_dataset_append_rows
+ *
+ * Purpose: Verify that the object flush property and the append flush property
+ * are working properly when appending rows to a dataset with
+ * one extendible dimension (row).
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi; Jan 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_dataset_append_rows(hid_t fid)
+{
+ hid_t did = -1; /* Dataset ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t dcpl = -1; /* A copy of dataset creation property */
+ hid_t dapl = -1; /* A copy of dataset access property */
+ hid_t ffapl = -1; /* The file's file access property list */
+
+ hsize_t dims[2] = {0, 10}; /* Current dimension sizes */
+ hsize_t maxdims[2] = {H5S_UNLIMITED, 10}; /* Maximum dimension sizes */
+ hsize_t chunk_dims[2] = {2,5}; /* Chunk dimension sizes */
+ int lbuf[10]; /* The data buffer */
+ int buf[6][10], rbuf[6][10]; /* The data buffers */
+ int i, j; /* Local index variables */
+
+ hsize_t boundary[2] = {1, 0}; /* Boundary sizes */
+ unsigned append_ct = 0; /* The # of appends */
+ unsigned *flush_ptr; /* Points to the flush counter */
+
+ TESTING("Append flush with H5DOappend()--append rows");
+
+ /* Get the file's file access property list */
+ if((ffapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set to create a chunked dataset with 1 extendible dimension */
+ if((sid = H5Screate_simple(2, dims, maxdims)) < 0)
+ FAIL_STACK_ERROR;
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set append flush property */
+ if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_append_flush(dapl, 2, boundary, append_func, &append_ct) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create the dataset */
+ if((did = H5Dcreate2(fid, DNAME_ROW, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl)) < 0)
+ TEST_ERROR;
+
+ /* Append 6 rows to the dataset */
+ for(i = 0; i < 6; i++) {
+ for(j = 0; j < 10; j++)
+ lbuf[j] = buf[i][j] = (i * 10) + (j + 1);
+ if(H5DOappend(did, H5P_DEFAULT, 0, (size_t)1, H5T_NATIVE_INT, lbuf) < 0)
+ TEST_ERROR;
+ } /* end for */
+
+ /* Verify the # of appends */
+ if(append_ct != 6)
+ TEST_ERROR;
+
+ if(H5Pget_object_flush_cb(ffapl, NULL, (void **)&flush_ptr) < 0)
+ FAIL_STACK_ERROR;
+ if(*flush_ptr != 6)
+ TEST_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 10; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+
+ /* Clear the buffer */
+ HDmemset(rbuf, 0, sizeof(rbuf));
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Open the dataset again */
+ if((did = H5Dopen2(fid, DNAME_ROW, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 10; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(ffapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dapl);
+ H5Pclose(dcpl);
+ H5Pclose(sid);
+ H5Dclose(did);
+ H5Pclose(ffapl);
+ } H5E_END_TRY;
+
+ return 1;
+} /* test_dataset_append_rows() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_dataset_append_columns
+ *
+ * Purpose: Verify that the object flush property and the append flush property
+ * are working properly when appending columns to a dataset
+ * with one extendible dimension (column).
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi; Jan 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_dataset_append_columns(hid_t fid)
+{
+ hid_t did = -1; /* Dataset ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t dcpl = -1; /* A copy of dataset creation property */
+ hid_t dapl = -1; /* A copy of dataset access property */
+ hid_t ffapl = -1; /* The file's file access property list */
+
+ hsize_t dims[2] = {6, 0}; /* Current dimension sizes */
+ hsize_t maxdims[2] = {6, H5S_UNLIMITED}; /* Maximum dimension sizes */
+ hsize_t chunk_dims[2] = {2,5}; /* Chunk dimension sizes */
+ int cbuf[6]; /* The data buffer */
+ int buf[6][3], rbuf[6][3]; /* The data buffers */
+ int i, j; /* Local index variable */
+
+ hsize_t boundary[2] = {0, 1}; /* Boundary sizes */
+ unsigned append_ct = 0; /* The # of appends */
+ unsigned *flush_ptr; /* Points to the flush counter */
+
+ TESTING("Append flush with H5DOappend()--append columns");
+
+ /* Get the file's file access property list */
+ if((ffapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set to create a chunked dataset with 1 extendible dimension */
+ if((sid = H5Screate_simple(2, dims, maxdims)) < 0)
+ FAIL_STACK_ERROR;
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set append flush property */
+ if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_append_flush(dapl, 2, boundary, append_func, &append_ct) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create the dataset */
+ if((did = H5Dcreate2(fid, DNAME_COLUMN, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl)) < 0)
+ TEST_ERROR;
+
+ /* Append 3 columns to the dataset */
+ for(i = 0; i < 3; i++) {
+ for(j = 0; j < 6; j++)
+ cbuf[j] = buf[j][i] = ((i * 6) + (j + 1)) * -1;
+ if(H5DOappend(did, H5P_DEFAULT, 1, (size_t)1, H5T_NATIVE_INT, cbuf) < 0)
+ TEST_ERROR;
+ } /* end for */
+
+ /* Verify the # of appends */
+ if(append_ct != 3)
+ TEST_ERROR;
+
+ /* Retrieve and verify object flush counts */
+ if(H5Pget_object_flush_cb(ffapl, NULL, (void **)&flush_ptr) < 0)
+ FAIL_STACK_ERROR;
+ if(*flush_ptr != 3)
+ TEST_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 3; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+
+ /* Clear the buffer */
+ HDmemset(rbuf, 0, sizeof(rbuf));
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Open the dataset again */
+ if((did = H5Dopen2(fid, DNAME_COLUMN, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 3; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(ffapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dapl);
+ H5Pclose(dcpl);
+ H5Sclose(sid); /* sid is a dataspace ID; was incorrectly closed with H5Pclose() */
+ H5Dclose(did);
+ H5Pclose(ffapl);
+ } H5E_END_TRY;
+
+ return 1;
+} /* test_dataset_append_columns() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_dataset_append_BUG1
+ *
+ * Purpose: Verify that the object flush property and the append flush property
+ * are working properly when appending rows and columns to an
+ * extendible dataset.
+ * A BUG occurs:
+ * when the extendible dataset is set up as follows:
+ * hsize_t dims[2] = {0, 10};
+ * hsize_t maxdims[2] = {H5S_UNLIMITED, 50};
+ * when append 6 rows and 3 columns to the dataset;
+ * The data is correct when the dataset is read at this point;
+ * The data is incorrect when the dataset is closed, opened again, and read at this point;
+ * NOTE: the problem does not occur when H5Dflush() is not performed for each row/column.
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi; Jan 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_dataset_append_BUG1(hid_t fid)
+{
+ hid_t did = -1; /* Dataset ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t dcpl = -1; /* Dataset creation property */
+ hid_t dapl = -1; /* Dataset access property */
+ hid_t ffapl = -1; /* The file's file access property list */
+
+ hsize_t dims[2] = {0, 10}; /* Current dimension sizes */
+ hsize_t maxdims[2] = {H5S_UNLIMITED, 50}; /* Maximum dimension sizes */
+ hsize_t chunk_dims[2] = {2,5}; /* Chunk dimension sizes */
+ int lbuf[10], cbuf[6]; /* The data buffers */
+ int buf[6][13], rbuf[6][13]; /* The data buffers */
+ int i, j; /* Local index variables */
+
+ hsize_t boundary[2] = {1, 1}; /* Boundary sizes */
+ unsigned append_ct = 0; /* The # of appends */
+ unsigned *flush_ptr; /* Points to the flush counter */
+
+ TESTING("Append flush with H5DOappend()--append rows & columns--BUG1");
+
+ /* Get the file's file access property list */
+ if((ffapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set to create a chunked dataset with 2 extendible dimensions */
+ if((sid = H5Screate_simple(2, dims, maxdims)) < 0)
+ FAIL_STACK_ERROR;
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set append flush property */
+ if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_append_flush(dapl, 2, boundary, append_func, &append_ct) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create the dataset */
+ if((did = H5Dcreate2(fid, DBUGNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl)) < 0)
+ TEST_ERROR;
+
+ /* Append 6 rows to the dataset */
+ for(i = 0; i < 6; i++) {
+ for(j = 0; j < 10; j++)
+ lbuf[j] = buf[i][j] = (i * 10) + (j + 1);
+ if(H5DOappend(did, H5P_DEFAULT, 0, (size_t)1, H5T_NATIVE_INT, lbuf) < 0)
+ TEST_ERROR;
+ } /* end for */
+
+ /* Verify the # of appends */
+ if(append_ct != 6)
+ TEST_ERROR;
+
+ /* Retrieve and verify object flush counts */
+ if(H5Pget_object_flush_cb(ffapl, NULL, (void **)&flush_ptr) < 0)
+ FAIL_STACK_ERROR;
+ if(*flush_ptr != 6)
+ TEST_ERROR;
+
+ /* Append 3 columns to the dataset */
+ for(i = 0; i < 3; i++) {
+ for(j = 0; j < 6; j++)
+ cbuf[j] = buf[j][i+10] = ((i * 6) + (j + 1)) * -1;
+ if(H5DOappend(did, H5P_DEFAULT, 1, (size_t)1, H5T_NATIVE_INT, cbuf) < 0)
+ TEST_ERROR;
+ } /* end for */
+
+ /* Verify the # of appends */
+ if(append_ct != 9)
+ TEST_ERROR;
+
+ /* Retrieve and verify object flush counts */
+ if(H5Pget_object_flush_cb(ffapl, NULL, (void **)&flush_ptr) < 0)
+ FAIL_STACK_ERROR;
+ if(*flush_ptr != 9)
+ TEST_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 13; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+
+#ifdef BUG1
+ HDmemset(rbuf, 0, sizeof(rbuf));
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Open the dataset again (use H5Dopen2 for consistency with the rest of this file) */
+ if((did = H5Dopen2(fid, DBUGNAME1, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 13; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+#endif
+
+ /* Closing */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(ffapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Pclose(dapl);
+ H5Sclose(sid); /* sid is a dataspace ID; was incorrectly closed with H5Pclose() */
+ H5Dclose(did);
+ H5Pclose(ffapl);
+ } H5E_END_TRY;
+
+ return 1;
+} /* test_dataset_append_BUG1() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_dataset_append_BUG2
+ *
+ * Purpose: Verify that the object flush property and the append flush property
+ * are working properly when appending rows and columns to an
+ * extendible dataset.
+ * A BUG occurs:
+ * when the extendible dataset is set up as follows:
+ * hsize_t dims[2] = {0, 10};
+ * hsize_t maxdims[2] = {50, H5S_UNLIMITED};
+ * when append 6 rows and 3 columns to the dataset;
+ * The data is correct when the dataset is read at this point;
+ * The data is incorrect when the dataset is closed, opened again, and read at this point;
+ * NOTE: the problem does not occur when H5Dflush() is not performed for each row/column.
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi; Jan 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_dataset_append_BUG2(hid_t fid)
+{
+ hid_t did = -1; /* Dataset ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t dcpl = -1; /* Dataset creation property */
+ hid_t dapl = -1; /* Dataset access property */
+ hid_t ffapl = -1; /* The file's file access property list */
+
+ hsize_t dims[2] = {0, 10}; /* Current dimension sizes */
+ hsize_t maxdims[2] = {50, H5S_UNLIMITED}; /* Maximum dimension sizes */
+ hsize_t chunk_dims[2] = {2,5}; /* Chunk dimension sizes */
+ int lbuf[10], cbuf[6]; /* Data buffers */
+ int buf[6][13], rbuf[6][13]; /* Data buffers */
+ int i, j; /* Local index variables */
+
+ hsize_t boundary[2] = {1, 1}; /* Boundary sizes */
+ unsigned append_ct = 0; /* The # of appends */
+ unsigned *flush_ptr; /* Points to the flush counter */
+
+ TESTING("Append flush with H5DOappend()--append rows & columns--BUG2");
+
+ /* Get the file's file access property list */
+ if((ffapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set to create a chunked dataset with 2 extendible dimensions */
+ if((sid = H5Screate_simple(2, dims, maxdims)) < 0)
+ FAIL_STACK_ERROR;
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set append flush property */
+ if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_append_flush(dapl, 2, boundary, append_func, &append_ct) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create the dataset */
+ if((did = H5Dcreate2(fid, DBUGNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl)) < 0)
+ TEST_ERROR;
+
+ /* Append 6 rows to the dataset */
+ for(i = 0; i < 6; i++) {
+ for(j = 0; j < 10; j++)
+ lbuf[j] = buf[i][j] = (i * 10) + (j + 1);
+ if(H5DOappend(did, H5P_DEFAULT, 0, (size_t)1, H5T_NATIVE_INT, lbuf) < 0)
+ TEST_ERROR;
+ } /* end for */
+
+ /* Verify the # of appends */
+ if(append_ct != 6)
+ TEST_ERROR;
+
+ /* Retrieve and verify object flush counts */
+ if(H5Pget_object_flush_cb(ffapl, NULL, (void **)&flush_ptr) < 0)
+ FAIL_STACK_ERROR;
+ if(*flush_ptr != 6)
+ TEST_ERROR;
+
+
+ /* Append 3 columns to the dataset */
+ for(i = 0; i < 3; i++) {
+ for(j = 0; j < 6; j++)
+ cbuf[j] = buf[j][i+10] = ((i * 6) + (j + 1)) * -1;
+ if(H5DOappend(did, H5P_DEFAULT, 1, (size_t)1, H5T_NATIVE_INT, cbuf) < 0)
+ TEST_ERROR;
+ } /* end for */
+
+ /* Verify the # of appends */
+ if(append_ct != 9)
+ TEST_ERROR;
+
+ /* Retrieve and verify object flush counts */
+ if(H5Pget_object_flush_cb(ffapl, NULL, (void **)&flush_ptr) < 0)
+ FAIL_STACK_ERROR;
+ if(*flush_ptr != 9)
+ TEST_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 13; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+
+#ifdef BUG2
+ HDmemset(rbuf, 0, sizeof(rbuf));
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Open the dataset again (use H5Dopen2 for consistency with the rest of this file) */
+ if((did = H5Dopen2(fid, DBUGNAME2, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 13; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+#endif
+
+ /* Closing */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(ffapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Pclose(dapl);
+ H5Sclose(sid); /* sid is a dataspace ID; was incorrectly closed with H5Pclose() */
+ H5Dclose(did);
+ H5Pclose(ffapl);
+ } H5E_END_TRY;
+
+ return 1;
+} /* test_dataset_append_BUG2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_dataset_append_less
+ *
+ * Purpose: Verify that the object flush property and the append flush property
+ * are working properly when appending rows and columns to an
+ * extendible dataset where the append size is less than the boundary
+ * size.
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi; Jan 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_dataset_append_less(hid_t fid)
+{
+ hid_t did = -1; /* Dataset ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t dcpl = -1; /* A copy of dataset creation property */
+ hid_t dapl = -1; /* A copy of dataset access property */
+ hid_t ffapl = -1; /* The file's file access property list */
+
+ hsize_t dims[2] = {0, 10}; /* Current dimension sizes */
+ hsize_t maxdims[2] = {100, 100}; /* Maximum dimension sizes */
+ hsize_t chunk_dims[2] = {2,5}; /* Chunk dimension sizes */
+ int lbuf[20], cbuf[6][3]; /* Data buffers */
+ int buf[6][13], rbuf[6][13]; /* Data buffers */
+ int i, j, k; /* Local index variables */
+
+ hsize_t boundary[2] = {3, 3}; /* Boundary sizes */
+ unsigned append_ct = 0; /* The # of appends */
+ unsigned *flush_ptr; /* Points to the flush counter */
+
+ TESTING("Append flush with H5DOappend()--append size < boundary size");
+
+ /* Get the file's file access property list */
+ if((ffapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set to create a chunked dataset with 2 extendible dimensions */
+ if((sid = H5Screate_simple(2, dims, maxdims)) < 0)
+ FAIL_STACK_ERROR;
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set append flush property */
+ if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_append_flush(dapl, 2, boundary, append_func, &append_ct) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create the dataset */
+ if((did = H5Dcreate2(fid, DNAME_LESS, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl)) < 0)
+ TEST_ERROR;
+
+ /* Append to the dataset 2 rows at a time for 3 times */
+ for(i = 0, k = 0; i < 6; i++) {
+ for(j = 0; j < 10; j++, k++)
+ buf[i][j] = lbuf[k] = (i * 10) + (j + 1);
+
+ if((i + 1) % 2 == 0) {
+ if(H5DOappend(did, H5P_DEFAULT, 0, (size_t)2, H5T_NATIVE_INT, lbuf) < 0)
+ TEST_ERROR;
+ k = 0;
+ } /* end if */
+ } /* end for */
+
+ /* Verify the # of appends */
+ if(append_ct != 2)
+ TEST_ERROR;
+
+ /* Retrieve and verify object flush counts */
+ if(H5Pget_object_flush_cb(ffapl, NULL, (void **)&flush_ptr) < 0)
+ FAIL_STACK_ERROR;
+ if(*flush_ptr != 2)
+ TEST_ERROR;
+
+ /* Append 3 columns to the dataset, once */
+ for(i = 0; i < 3; i++)
+ for(j = 0; j < 6; j++, k++)
+ cbuf[j][i] = buf[j][i + 10] = ((i * 6) + (j + 1)) * -1;
+ if(H5DOappend(did, H5P_DEFAULT, 1, (size_t)3, H5T_NATIVE_INT, cbuf) < 0)
+ TEST_ERROR;
+
+ /* Verify the # of appends */
+ if(append_ct != 3)
+ TEST_ERROR;
+
+ /* Retrieve and verify object flush counts */
+ if(H5Pget_object_flush_cb(ffapl, NULL, (void **)&flush_ptr) < 0)
+ FAIL_STACK_ERROR;
+ if(*flush_ptr != 3)
+ TEST_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 13; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+
+ /* Clear the buffer */
+ HDmemset(rbuf, 0, sizeof(rbuf));
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Open the dataset again */
+ if((did = H5Dopen2(fid, DNAME_LESS, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 13; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(ffapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dapl);
+ H5Pclose(dcpl);
+ H5Sclose(sid); /* sid is a dataspace ID; was incorrectly closed with H5Pclose() */
+ H5Dclose(did);
+ H5Pclose(ffapl);
+ } H5E_END_TRY;
+
+ return 1;
+} /* test_dataset_append_less() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_dataset_append_vary
+ *
+ * Purpose: Verify that the object flush property and the append flush property
+ * are working properly when appending rows and columns to an
+ * extendible dataset where
+ * row: the append size is 3 times of the boundary size
+ * the append callback/flush is performed on the 1st boundary hit
+ * column: the boundary is greater than the append size
+ * the boundary is not hit at all
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi; Jan 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_dataset_append_vary(hid_t fid)
+{
+ hid_t did = -1; /* Dataset ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t dcpl = -1; /* A copy of dataset creation property */
+ hid_t dapl = -1; /* A copy of dataset access property */
+ hid_t ffapl = -1; /* The file's file access property list */
+
+ hsize_t dims[2] = {0, 10}; /* Current dimension sizes */
+ hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes */
+ hsize_t chunk_dims[2] = {2,5}; /* Chunk dimension sizes */
+ int lbuf[60], cbuf[6][3]; /* Data buffers */
+ int buf[6][13], rbuf[6][13]; /* Data buffers */
+ int i, j, k; /* Local index variables */
+
+ hsize_t boundary[2] = {3, 7}; /* Boundary sizes */
+ unsigned append_ct = 0; /* The # of appends */
+ unsigned *flush_ptr; /* Points to the flush counter */
+
+ TESTING("Append flush with H5DOappend()--append & boundary size vary");
+
+ /* Get the file's file access property list */
+ if((ffapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set to create a chunked dataset with 2 extendible dimensions */
+ if((sid = H5Screate_simple(2, dims, maxdims)) < 0)
+ FAIL_STACK_ERROR;
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set append flush property */
+ if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_append_flush(dapl, 2, boundary, append_func, &append_ct) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create the dataset */
+ if((did = H5Dcreate2(fid, DNAME_VARY, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl)) < 0)
+ TEST_ERROR;
+
+ /* Append 6 rows to the dataset, once */
+ for(i = 0, k = 0; i < 6; i++)
+ for(j = 0; j < 10; j++, k++)
+ buf[i][j] = lbuf[k] = (i * 10) + (j + 1);
+ if(H5DOappend(did, H5P_DEFAULT, 0, (size_t)6, H5T_NATIVE_INT, lbuf) < 0)
+ TEST_ERROR;
+
+ /* Verify the # of appends */
+ if(append_ct != 1)
+ TEST_ERROR;
+
+ /* Retrieve and verify object flush counts */
+ if(H5Pget_object_flush_cb(ffapl, NULL, (void **)&flush_ptr) < 0)
+ FAIL_STACK_ERROR;
+ if(*flush_ptr != 1)
+ TEST_ERROR;
+
+ /* Append 3 columns to the dataset, once */
+ for(i = 0; i < 3; i++)
+ for(j = 0; j < 6; j++, k++)
+ cbuf[j][i] = buf[j][i + 10] = ((i * 6) + (j + 1)) * -1;
+ if(H5DOappend(did, H5P_DEFAULT, 1, (size_t)3, H5T_NATIVE_INT, cbuf) < 0)
+ TEST_ERROR;
+
+ /* Verify the # of appends */
+ if(append_ct != 1)
+ TEST_ERROR;
+
+ /* Retrieve and verify object flush counts */
+ if(H5Pget_object_flush_cb(ffapl, NULL, (void **)&flush_ptr) < 0)
+ FAIL_STACK_ERROR;
+ if(*flush_ptr != 1)
+ TEST_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 13; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+
+ /* Clear the dataset */
+ HDmemset(rbuf, 0, sizeof(rbuf));
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Open the dataset again */
+ if((did = H5Dopen2(fid, DNAME_VARY, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Read the dataset */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data */
+ for(i = 0; i < 6; i++)
+ for(j = 0; j < 13; j++)
+ if(buf[i][j] != rbuf[i][j])
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(ffapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dapl);
+ H5Pclose(dcpl);
+ H5Sclose(sid); /* sid is a dataspace ID; was incorrectly closed with H5Pclose() */
+ H5Dclose(did);
+ H5Pclose(ffapl);
+ } H5E_END_TRY;
+
+ return 1;
+} /* test_dataset_append_vary() */
+
+/*-------------------------------------------------------------------------
+ * Function: Main function
+ *
+ * Purpose: Test H5Pset/get_object_flush_cb() and H5Pset/get_append_flush()
+ * along with H5DOappend().
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi; Jan 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+int main(void)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t fapl = -1; /* File access property list */
+ unsigned flush_ct = 0; /* The # of flushes */
+ int nerrors = 0; /* The # of errors encountered */
+
+ /* Get a copy of file access property list */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set object flush property */
+ if(H5Pset_object_flush_cb(fapl, flush_func, &flush_ct) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create the test file */
+ if((fid = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ nerrors += test_dataset_append_rows(fid);
+
+ flush_ct = 0; /* Reset flush counter */
+ nerrors += test_dataset_append_columns(fid);
+
+ flush_ct = 0; /* Reset flush counter */
+ nerrors += test_dataset_append_rows_columns(fid);
+
+#ifdef BUG1_BUG2
+/*
+ * The following tests illustrate the scenarios when H5DOappend does not work with extensible array indexing:
+ * - when the dataset has 1 unlimited dimension and the other dimension is fixed but extendible
+ * - the dataset expands along 1 dimension and then expands along the other dimension
+ */
+ flush_ct = 0; /* Reset flush counter */
+ nerrors += test_dataset_append_BUG1(fid);
+
+ flush_ct = 0; /* Reset flush counter */
+ nerrors += test_dataset_append_BUG2(fid);
+#endif
+
+ flush_ct = 0; /* Reset flush counter */
+ nerrors += test_dataset_append_less(fid);
+
+ flush_ct = 0; /* Reset flush counter */
+ nerrors += test_dataset_append_vary(fid);
+
+ /* Closing */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Check for errors */
+ if(nerrors)
+ goto error;
+
+ return 0;
+
+error:
+ /* Release any IDs still open; errors are expected/suppressed here since
+ * we may arrive with fapl/fid already closed or never created.
+ */
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+
+ return 1;
+}
+
diff --git a/hl/test/test_dset_opt.c b/hl/test/test_dset_opt.c
index a8ffa44..c1e369e 100644
--- a/hl/test/test_dset_opt.c
+++ b/hl/test/test_dset_opt.c
@@ -41,7 +41,7 @@
#define CHUNK_NX 4
#define CHUNK_NY 4
-#define DEFLATE_SIZE_ADJUST(s) (ceil(((double)(s))*1.001F)+12)
+#define DEFLATE_SIZE_ADJUST(s) (ceil(((double)(s))*(double)1.001F)+12)
/* Temporary filter IDs used for testing */
#define H5Z_FILTER_BOGUS1 305
diff --git a/hl/test/test_file_image.c b/hl/test/test_file_image.c
index 9b18539..6ff5bf4 100644
--- a/hl/test/test_file_image.c
+++ b/hl/test/test_file_image.c
@@ -20,6 +20,12 @@
#define RANK 2
+/* For superblock version 0, 1: the offset to "file consistency flags" is 20 with size of 4 bytes */
+/* The file consistency flags is the "status_flags" field in H5F_super_t */
+/* Note: the offset and size will be different when using superblock version 2 for the test file */
+#define SUPER_STATUS_FLAGS_OFF_V0_V1 20
+#define SUPER_STATUS_FLAGS_SIZE_V0_V1 4
+
/* Test of file image operations.
The following code provides a means to thoroughly test the file image
@@ -214,10 +220,32 @@ test_file_image(size_t open_images, size_t nflags, unsigned *flags)
else
VERIFY(*core_buf_ptr_ptr != buf_ptr[i], "vfd buffer and user buffer should be different");
- /* test whether the contents of the user buffer and driver buffer */
- /* are equal. */
- if (HDmemcmp(*core_buf_ptr_ptr, buf_ptr[i], (size_t)buf_size[i]) != 0)
- FAIL_PUTS_ERROR("comparison of vfd and user buffer failed");
+ /*
+ * When the vfd and user buffers are different and H5LT_FILE_IMAGE_OPEN_RW is enabled,
+ * status_flags in the superblock needs to be cleared in the vfd buffer for
+ * the comparison to proceed as expected. The user buffer as returned from H5Fget_file_image()
+ * has already cleared status_flags. The superblock's status_flags is used for the
+ * implementation of file locking.
+ */
+ if(input_flags[i] & H5LT_FILE_IMAGE_OPEN_RW && !(input_flags[i] & H5LT_FILE_IMAGE_DONT_COPY)) {
+
+ void *tmp_ptr = HDmalloc((size_t)buf_size[i]);
+ /* Copy vfd buffer to a temporary buffer */
+ HDmemcpy(tmp_ptr, (void *)*core_buf_ptr_ptr, (size_t)buf_size[i]);
+ /* Clear status_flags in the superblock for the vfd buffer: file locking is using status_flags */
+ HDmemset((uint8_t *)tmp_ptr + SUPER_STATUS_FLAGS_OFF_V0_V1, (int)0, (size_t)SUPER_STATUS_FLAGS_SIZE_V0_V1);
+ /* Do the comparison */
+ if(HDmemcmp(tmp_ptr, buf_ptr[i], (size_t)buf_size[i]) != 0)
+ FAIL_PUTS_ERROR("comparison of TMP vfd and user buffer failed");
+ /* Free the temporary buffer */
+ if(tmp_ptr) HDfree(tmp_ptr);
+ } else {
+
+ /* test whether the contents of the user buffer and driver buffer */
+ /* are equal. */
+ if (HDmemcmp(*core_buf_ptr_ptr, buf_ptr[i], (size_t)buf_size[i]) != 0)
+ FAIL_PUTS_ERROR("comparison of vfd and user buffer failed");
+ }
} /* end else */
} /* end for */
diff --git a/hl/test/test_ld.c b/hl/test/test_ld.c
new file mode 100644
index 0000000..df721e6
--- /dev/null
+++ b/hl/test/test_ld.c
@@ -0,0 +1,1430 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+* Copyright by The HDF Group. *
+* Copyright by the Board of Trustees of the University of Illinois. *
+* All rights reserved. *
+* *
+* This file is part of HDF5. The full HDF5 copyright notice, including *
+* terms governing use, modification, and redistribution, is contained in *
+* the files COPYING and Copyright.html. COPYING can be found at the root *
+* of the source code distribution tree; Copyright.html can be found at the *
+* root level of an installed copy of the electronic HDF5 document set and *
+* is linked from the top-level documents page. It can also be found at *
+* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+* access to either file, you may request a copy from help@hdfgroup.org. *
+* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <setjmp.h>
+#include "h5hltest.h"
+#include "H5srcdir.h"
+#include "H5LDpublic.h"
+
+/* File name */
+#define FILE "test_ld.h5"
+/* Copied file name */
+#define COPY_FILENAME "COPY_test_ld.h5"
+
+/* Dataset names */
+#define DSET_ONE "DSET_ONE"
+#define DSET_ALLOC_LATE "DSET_ALLOC_LATE"
+#define DSET_ALLOC_EARLY "DSET_ALLOC_EARLY"
+#define DSET_TWO "DSET_TWO"
+#define TWO_DIM_1 4
+#define TWO_DIM_2 10
+#define DSET_CMPD "DSET_CMPD"
+#define DSET_CMPD_ESC "DSET_CMPD_ESC"
+#define DSET_CMPD_TWO "DSET_CMPD_TWO"
+#define DSET_NULL "DSET_NULL"
+#define DSET_SCALAR "DSET_SCALAR"
+
+/* Selected compound field members for testing */
+#define VALID_FIELDS1 "field1,field2.a,field3,field4" /* TEMPORARY */
+#define VALID_FIELDS2 "field2.b.a,field2.c,field4.b"
+
+#define INVALID_FIELDS1 "field2.k.a,field2.c,field4.k"
+#define INVALID_FIELDS2 "field2.b.a,field2.c,field4.b."
+#define INVALID_FIELDS3 "field2.b.a,,field2.c,field4.b"
+
+#define VALID_ESC_FIELDS1 "field\\,1,field2\\..\\.a,field\\\\3,field4\\,"
+#define VALID_ESC_FIELDS2 "field2\\..\\,b.a,field2\\..\\\\c,field4\\,.b\\,"
+
+#define INVALID_ESC_FIELDS1 "field2\\..\\,k.a,field2\\..\\\\c,field4\\,.k\\,"
+#define INVALID_ESC_FIELDS2 "field2\\..\\,b.a,field2\\..\\\\c,field4\\,.b\\,."
+#define INVALID_ESC_FIELDS3 "field2\\..\\,,b.a,field2\\..\\\\c,field4\\,.b\\,"
+
+/*
+ * Test variations (retained original) for one-dimensional dataset:
+ * Varies from 10->13; 10->9, 10->10, 10->1, 10->11
+ */
+#define ONE_NTESTS 5
+int one_tests[ONE_NTESTS] = {3, -1, 0, -9, 1};
+
+/*
+ * Test variations (retained original) for two-dimensional dataset:
+ * Varies from {4,10}->{6,12}; {4,10}->{6,9}; {4,10}->{6,10};
+ * {4,10}->{3,12}; {4,10}->{3,9}; {4,10}->{3,10};
+ * {4,10}->{4,12}; {4,10}->{4,9}; {4,10}->{4,10}
+ */
+#define TWO_NTESTS 9
+int two_tests[TWO_NTESTS][2] = { {2,2}, {2,-1}, {2,0},
+ {-1,2}, {-1,-1}, {-1,0},
+ {0,2}, {0,-1}, {0,0} };
+
+
+/* Verify that the two input values are the same */
+#define VERIFY_EQUAL(_x, _y) \
+{ \
+ long __x = (long)_x, __y = (long)_y; \
+ if(__x != __y) TEST_ERROR \
+}
+
+/* Temporary buffer for reading in the test file */
+#define TMP_BUF_SIZE 2048
+char g_tmp_buf[TMP_BUF_SIZE];
+
+/* Macros for verifying compound fields */
+/* Verify all fields */
+#define VERIFY_ELMTS_ALL(ent1, ent2) { \
+ VERIFY_EQUAL(ent1.field1, ent2.field1); \
+ VERIFY_EQUAL(ent1.field2.a, ent2.field2.a); \
+ VERIFY_EQUAL(ent1.field2.b.a, ent2.field2.b.a); \
+ VERIFY_EQUAL(ent1.field2.b.b, ent2.field2.b.b); \
+ VERIFY_EQUAL(ent1.field2.b.c, ent2.field2.b.c); \
+ VERIFY_EQUAL(ent1.field2.c, ent2.field2.c); \
+ VERIFY_EQUAL(ent1.field3, ent2.field3); \
+ VERIFY_EQUAL(ent1.field4.a, ent2.field4.a); \
+}
+
+/* Verify fields selected in VALID_FIELDS1 */
+#define VERIFY_ELMTS_VALID1(ent1, ent2) { \
+ VERIFY_EQUAL(ent1.field1, ent2.field1); \
+ VERIFY_EQUAL(ent1.field2_a, ent2.field2.a); \
+ VERIFY_EQUAL(ent1.field3, ent2.field3); \
+ VERIFY_EQUAL(ent1.field4.a, ent2.field4.a); \
+ VERIFY_EQUAL(ent1.field4.b, ent2.field4.b); \
+}
+
+/* Verify fields selected in VALID_FIELDS2 */
+#define VERIFY_ELMTS_VALID2(ent1, ent2) { \
+ VERIFY_EQUAL(ent1.field2_b_a, ent2.field2.b.a); \
+ VERIFY_EQUAL(ent1.field2_c, ent2.field2.c); \
+ VERIFY_EQUAL(ent1.field4_b, ent2.field4.b); \
+}
+
+/* The types of 2-dimensional dataset: DSET_TWO or DSET_CMPD_TWO */
+#define TWO_NONE 0 /* DSET_TWO */
+#define TWO_CMPD_NULL 1 /* DSET_CMPD_TWO with NULL fields */
+#define TWO_CMPD_VALID1 2 /* DSET_CMPD_TWO with VALID_FIELDS1 or VALID_ESC_FIELDS1 */
+#define TWO_CMPD_VALID2 3 /* DSET_CMPD_TWO with VALID_FIELDS2 or VALID_ESC_FIELDS2 */
+
+#define VERIFY_ELMTS(type, k, ind, _ldbuf, _buf) { \
+ if(type == TWO_NONE) { \
+ int *iib = (int *)_ldbuf; \
+ int *ib = (int *)_buf; \
+ \
+ VERIFY_EQUAL(iib[k], ib[ind + n]) \
+ } else if(type == TWO_CMPD_NULL) { \
+ set_t *ccb = (set_t *)_ldbuf; \
+ set_t *cb = (set_t *)_buf; \
+ \
+ VERIFY_ELMTS_ALL(ccb[k], cb[ind + n]) \
+ } else if(type == TWO_CMPD_VALID1) { \
+ test_valid_fields1 *vb1 = (test_valid_fields1 *)_ldbuf; \
+ set_t *cb = (set_t *)_buf; \
+ \
+ VERIFY_ELMTS_VALID1(vb1[k], cb[ind + n]) \
+ } else if(type == TWO_CMPD_VALID2) { \
+ test_valid_fields2 *vb2 = (test_valid_fields2 *)_ldbuf; \
+ set_t *cb = (set_t *)_buf; \
+ \
+ VERIFY_ELMTS_VALID2(vb2[k], cb[ind + n]) \
+ } \
+}
+
+/* Tests for test_LD_elmts_pipe() */
+#define ONE_TESTS 3
+int onetests[ONE_TESTS] = {3, 9, 1};
+#define TWO_TESTS 5
+int twotests[TWO_TESTS][2] = { {2,2}, {2,-1}, {2,0}, {-1,2}, {0,2} };
+
+
+static herr_t test_LD_dims_params(const char *file);
+static herr_t test_LD_dims(const char *file);
+
+static herr_t test_LD_size(const char *file);
+
+static herr_t test_LD_elmts_invalid(const char *file);
+static herr_t test_LD_elmts_one(const char *file, const char *dname, const char *fields);
+static herr_t test_LD_elmts_two(const char *file, const char *dname, const char *fields);
+
+static herr_t verify_elmts_two(int type, hsize_t *ext_dims, hsize_t *prev_dims, void *_ldbuf, void *_buf);
+
+/* Data structures for the compound datatype used by the DSET_CMPD* datasets */
+typedef struct sub22_t { /* innermost nested compound: set_t.field2.b */
+ int a;
+ int b;
+ int c;
+} sub22_t;
+
+typedef struct sub2_t { /* nested compound: set_t.field2 */
+ int a;
+ sub22_t b;
+ int c;
+} sub2_t;
+
+typedef struct sub4_t { /* nested compound: set_t.field4 */
+ int a;
+ int b;
+} sub4_t;
+
+typedef struct set_t { /* one full element of the compound datasets */
+ int field1;
+ sub2_t field2;
+ double field3;
+ sub4_t field4;
+} set_t;
+
+/* NOTE:
+ * This will fail on heiwa and amani when VALID_FIELDS1 is "field1,field3,field4"
+ * because of alignment problems:
+ * amani and heiwa - 8 byte alignment
+ * jam - 4 byte alignment
+ * This will need to be fixed in the library for H5Tget_native_type().
+ */
+/* VALID_FIELDS1 "field1,field2.a,field3,field4" */
+/* VALID_ESC_FIELDS1 "field\\,1,field2\\..\\.a,field\\\\3,field4\\," */
+typedef struct test_valid_fields1 {
+ int field1;
+ int field2_a;
+ double field3;
+ sub4_t field4;
+} test_valid_fields1;
+
+/* VALID_FIELDS2 "field2.b.a,field2.c,field4.b" */
+/* VALID_ESC_FIELDS2 "field2\\..\\,b.a,field2\\..\\\\c,field4\\,.b\\," */
+typedef struct test_valid_fields2 {
+ int field2_b_a;
+ int field2_c;
+ int field4_b;
+} test_valid_fields2;
+
+
+/* Temporary buffers for tests: test_LD_elmts_one() & test_LD_elmts_two() */
+#define TEST_BUF_SIZE 100
+int *iibuf; /* buffer for storing retrieved elements (integer) */
+int *ibuf; /* buffer holding the integer data written to the dataset */
+set_t *cbuf; /* buffer holding the compound data written to the dataset */
+set_t *ccbuf; /* buffer for storing retrieved elements (compound) */
+test_valid_fields1 *vbuf1; /* buffer for storing retrieved elements (FIELDS1) */
+test_valid_fields2 *vbuf2; /* buffer for storing retrieved elements (FIELDS2) */
+
+
+/*
+ *********************************************************************************
+ *
+ * Testing for the High Level public routine: H5LDget_dset_dims()
+ * 1) An invalid dataset id
+ * 2) "DSET_ALLOC_EARLY": NULL cur_dims
+ * 3) "DSET_ALLOC_LATE": nonNULL cur_dims
+ * 4) "DSET_CMPD_TWO": nonNULL cur_dims
+ * 5) "DSET_NULL": nonNULL cur_dims
+ * 6) "DSET_SCALAR": nonNULL cur_dims
+ *
+ *********************************************************************************
+ */
+static herr_t
+test_LD_dims_params(const char *file)
+{
+ hid_t fid=-1; /* file identifier */
+ hid_t did=-1; /* dataset identifier */
+ hsize_t one_cur_dims[1]; /* current dimension sizes for 1-dimensional dataset */
+ hsize_t two_cur_dims[2]; /* current dimension sizes for 2-dimensional dataset */
+ hid_t invalid_id = -1; /* deliberately invalid id to exercise the failure path */
+ herr_t ret; /* return value */
+
+ const char *filename = H5_get_srcdir_filename(file);
+
+ TESTING("H5LDget_dset_dims");
+
+ /* Open the test file read-only (this test does not modify it, so no copy is made) */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * 1. Verify failure with negative dataset id
+ */
+ H5E_BEGIN_TRY {
+ ret = H5LDget_dset_dims(invalid_id, one_cur_dims);
+ } H5E_END_TRY;
+ VERIFY_EQUAL(ret, FAIL)
+
+ /*
+ * 2. Verify failure for NULL cur_dims
+ */
+ if((did = H5Dopen2(fid, DSET_ALLOC_EARLY, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ H5E_BEGIN_TRY {
+ ret = H5LDget_dset_dims(did, NULL);
+ } H5E_END_TRY;
+ VERIFY_EQUAL(ret, FAIL)
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * 3. Verify for nonNULL cur_dims
+ */
+ if((did = H5Dopen2(fid, DSET_ALLOC_LATE, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ if(H5LDget_dset_dims(did, one_cur_dims) < 0)
+ FAIL_STACK_ERROR
+ VERIFY_EQUAL(one_cur_dims[0], 10)
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * 4. Verify nonNULL cur_dims for a 2-dimensional dataset
+ */
+ if((did = H5Dopen2(fid, DSET_CMPD_TWO, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ if(H5LDget_dset_dims(did, two_cur_dims) < 0)
+ FAIL_STACK_ERROR
+ VERIFY_EQUAL(two_cur_dims[0], TWO_DIM_1)
+ VERIFY_EQUAL(two_cur_dims[1], TWO_DIM_2)
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * 5. Verify nonNULL cur_dims for dataset with H5S_NULL dataspace
+ */
+ one_cur_dims[0] = 0;
+
+ if((did = H5Dopen2(fid, DSET_NULL, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5LDget_dset_dims(did, one_cur_dims) < 0)
+ FAIL_STACK_ERROR
+ VERIFY_EQUAL(one_cur_dims[0], 0)
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * 6. Verify nonNULL cur_dims for dataset with H5S_SCALAR dataspace
+ */
+ one_cur_dims[0] = 0;
+
+ if((did = H5Dopen2(fid, DSET_SCALAR, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5LDget_dset_dims(did, one_cur_dims) < 0)
+ FAIL_STACK_ERROR
+ VERIFY_EQUAL(one_cur_dims[0], 0)
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(did);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+
+ return(-1);
+} /* test_LD_dims_params() */
+
+
+/*
+ *********************************************************************************
+ *
+ * Testing for the High Level public routine: H5LDget_dset_dims()
+ * Verify that the dimension sizes retrieved via H5LDget_dset_dims() are correct
+ * for the following cases:
+ *
+ * DSET_ONE: one-dimensional dataset
+ * 1. Increase dims[0]
+ * 2. Decrease dims[0]
+ * 3. same dims[0]
+ * 4. Decrease dims[0]
+ * 5. Increase dims[0]
+ *
+ * one_tests[ONE_NTESTS] = {3, -1, 0, -9, 1}
+ * Varies from 10->13; 10->9, 10->10, 10->1, 10->11
+ *
+ * DSET_TWO: two-dimensional dataset
+ * 1. Increase dims[0], increase dims[1]
+ * 2. Increase dims[0], decrease dims[1]
+ * 3. Increase dims[0], same dims[1]
+ * 4. Decrease dims[0], increase dims[1]
+ * 5. Decrease dims[0], decrease dims[1]
+ * 6. Decrease dims[0], same dims[1]
+ * 7. same dims[0], increase dims[1]
+ * 8. same dims[0], decrease dims[1]
+ * 9. same dims[0], same dims[1]
+ *
+ * two_tests[TWO_NTESTS][2] = { {2,2}, {2,-1}, {2,0},
+ * {-1,2}, {-1,-1}, {-1,0},
+ * {0,2}, {0,-1}, {0,0} }
+ * Varies from {4,10}->{6,12}; {4,10}->{6,9}; {4,10}->{6,10};
+ * {4,10}->{3,12}; {4,10}->{3,9}; {4,10}->{3,10};
+ * {4,10}->{4,12}; {4,10}->{4,9}; {4,10}->{4,10}
+ *
+ *********************************************************************************
+ */
+static herr_t
+test_LD_dims(const char *file)
+{
+ hid_t fid=-1; /* file identifier */
+ hid_t did=-1; /* dataset identifier */
+ hsize_t one_prev_dims[1]; /* original dimension sizes for 1-dimensional dataset */
+ hsize_t one_cur_dims[1]; /* current dimension sizes for 1-dimensional dataset */
+ hsize_t one_ext_dims[1]; /* extended dimension sizes for 1-dimensional dataset */
+ hsize_t two_prev_dims[2]; /* original dimension sizes for 2-dimensional dataset */
+ hsize_t two_cur_dims[2]; /* current dimension sizes for 2-dimensional dataset */
+ hsize_t two_ext_dims[2]; /* extended dimension sizes for 2-dimensional dataset*/
+ int i; /* local index variable */
+
+ TESTING("H5LDget_dset_dims with H5Dset_extent");
+
+ /* Make a copy of the test file: this test extends datasets, so it must not modify the original */
+ if(h5_make_local_copy(file, COPY_FILENAME) < 0)
+ TEST_ERROR
+
+ /* Open the copied file */
+ if((fid = H5Fopen(COPY_FILENAME, H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Testing with one-dimensional dataset: DSET_ONE
+ */
+ if((did = H5Dopen2(fid, DSET_ONE, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve dimension sizes */
+ if(H5LDget_dset_dims(did, one_prev_dims) < 0)
+ FAIL_STACK_ERROR
+
+ for(i = 0; i < ONE_NTESTS; i++) {
+
+ /* Set up the extended dimension sizes */
+ one_ext_dims[0] = (hsize_t)((int)one_prev_dims[0] + one_tests[i]);
+
+ /* Change the dimension size */
+ if(H5Dset_extent(did, one_ext_dims) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the dimension size */
+ if(H5LDget_dset_dims(did, one_cur_dims) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify that the retrieved dimension size is correct as expected */
+ VERIFY_EQUAL(one_cur_dims[0], one_ext_dims[0])
+ }
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Testing with two-dimensional dataset: DSET_TWO
+ */
+ if((did = H5Dopen2(fid, DSET_TWO, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the dimension sizes */
+ if(H5LDget_dset_dims(did, two_prev_dims) < 0)
+ FAIL_STACK_ERROR
+
+ for(i = 0; i < TWO_NTESTS; i++) {
+
+ /* Set up the extended dimension sizes */
+ two_ext_dims[0] = (hsize_t)((int)two_prev_dims[0] + two_tests[i][0]);
+ two_ext_dims[1] = (hsize_t) ((int)two_prev_dims[1] + two_tests[i][1]);
+
+ /* Change the dimension sizes */
+ if(H5Dset_extent(did, two_ext_dims) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the dimension sizes */
+ if(H5LDget_dset_dims(did, two_cur_dims) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify that the retrieved dimension sizes are correct as expected */
+ VERIFY_EQUAL(two_cur_dims[0], two_ext_dims[0])
+ VERIFY_EQUAL(two_cur_dims[1], two_ext_dims[1])
+ } /* end TWO_NTESTS */
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Remove the copied file */
+ HDremove(COPY_FILENAME);
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(did);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return(-1);
+
+} /* test_LD_dims() */
+
+
+/*
+ **********************************************************************************
+ *
+ * Testing for the High Level public routine: H5LDget_dset_type_size()
+ * Verify that the data type size returned via H5LDget_dset_type_size()
+ * are correct for the following cases:
+ *
+ * Verify failure for an invalid dataset id
+ *
+ * DSET_CMPD: one-dimensional dataset with compound type
+ * 1. The whole element
+ * 2. VALID_FIELDS1: "field1,field2.a,field3,field4"
+ * 3. VALID_FIELDS2: "field2.b.a,field2.c,field4.b"
+ * 4. INVALID_FIELDS1: "field2.k.a,field2.c,field4.k"
+ * 5. INVALID_FIELDS2: "field2.b.a,field2.c,field4.b."
+ * 6. INVALID_FIELDS3: "field2.b.a,,field2.c,field4.b"
+ *
+ * DSET_CMPD_ESC: one-dimensional dataset with compound type and
+ * member names with escape/separator characters
+ * 1. The whole element
+ * 2. VALID_ESC_FIELDS1: "field\\,1,field2\\..\\.a,field\\\\3,field4\\,"
+ * 3. VALID_ESC_FIELDS2: "field2\\..\\,b.a,field2\\..\\\\c,field4\\,.b\\,"
+ * 4. INVALID_ESC_FIELDS1: "field2\\..\\,k.a,field2\\..\\\\c,field4\\,.k\\,"
+ * 5. INVALID_ESC_FIELDS2: "field2\\..\\,b.a,field2\\..\\\\c,field4\\,.b\\,."
+ * 6. INVALID_ESC_FIELDS3: "field2\\..\\,,b.a,field2\\..\\\\c,field4\\,.b\\,"
+ *
+ **********************************************************************************
+ */
+static int
+test_LD_size(const char *file)
+{
+ hid_t fid=-1; /* file identifier */
+ hid_t did=-1; /* dataset identifier */
+ hid_t dtid=-1; /* dataset's datatype identifier */
+ hid_t invalid_id=-1; /* deliberately invalid id to exercise the failure path */
+ hid_t memb0_tid=-1; /* type identifier for a member in the compound type */
+ hid_t memb1_tid=-1; /* type identifier for a member in the compound type */
+ hid_t memb2_tid=-1; /* type identifier for a member in the compound type */
+ hid_t memb3_tid=-1; /* type identifier for a member in the compound type */
+ hid_t memb_tid=-1; /* type identifier for a member in the compound type */
+ hid_t memb_tid2=-1; /* type identifier for a member in the compound type */
+ size_t dsize; /* size of the dataset's datatype */
+ size_t ck_dsize; /* size of the dataset's datatype to be checked against */
+
+ const char *filename = H5_get_srcdir_filename(file);
+
+ TESTING("H5LDget_dset_type_size");
+
+ /* Open the file read-only (this test does not modify it) */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Verify failure with an invalid dataset id
+ */
+ H5E_BEGIN_TRY {
+ dsize = H5LDget_dset_type_size(invalid_id, NULL);
+ } H5E_END_TRY;
+ VERIFY_EQUAL(dsize, 0)
+
+ /*
+ * Testing one-dimensional dataset with compound datatype:
+ * DSET_CMPD
+ */
+
+ /* Open dataset DSET_CMPD */
+ if((did = H5Dopen2(fid, DSET_CMPD, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Return size of the whole element */
+ if((dsize = H5LDget_dset_type_size(did, NULL)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Get the dataset's datatype and then its datatype size */
+ if((dtid = H5Tget_native_type(H5Dget_type(did), H5T_DIR_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ if((ck_dsize = H5Tget_size(dtid)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Verify case #1 */
+ VERIFY_EQUAL(dsize, ck_dsize)
+
+ /* Get datatype id for each member */
+ if((memb0_tid = H5Tget_member_type(dtid, 0)) < 0) /* "field1" */
+ FAIL_STACK_ERROR
+ if((memb1_tid = H5Tget_member_type(dtid, 1)) < 0) /* "field2" */
+ FAIL_STACK_ERROR
+ if((memb2_tid = H5Tget_member_type(dtid, 2)) < 0) /* "field3" */
+ FAIL_STACK_ERROR
+ if((memb3_tid = H5Tget_member_type(dtid, 3)) < 0) /* "field4" */
+ FAIL_STACK_ERROR
+
+ /* Obtain size for VALID_FIELDS1: "field1,field2.a,field3,field4" */
+ if((dsize = H5LDget_dset_type_size(did, VALID_FIELDS1)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Get the datatype size for "field1" */
+ if((ck_dsize = H5Tget_size(memb0_tid)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Add the datatype size for "field2.a" */
+ if((memb_tid = H5Tget_member_type(memb1_tid, 0)) < 0)
+ FAIL_STACK_ERROR
+ if((ck_dsize += H5Tget_size(memb_tid)) == 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb_tid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Add the datatype size for "field3" */
+ if((ck_dsize += H5Tget_size(memb2_tid)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Add the datatype size for "field4" */
+ if((ck_dsize += H5Tget_size(memb3_tid)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Verify case #2 */
+ VERIFY_EQUAL(dsize, ck_dsize)
+
+ /* Obtain datatype size for VALID_FIELDS2: "field2.b.a,field2.c,field4.b" */
+ if((dsize = H5LDget_dset_type_size(did, VALID_FIELDS2)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Get the datatype size for "field2.b.a" */
+ if((memb_tid = H5Tget_member_type(memb1_tid, 1)) < 0)
+ FAIL_STACK_ERROR
+ if((memb_tid2 = H5Tget_member_type(memb_tid, 0)) < 0)
+ FAIL_STACK_ERROR
+ if((ck_dsize = H5Tget_size(memb_tid2)) == 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb_tid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb_tid2) < 0)
+ FAIL_STACK_ERROR
+
+ /* Add the datatype size for "field2.c" */
+ if((memb_tid = H5Tget_member_type(memb1_tid, 2)) < 0)
+ FAIL_STACK_ERROR
+ if((ck_dsize += H5Tget_size(memb_tid)) == 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb_tid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Add the datatype size for "field4.b" */
+ if((memb_tid = H5Tget_member_type(memb3_tid, 1)) < 0)
+ FAIL_STACK_ERROR
+ if((ck_dsize += H5Tget_size(memb_tid)) == 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb_tid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify case #3 */
+ VERIFY_EQUAL(dsize, ck_dsize)
+
+ /*
+ * Verify failure for the following invalid nested fields:
+ * INVALID_FIELDS1: "field2.k.a,field2.c,field4.k"
+ * INVALID_FIELDS2: "field2.b.a,field2.c,field4.b."
+ * INVALID_FIELDS3: "field2.b.a,,field2.c,field4.b"
+ * NOTE(review): unlike the invalid-id case above, these calls are not
+ * wrapped in H5E_BEGIN_TRY, so the library may print an error stack on
+ * the expected failures -- confirm this is intended.
+ */
+ /* Verify failure for case #4 */
+ dsize = H5LDget_dset_type_size(did, INVALID_FIELDS1);
+ VERIFY_EQUAL(dsize, 0)
+
+ /* Verify failure for case #5 */
+ dsize = H5LDget_dset_type_size(did, INVALID_FIELDS2);
+ VERIFY_EQUAL(dsize, 0)
+
+ /* Verify failure for case #6 */
+ dsize = H5LDget_dset_type_size(did, INVALID_FIELDS3);
+ VERIFY_EQUAL(dsize, 0)
+
+ /* Closing */
+ if(H5Tclose(memb0_tid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb1_tid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb2_tid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb3_tid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(dtid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Testing one-dimensional dataset with compound datatype and
+ * member names consisting of escape/separator characters:
+ * DSET_CMPD_ESC
+ */
+
+ /* Open dataset DSET_CMPD_ESC */
+ if((did = H5Dopen2(fid, DSET_CMPD_ESC, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Return size of the whole element */
+ if((dsize = H5LDget_dset_type_size(did, NULL)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Get the dataset's datatype and then its datatype size */
+ if((dtid = H5Tget_native_type(H5Dget_type(did), H5T_DIR_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ if((ck_dsize = H5Tget_size(dtid)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Verify case #1 */
+ VERIFY_EQUAL(dsize, ck_dsize)
+
+ /* Get datatype id for each member */
+ if((memb0_tid = H5Tget_member_type(dtid, 0)) < 0) /* "field,1" */
+ FAIL_STACK_ERROR
+ if((memb1_tid = H5Tget_member_type(dtid, 1)) < 0) /* "field2." */
+ FAIL_STACK_ERROR
+ if((memb2_tid = H5Tget_member_type(dtid, 2)) < 0) /* "field\3" */
+ FAIL_STACK_ERROR
+ if((memb3_tid = H5Tget_member_type(dtid, 3)) < 0) /* "field4," */
+ FAIL_STACK_ERROR
+
+ /* Obtain size for VALID_ESC_FIELDS1: "field\\,1,field2\\..\\.a,field\\\\3,field4\\," */
+ if((dsize = H5LDget_dset_type_size(did, VALID_ESC_FIELDS1)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Get the datatype size for "field\\,1" */
+ if((ck_dsize = H5Tget_size(memb0_tid)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Add the datatype size for "field2\\..\\.a" */
+ if((memb_tid = H5Tget_member_type(memb1_tid, 0)) < 0)
+ FAIL_STACK_ERROR
+ if((ck_dsize += H5Tget_size(memb_tid)) == 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb_tid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Add the datatype size for "field\\\\3" */
+ if((ck_dsize += H5Tget_size(memb2_tid)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Add the datatype size for "field4\\," */
+ if((ck_dsize += H5Tget_size(memb3_tid)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Verify case #2 */
+ VERIFY_EQUAL(dsize, ck_dsize)
+
+ /* Obtain datatype size for VALID_ESC_FIELDS2:
+ "field2\\..\\,b.a,field2\\..\\\\c,field4\\,.b\\," */
+ if((dsize = H5LDget_dset_type_size(did, VALID_ESC_FIELDS2)) == 0)
+ FAIL_STACK_ERROR
+
+ /* Get the datatype size for "field2\..,b.a" */
+ if((memb_tid = H5Tget_member_type(memb1_tid, 1)) < 0)
+ FAIL_STACK_ERROR
+ if((memb_tid2 = H5Tget_member_type(memb_tid, 0)) < 0)
+ FAIL_STACK_ERROR
+ if((ck_dsize = H5Tget_size(memb_tid2)) == 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb_tid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb_tid2) < 0)
+ FAIL_STACK_ERROR
+
+ /* Add the datatype size for "field2\..\\c" */
+ if((memb_tid = H5Tget_member_type(memb1_tid, 2)) < 0)
+ FAIL_STACK_ERROR
+ if((ck_dsize += H5Tget_size(memb_tid)) == 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb_tid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Add the datatype size for "field4\,.b\," */
+ if((memb_tid = H5Tget_member_type(memb3_tid, 1)) < 0)
+ FAIL_STACK_ERROR
+ if((ck_dsize += H5Tget_size(memb_tid)) == 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb_tid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify case #3 */
+ VERIFY_EQUAL(dsize, ck_dsize)
+
+ /*
+ * Verify failure for the following invalid nested fields:
+ * INVALID_ESC_FIELDS1: "field2\..\,k.a,field2\..\\c,field4\,.k\,"
+ * INVALID_ESC_FIELDS2: "field2\..\,b.a,field2\..\\c,field4\,.b\,."
+ * INVALID_ESC_FIELDS3: "field2\..\,,b.a,field2\..\\c,field4\,.b\,"
+ * NOTE(review): these calls are likewise not wrapped in H5E_BEGIN_TRY
+ * (see the matching note for DSET_CMPD above).
+ */
+ /* Verify failure for case #4 */
+ dsize = H5LDget_dset_type_size(did, INVALID_ESC_FIELDS1);
+ VERIFY_EQUAL(dsize, 0)
+
+ /* Verify failure for case #5 */
+ dsize = H5LDget_dset_type_size(did, INVALID_ESC_FIELDS2);
+ VERIFY_EQUAL(dsize, 0)
+
+ /* Verify failure for case #6 */
+ dsize = H5LDget_dset_type_size(did, INVALID_ESC_FIELDS3);
+ VERIFY_EQUAL(dsize, 0)
+
+ /* Closing */
+ if(H5Tclose(memb0_tid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb1_tid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb2_tid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(memb3_tid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Tclose(dtid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Tclose(memb0_tid);
+ H5Tclose(memb1_tid);
+ H5Tclose(memb2_tid);
+ H5Tclose(memb3_tid);
+ H5Tclose(memb_tid);
+ H5Tclose(memb_tid2);
+ H5Tclose(dtid);
+ H5Dclose(did);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return(-1);
+
+} /* test_LD_size() */
+
+
+/*
+ **************************************************************************************
+ * Testing for the High Level public routine: H5LDget_dset_elmts()
+ * Verify failures when calling H5LDget_dset_elmts() with the following
+ * invalid conditions:
+ *
+ * A. DSET_TWO: two-dimensional dataset
+ * 1. CUR_DIMS and PREV_DIMS are NULL
+ * 2. PREV_DIMS is NULL
+ * 3. CUR_DIMS is NULL
+ * 4. FIELDS is nonnull but the dataset is not compound datatype
+ * 5. BUF is NULL
+ * 6. CUR_DIMS is not greater than PREV_DIMS
+ *
+ * B. DSET_CMPD: one-dimensional dataset with compound type
+ * 1. Invalid dataset id
+ * 2. FIELDS are not valid members in the compound type
+ *
+ **************************************************************************************
+ */
+static int
+test_LD_elmts_invalid(const char *file)
+{
+ hid_t fid=-1; /* file identifier */
+ hid_t did=-1; /* dataset identifier */
+ hid_t sid=-1; /* dataspace identifier */
+ hid_t invalid_id=-1; /* deliberately invalid id to exercise the failure path */
+ int ret; /* return value */
+ hsize_t cur_dims[2]; /* current dimension sizes of the dataset */
+ hsize_t prev_dims[2]; /* previous dimension sizes of the dataset */
+ char tbuf[2]; /* temporary buffer for testing */
+ int ndims; /* # of dimension sizes */
+ int i; /* local index variable */
+
+ const char *filename = H5_get_srcdir_filename(file);
+
+ TESTING("H5LDget_dset_elmts on invalid conditions");
+
+ /* Open the test file read-only (this test does not modify it, so no copy is made) */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Testing two-dimensional dataset: DSET_TWO
+ */
+
+ /* Open dataset: DSET_TWO */
+ if((did = H5Dopen2(fid, DSET_TWO, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* NOTE(review): the failure calls for cases #1-#6 below are not wrapped
+ * in H5E_BEGIN_TRY (only the invalid-id case further down is), so the
+ * library may print an error stack on the expected failures -- confirm
+ * this is intended. */
+
+ /* Verify failure from case #1: cur_dims and prev_dims are NULL */
+ ret = H5LDget_dset_elmts(did, NULL, NULL, NULL, NULL);
+ VERIFY_EQUAL(ret, FAIL)
+
+ /* Verify failure from case #2: prev_dims is NULL */
+ ret = H5LDget_dset_elmts(did, cur_dims, NULL, NULL, NULL);
+ VERIFY_EQUAL(ret, FAIL)
+
+ /* Verify failure from case #3: cur_dims is NULL */
+ ret = H5LDget_dset_elmts(did, NULL, prev_dims, NULL, NULL);
+ VERIFY_EQUAL(ret, FAIL)
+
+ if((sid = H5Dget_space(did)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the # of dimensions and current dimension sizes */
+ if((ndims = H5Sget_simple_extent_dims(sid, cur_dims, NULL)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set up valid cur_dims and prev_dims */
+ for(i = 0; i < ndims; i++)
+ prev_dims[i] = cur_dims[i] - 1;
+
+ /* Verify failure from case #4: FIELDS is nonNULL but the dataset is not compound datatype */
+ ret = H5LDget_dset_elmts(did, prev_dims, cur_dims, "field1", tbuf);
+ VERIFY_EQUAL(ret, FAIL)
+
+ /* Verify failure from case #5: BUF is NULL */
+ ret = H5LDget_dset_elmts(did, prev_dims, cur_dims, NULL, NULL);
+ VERIFY_EQUAL(ret, FAIL)
+
+ /* Verify failure from case #6: cur_dims is not > than prev_dims */
+ cur_dims[0] = prev_dims[0] - 1;
+ cur_dims[1] = prev_dims[1] - 1;
+ ret = H5LDget_dset_elmts(did, prev_dims, cur_dims, NULL, tbuf);
+ VERIFY_EQUAL(ret, FAIL)
+
+ /* Close DSET_TWO */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Testing one-dimensional dataset with compound datatype:
+ * DSET_CMPD
+ */
+
+ /* Verify failure from case #1: an invalid dataset id */
+ H5E_BEGIN_TRY {
+ ret = H5LDget_dset_elmts(invalid_id, prev_dims, cur_dims, NULL, tbuf);
+ } H5E_END_TRY;
+ VERIFY_EQUAL(ret, FAIL)
+
+ /* Open dataset: DSET_CMPD */
+ if((did = H5Dopen2(fid, DSET_CMPD, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the current dimension sizes */
+ if(H5LDget_dset_dims(did, cur_dims) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set up valid cur_dims, prev_dims */
+ prev_dims[0] = cur_dims[0] - 1;
+
+ /* Verify failure from case #2: invalid FIELDS */
+ ret = H5LDget_dset_elmts(did, prev_dims, cur_dims, "field2.k.a,field2.c,field4.k", tbuf);
+ VERIFY_EQUAL(ret, FAIL)
+
+ /* Close DSET_CMPD */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Sclose(sid);
+ H5Dclose(did);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return(-1);
+
+} /* test_LD_elmts_invalid() */
+
+
+/*
+ **************************************************************************************
+ * Testing for the High Level public routine: H5LDget_dset_elmts()
+ * Verify elements retrieved via H5LDget_dset_elmts() are correct as expected
+ * when the dataset's dimension sizes are changed according to one_tests[]:
+ *
+ * one-dimensional dataset :
+ * DSET_ONE with NULL fields
+ * DSET_CMPD with fields: NULL, VALID_FIELDS1, VALID_FIELDS2
+ * DSET_CMPD_ESC with fields: NULL, VALID_ESC_FIELDS1, VALID_ESC_FIELDS2
+ *
+ * case #1. increase dims[0]
+ * case #2. decrease dims[0] (failure)
+ * case #3. same dims[0] (failure)
+ * case #4. decrease dims[0] (failure)
+ * case #5. increase dims[0]
+ *
+ **************************************************************************************
+ */
+static herr_t
+test_LD_elmts_one(const char *file, const char *dname, const char *fields)
+{
+    hid_t fid=-1; /* file identifier */
+    hid_t did=-1; /* dataset identifier */
+    hid_t dtype=-1; /* dataset's data type */
+    hsize_t ext_dims[1]; /* extended dimension sizes of the dataset */
+    hsize_t prev_dims[1]; /* previous dimension sizes of the dataset */
+    int i, j; /* local index variable */
+    int ret = 0; /* return value */
+
+    TESTING("H5LDget_dset_elmts: one-dimensional dataset");
+
+    /* Copy the test file so the original test data stays pristine for other tests */
+    if(h5_make_local_copy(file, COPY_FILENAME) < 0)
+        TEST_ERROR
+
+    /* Initialize the source buffers: every member of element i holds the
+     * value i, so retrieved elements can be verified against their index */
+    for(i = 0; i < TEST_BUF_SIZE; i++) {
+        cbuf[i].field1 = i;
+        cbuf[i].field2.a = i;
+        cbuf[i].field2.b.a = i;
+        cbuf[i].field2.b.b = i;
+        cbuf[i].field2.b.c = i;
+        cbuf[i].field2.c = i;
+        cbuf[i].field3 = (double)i;
+        cbuf[i].field4.a = i;
+        cbuf[i].field4.b = i;
+        ibuf[i] = i;
+    } /* end for */
+
+    /* Open the copied file */
+    if((fid = H5Fopen(COPY_FILENAME, H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Open the dataset */
+    if((did = H5Dopen2(fid, dname, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Get the dataset's data type
+     * NOTE(review): the transient id returned by H5Dget_type() is never
+     * closed -- a minor id leak in a test program; confirm acceptable */
+    if((dtype = H5Tget_native_type(H5Dget_type(did), H5T_DIR_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Get current dimension sizes before extending the dataset's dimension sizes */
+    if(H5LDget_dset_dims(did, prev_dims) < 0)
+        FAIL_STACK_ERROR
+
+    /* Loop through different variations of extending the dataset */
+    for(i = 0; i < ONE_NTESTS; i++) {
+        /* Clear the retrieval buffers so stale data cannot mask a failure */
+        HDmemset(vbuf1, 0, TEST_BUF_SIZE * sizeof(test_valid_fields1));
+        HDmemset(vbuf2, 0, TEST_BUF_SIZE * sizeof(test_valid_fields2));
+        HDmemset(ccbuf, 0, TEST_BUF_SIZE * sizeof(set_t));
+        HDmemset(iibuf, 0, TEST_BUF_SIZE * sizeof(int));
+
+        /* one_tests[i] may be negative (shrink) or zero (no change) */
+        ext_dims[0] = (hsize_t)((int)prev_dims[0] + one_tests[i]);
+
+        /* Change the dimension sizes of the dataset */
+        if(H5Dset_extent(did, ext_dims) < 0)
+            FAIL_STACK_ERROR
+
+        /* Initialize data: compound datasets get cbuf, integer dataset gets ibuf */
+        if(!HDstrcmp(dname, DSET_CMPD) || !HDstrcmp(dname, DSET_CMPD_ESC)) {
+            if(H5Dwrite(did, dtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, cbuf) < 0)
+                FAIL_STACK_ERROR
+        } /* end if */
+        else if(!HDstrcmp(dname, DSET_ONE)) {
+            if(H5Dwrite(did, dtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf) < 0)
+                FAIL_STACK_ERROR
+        } /* end else-if */
+
+        /* There are changes in dimension sizes: retrieval should succeed */
+        if(one_tests[i] > 0) {
+            if(!HDstrcmp(dname, DSET_CMPD) || !HDstrcmp(dname, DSET_CMPD_ESC)) {
+                if(fields) {
+                    if(!HDstrcmp(fields, VALID_FIELDS1) || !HDstrcmp(fields, VALID_ESC_FIELDS1)) {
+                        /* Retrieve the elements in BUF (first field subset) */
+                        if(H5LDget_dset_elmts(did, prev_dims, ext_dims, fields, vbuf1) < 0)
+                            TEST_ERROR
+                        /* Appended elements must match what was written at the
+                         * same indices of the source buffer */
+                        for(j = 0; j < one_tests[i]; j++)
+                            VERIFY_ELMTS_VALID1(vbuf1[j], cbuf[prev_dims[0] + (hsize_t)j])
+                    } /* end if */
+                    else if(!HDstrcmp(fields, VALID_FIELDS2) || !HDstrcmp(fields, VALID_ESC_FIELDS2)) {
+                        /* Retrieve the elements in BUF (second field subset) */
+                        if(H5LDget_dset_elmts(did, prev_dims, ext_dims, fields, vbuf2) < 0)
+                            TEST_ERROR
+                        for(j = 0; j < one_tests[i]; j++)
+                            VERIFY_ELMTS_VALID2(vbuf2[j], cbuf[prev_dims[0] + (hsize_t)j])
+                    } /* end else-if */
+                    else
+                        TEST_ERROR
+                } /* end if */
+                else {
+                    /* Retrieve the elements in BUF (all compound members) */
+                    if(H5LDget_dset_elmts(did, prev_dims, ext_dims, fields, ccbuf) < 0)
+                        TEST_ERROR
+                    for(j = 0; j < one_tests[i]; j++)
+                        VERIFY_ELMTS_ALL(ccbuf[j], cbuf[prev_dims[0] + (hsize_t)j])
+                } /* end else */
+            } /* end if */
+            else {
+                /* Retrieve the elements in BUF (integer dataset) */
+                if(H5LDget_dset_elmts(did, prev_dims, ext_dims, fields, iibuf) < 0)
+                    TEST_ERROR
+                for(j = 0; j < one_tests[i]; j++)
+                    VERIFY_EQUAL(iibuf[j], ibuf[prev_dims[0] + (hsize_t)j])
+            } /* end else */
+        } /* end if */
+        else {
+            /* Verify failure when changes between prev_dims and ext_dims are same/decrease */
+            ret = H5LDget_dset_elmts(did, prev_dims, ext_dims, fields, iibuf);
+            VERIFY_EQUAL(ret, FAIL)
+        } /* end else */
+    } /* end for */
+
+    /* Closing */
+    if(H5Tclose(dtype) < 0)
+        FAIL_STACK_ERROR
+    if(H5Dclose(did) < 0)
+        FAIL_STACK_ERROR
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /* Remove the copied file */
+    HDremove(COPY_FILENAME);
+
+    PASSED();
+    return 0;
+
+error:
+    /* Release whatever was opened; H5E_BEGIN_TRY suppresses errors from
+     * closing ids that are still -1 */
+    H5E_BEGIN_TRY {
+        H5Tclose(dtype);
+        H5Dclose(did);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+    return(-1);
+} /* test_LD_elmts_one() */
+
+
+/*
+ **************************************************************************************
+ *
+ * Helper routine to verify elements of a 2-dimensional dataset
+ * _ldbuf contains the elements retrieved via H5LDget_dset_elmts()
+ * _buf contains the data written to the dataset
+ *
+ * e.g. prev_dims[2] = {4, 6}; ext_dims[2] = {6, 10}
+ * elements marked in 'v' in _buf are compared to elements in _ldbuf
+ * 0 1 2 3 4 5 | 6 7 8 9
+ * 0 | v v v v
+ * 1 | v v v v
+ * 2 | v v v v
+ * 3 | v v v v
+ * ---------------------
+ * 4 v v v v v v v v v v
+ * 5 v v v v v v v v v v
+ *
+ **************************************************************************************
+ */
+static herr_t
+verify_elmts_two(int type, hsize_t *ext_dims, hsize_t *prev_dims, void *_ldbuf, void *_buf)
+{
+    int k, m; /* Local index variable */
+
+    /* k walks the packed, retrieved elements in _ldbuf; (m, n) walk the
+     * newly-exposed region of the extended dataset in _buf */
+    k = 0;
+    for(m = 0; m < (int)ext_dims[0]; m++) {
+        int n, ind; /* Local index variable */
+
+        /* Offset of row m in the extended dataset; the VERIFY_ELMTS macro
+         * combines it with the local indices to locate the element in _buf
+         * (see the macro definition -- it uses these local names directly) */
+        ind = m * (int)ext_dims[1];
+        if(m < (int)prev_dims[0]) {
+            /* Row existed before the extension: only the columns appended
+             * beyond prev_dims[1] are new */
+            for(n = (int)prev_dims[1]; n < (int)ext_dims[1]; n++) {
+                VERIFY_ELMTS(type, k, ind, _ldbuf, _buf)
+                ++k;
+            } /* end for */
+        } /* end if */
+        else {
+            /* Entirely new row: every column is new */
+            for(n = 0; n < (int)ext_dims[1]; n++) {
+                VERIFY_ELMTS(type, k, ind, _ldbuf, _buf)
+                ++k;
+            } /* end for */
+        } /* end else */
+    } /* end for */
+
+    return(0);
+
+error:
+    /* Reached via goto from inside VERIFY_ELMTS on a mismatch */
+    return(-1);
+} /* verify_elmts_two() */
+
+
+/*
+ **************************************************************************************
+ * Testing for the High Level public routine: H5LDget_dset_elmts()
+ * Verify elements retrieved via H5LDget_dset_elmts() are correct as expected when
+ * the dataset's dimension sizes are changed according to two_tests[]:
+ *
+ * two-dimensional dataset: DSET_TWO with NULL fields
+ * DSET_CMPD_TWO with fields: NULL, VALID_FIELDS1, VALID_FIELDS2
+ *
+ * dims[0] dims[1]
+ * ------- -------
+ * case #1: increase increase
+ * case #2: increase decrease
+ * case #3: increase same
+ * case #4: decrease increase
+ * case #5: decrease decrease (failure)
+ * case #6: decrease same (failure)
+ * case #7: same increase
+ * case #8: same decrease (failure)
+ * case #9: same same (failure)
+ *
+ **************************************************************************************
+ */
+static herr_t
+test_LD_elmts_two(const char *file, const char *dname, const char *fields)
+{
+    hid_t fid=-1; /* file identifier */
+    hid_t did=-1; /* dataset identifier */
+    hid_t dtype=-1; /* dataset's data type */
+    hsize_t ext_dims[2]; /* extended dimension sizes of the dataset */
+    hsize_t prev_dims[2]; /* previous dimension sizes of the dataset */
+    int i; /* local index variable */
+    int ret = 0; /* return value */
+
+    TESTING("H5LDget_dset_elmts: two-dimensional dataset");
+
+    /* Copy the test file so the original test data stays pristine for other tests */
+    if(h5_make_local_copy(file, COPY_FILENAME) < 0)
+        TEST_ERROR
+
+    /* Initialize the source buffers: every member of element i holds the
+     * value i, so retrieved elements can be verified against their index */
+    for(i = 0; i < TEST_BUF_SIZE; i++) {
+        cbuf[i].field1 = i;
+        cbuf[i].field2.a = i;
+        cbuf[i].field2.b.a = i;
+        cbuf[i].field2.b.b = i;
+        cbuf[i].field2.b.c = i;
+        cbuf[i].field2.c = i;
+        cbuf[i].field3 = (double)i;
+        cbuf[i].field4.a = i;
+        cbuf[i].field4.b = i;
+        ibuf[i] = i;
+    } /* end for */
+
+    /* Open the copied file */
+    if((fid = H5Fopen(COPY_FILENAME, H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Open the dataset */
+    if((did = H5Dopen2(fid, dname, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Get the dataset's data type
+     * NOTE(review): the transient id returned by H5Dget_type() is never
+     * closed -- a minor id leak in a test program; confirm acceptable */
+    if((dtype = H5Tget_native_type(H5Dget_type(did), H5T_DIR_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Get current dimension sizes before extending the dataset's dimension sizes */
+    if(H5LDget_dset_dims(did, prev_dims) < 0)
+        FAIL_STACK_ERROR
+
+    /* Loop through different variations of extending the dataset */
+    for(i = 0; i < TWO_NTESTS; i++) {
+        /* Clear the retrieval buffers so stale data cannot mask a failure */
+        HDmemset(vbuf1, 0, TEST_BUF_SIZE * sizeof(test_valid_fields1));
+        HDmemset(vbuf2, 0, TEST_BUF_SIZE * sizeof(test_valid_fields2));
+        HDmemset(ccbuf, 0, TEST_BUF_SIZE * sizeof(set_t));
+        HDmemset(iibuf, 0, TEST_BUF_SIZE * sizeof(int));
+
+        /* two_tests[i][*] may be negative (shrink) or zero (no change) */
+        ext_dims[0] = (hsize_t)((int)prev_dims[0] + two_tests[i][0]);
+        ext_dims[1] = (hsize_t)((int)prev_dims[1] + two_tests[i][1]);
+
+        /* Change the dimension sizes of the dataset */
+        if(H5Dset_extent(did, ext_dims) < 0)
+            FAIL_STACK_ERROR
+
+        /* Initialize data: compound dataset gets cbuf, integer dataset gets ibuf */
+        if(!HDstrcmp(dname, DSET_CMPD_TWO)) {
+            if(H5Dwrite(did, dtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, cbuf) < 0)
+                FAIL_STACK_ERROR
+        } /* end if */
+        else if(!HDstrcmp(dname, DSET_TWO)) {
+            if(H5Dwrite(did, dtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf) < 0)
+                FAIL_STACK_ERROR
+        } /* end else-if */
+        else
+            TEST_ERROR
+
+        /* There are changes in dimension sizes: retrieval should succeed */
+        if(two_tests[i][0] > 0 || two_tests[i][1] > 0) {
+            if(!HDstrcmp(dname, DSET_CMPD_TWO)) {
+                if(fields) {
+                    if(!HDstrcmp(fields, VALID_FIELDS1) || !HDstrcmp(fields, VALID_ESC_FIELDS1)) {
+                        /* Retrieve the elements in BUF (first field subset) */
+                        if(H5LDget_dset_elmts(did, prev_dims, ext_dims, fields, vbuf1) < 0)
+                            TEST_ERROR
+                        if(verify_elmts_two(TWO_CMPD_VALID1, ext_dims, prev_dims, vbuf1, cbuf) < 0)
+                            TEST_ERROR
+                    } /* end if */
+                    else if(!HDstrcmp(fields, VALID_FIELDS2) || !HDstrcmp(fields, VALID_ESC_FIELDS2)) {
+                        /* Retrieve the elements in BUF (second field subset) */
+                        if(H5LDget_dset_elmts(did, prev_dims, ext_dims, fields, vbuf2) < 0)
+                            TEST_ERROR
+                        if(verify_elmts_two(TWO_CMPD_VALID2, ext_dims, prev_dims, vbuf2, cbuf) < 0)
+                            TEST_ERROR
+                    } /* end else-if */
+                    else
+                        TEST_ERROR
+                } /* end if */
+                else {
+                    /* Retrieve the elements in BUF (all compound members) */
+                    if(H5LDget_dset_elmts(did, prev_dims, ext_dims, fields, ccbuf) < 0)
+                        TEST_ERROR
+                    if(verify_elmts_two(TWO_CMPD_NULL, ext_dims, prev_dims, ccbuf, cbuf) < 0)
+                        TEST_ERROR
+                } /* end else */
+            } /* end if */
+            else { /* DSET_TWO */
+                /* Retrieve the elements in BUF (integer dataset) */
+                if(H5LDget_dset_elmts(did, prev_dims, ext_dims, fields, iibuf) < 0)
+                    TEST_ERROR
+                if(verify_elmts_two(TWO_NONE, ext_dims, prev_dims, iibuf, ibuf) < 0)
+                    TEST_ERROR
+            } /* end else */
+        } /* end if */
+        else {
+            /* Verify failure when changes between prev_dims and ext_dims are same/decrease */
+            ret = H5LDget_dset_elmts(did, prev_dims, ext_dims, fields, iibuf);
+            VERIFY_EQUAL(ret, FAIL)
+        } /* end else */
+    } /* end for */
+
+    /* Closing */
+    if(H5Tclose(dtype) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Dclose(did) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Remove the copied file */
+    HDremove(COPY_FILENAME);
+
+    PASSED();
+    return 0;
+
+error:
+    /* Release whatever was opened; H5E_BEGIN_TRY suppresses errors from
+     * closing ids that are still -1 */
+    H5E_BEGIN_TRY {
+        H5Tclose(dtype);
+        H5Dclose(did);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+    return(-1);
+} /* test_LD_elmts_two() */
+
+/*
+ * Tests for High Level routines:
+ * H5LDget_dset_dims(), H5LDget_dset_elmts, H5LDget_dset_type_size()
+ */
+int main(void)
+{
+    int nerrors = 0;
+
+    /* Set up temporary buffers for tests: test_LD_elmts_one() & test_LD_elmts_two().
+     * These are file-scope pointers (zero-initialized), so the error path can
+     * safely free only the ones that were actually allocated. */
+    if(NULL == (ibuf = (int *)HDmalloc(sizeof(int) * TEST_BUF_SIZE)))
+        FAIL_STACK_ERROR;
+    if(NULL == (iibuf = (int *)HDmalloc(sizeof(int) * TEST_BUF_SIZE)))
+        FAIL_STACK_ERROR;
+
+    if(NULL == (cbuf = (set_t *)HDmalloc(sizeof(set_t) * TEST_BUF_SIZE)))
+        FAIL_STACK_ERROR;
+    if(NULL == (ccbuf = (set_t *)HDmalloc(sizeof(set_t) * TEST_BUF_SIZE)))
+        FAIL_STACK_ERROR;
+
+    if(NULL == (vbuf1 = (test_valid_fields1 *)HDmalloc(sizeof(test_valid_fields1) * TEST_BUF_SIZE)))
+        FAIL_STACK_ERROR;
+    if(NULL == (vbuf2 = (test_valid_fields2 *)HDmalloc(sizeof(test_valid_fields2) * TEST_BUF_SIZE)))
+        FAIL_STACK_ERROR;
+
+    /*
+     * Testing H5LDget_dset_dims()
+     */
+    nerrors += test_LD_dims_params(FILE);
+    nerrors += test_LD_dims(FILE);
+
+    /*
+     * Testing H5LDget_dset_type_size()
+     */
+    nerrors += test_LD_size(FILE);
+
+    /*
+     * Testing invalid conditions for H5LDget_dset_elmts()
+     */
+    nerrors += test_LD_elmts_invalid(FILE);
+
+    /*
+     * Testing H5LDget_dset_elmts():
+     * 1-dimensional dataset
+     */
+    nerrors += test_LD_elmts_one(FILE, DSET_ONE, NULL);
+
+    /*
+     * Testing H5LDget_dset_elmts():
+     * 1-dimensional dataset w/ compound datatype
+     */
+    nerrors += test_LD_elmts_one(FILE, DSET_CMPD, NULL);
+    nerrors += test_LD_elmts_one(FILE, DSET_CMPD, VALID_FIELDS1);
+    nerrors += test_LD_elmts_one(FILE, DSET_CMPD, VALID_FIELDS2);
+
+    /*
+     * Testing H5LDget_dset_elmts():
+     * 1-dimensional dataset with compound datatype and
+     * member names with escape/separator characters
+     */
+    nerrors += test_LD_elmts_one(FILE, DSET_CMPD_ESC, NULL);
+    nerrors += test_LD_elmts_one(FILE, DSET_CMPD_ESC, VALID_ESC_FIELDS1);
+    nerrors += test_LD_elmts_one(FILE, DSET_CMPD_ESC, VALID_ESC_FIELDS2);
+
+    /*
+     * Testing H5LDget_dset_elmts() for 2-dimensional datasets
+     */
+    nerrors += test_LD_elmts_two(FILE, DSET_TWO, NULL);
+    nerrors += test_LD_elmts_two(FILE, DSET_CMPD_TWO, NULL);
+    nerrors += test_LD_elmts_two(FILE, DSET_CMPD_TWO, VALID_FIELDS1);
+    nerrors += test_LD_elmts_two(FILE, DSET_CMPD_TWO, VALID_FIELDS2);
+
+    /* check for errors */
+    if(nerrors)
+        goto error;
+
+    /* Free temporary buffers */
+    HDfree(ibuf);
+    HDfree(iibuf);
+    HDfree(cbuf);
+    HDfree(ccbuf);
+    HDfree(vbuf1);
+    HDfree(vbuf2);
+
+    puts("All tests for H5LD high level routines passed.");
+
+    return(0);
+
+error:
+    /* Free any buffers that were allocated; previously these leaked on the
+     * error path (unallocated file-scope pointers are NULL) */
+    if(ibuf)
+        HDfree(ibuf);
+    if(iibuf)
+        HDfree(iibuf);
+    if(cbuf)
+        HDfree(cbuf);
+    if(ccbuf)
+        HDfree(ccbuf);
+    if(vbuf1)
+        HDfree(vbuf1);
+    if(vbuf2)
+        HDfree(vbuf2);
+    return(1);
+} /* main() */
+
diff --git a/hl/test/test_ld.h5 b/hl/test/test_ld.h5
new file mode 100644
index 0000000..bd5730c
--- /dev/null
+++ b/hl/test/test_ld.h5
Binary files differ
diff --git a/hl/test/test_table_be.h5 b/hl/test/test_table_be.h5
index 3639695..970018e 100644
--- a/hl/test/test_table_be.h5
+++ b/hl/test/test_table_be.h5
Binary files differ
diff --git a/hl/test/test_table_cray.h5 b/hl/test/test_table_cray.h5
index d22dce3..1fcd75b 100644
--- a/hl/test/test_table_cray.h5
+++ b/hl/test/test_table_cray.h5
Binary files differ
diff --git a/hl/test/test_table_le.h5 b/hl/test/test_table_le.h5
index 6c330fd..ee5b532 100644
--- a/hl/test/test_table_le.h5
+++ b/hl/test/test_table_le.h5
Binary files differ
diff --git a/hl/tools/Makefile.am b/hl/tools/Makefile.am
index 5ef8a96..7fab05f 100644
--- a/hl/tools/Makefile.am
+++ b/hl/tools/Makefile.am
@@ -21,7 +21,9 @@
include $(top_srcdir)/config/commence.am
+CONFIG=ordered
+
# All subdirectories
-SUBDIRS=gif2h5
+SUBDIRS=gif2h5 h5watch
include $(top_srcdir)/config/conclude.am
diff --git a/hl/tools/h5watch/Makefile.am b/hl/tools/h5watch/Makefile.am
new file mode 100644
index 0000000..a5891ef
--- /dev/null
+++ b/hl/tools/h5watch/Makefile.am
@@ -0,0 +1,44 @@
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+##
+## Makefile.am
+## Run automake to generate a Makefile.in from this file.
+#
+# HDF5 Library Makefile(.in)
+#
+
+include $(top_srcdir)/config/commence.am
+
+# Include src and tools/lib directories
+AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/tools/lib -I$(top_srcdir)/hl/src
+
+# These are our main targets, the tools
+TEST_SCRIPT=testh5watch.sh
+check_SCRIPTS=$(TEST_SCRIPT)
+SCRIPT_DEPEND=swmr_check_compat_vfd$(EXEEXT) extend_dset$(EXEEXT) h5watch$(EXEEXT)
+
+bin_PROGRAMS=h5watch
+noinst_PROGRAMS=swmr_check_compat_vfd h5watchgentest extend_dset
+
+# Add h5watch specific linker flags here
+h5watch_LDFLAGS = $(LT_STATIC_EXEC) $(AM_LDFLAGS)
+
+# Programs all depend on the hdf5 library, the tools library, and the HL
+# library.
+LDADD=$(LIBH5_HL) $(LIBH5TOOLS) $(LIBHDF5)
+#
+CHECK_CLEANFILES+=*.h5
+DISTCLEANFILES=testh5watch.sh
+
+include $(top_srcdir)/config/conclude.am
diff --git a/hl/tools/h5watch/extend_dset.c b/hl/tools/h5watch/extend_dset.c
new file mode 100644
index 0000000..7efdd3b
--- /dev/null
+++ b/hl/tools/h5watch/extend_dset.c
@@ -0,0 +1,489 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5HLprivate2.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+
+/*
+ * Extending datasets in WATCH.h5 generated by h5watchgentest.c
+ */
+#define DSET_ONE "DSET_ONE"
+#define DSET_TWO "DSET_TWO"
+#define DSET_CMPD "DSET_CMPD"
+#define DSET_CMPD_ESC "DSET_CMPD_ESC"
+#define DSET_CMPD_TWO "DSET_CMPD_TWO"
+#define DSET_ALLOC_LATE "DSET_ALLOC_LATE"
+#define DSET_ALLOC_EARLY "DSET_ALLOC_EARLY"
+
+/* The message sent by this process (extend_dset) to the test script to start "h5watch" */
+#define WRITER_MESSAGE "writer_message"
+/* The message received from the test script to start extending dataset */
+#define READER_MESSAGE "reader_message"
+/* Message timeout in seconds */
+#define MESSAGE_TIMEOUT 300
+
+/* Size of data buffer */
+#define TEST_BUF_SIZE 100
+
+/*
+ * Test variations (incremental) for one-dimensional dataset:
+ * Varies from 10->13->12->12->1->3
+ */
+#define ONE_NTESTS 5
+int one_tests[ONE_NTESTS] = {3, -1, 0, -11, 2};
+
+/*
+ * Test variations (incremental) for two-dimensional dataset:
+ * Varies from {4,10}->{6,12}->{8,1}->{10,1}->
+ * {3,3}->{2,2}->{1,2}->
+ * {1,4}->{1,3}->{1,3}
+ */
+#define TWO_NTESTS 9
+int two_tests[TWO_NTESTS][2] = { {2, 2}, {2, -11}, {2, 0},
+ {-7, 2}, {-1, -1}, {-1, 0},
+ {0, 2}, {0, -1}, {0, 0}
+ };
+
+static int extend_dset_two(const char *file, char *dname);
+static int extend_dset_one(const char *file, char *dname);
+void send_message(const char *file);
+static int wait_message(const char *file);
+
+
+/* Data structures for datasets with compound data type.
+ * These mirror the compound datatype of the DSET_CMPD* datasets in WATCH.h5
+ * (generated by h5watchgentest.c); the in-memory layout must match what is
+ * passed to H5Dwrite(). */
+typedef struct sub22_t {
+    unsigned int a;
+    unsigned int b;
+    unsigned int c;
+} sub22_t;
+
+/* Nested member: a sub22_t sandwiched between two scalars */
+typedef struct sub2_t {
+    unsigned int a;
+    sub22_t b;
+    unsigned int c;
+} sub2_t;
+
+typedef struct sub4_t {
+    unsigned int a;
+    unsigned int b;
+} sub4_t;
+
+/* Top-level element type written to the compound datasets */
+typedef struct set_t {
+    unsigned int field1;
+    sub2_t field2;
+    double field3;
+    sub4_t field4;
+} set_t;
+
+/*
+ * To send a message by creating the file.
+ * The peer polls for this file's existence (see wait_message()), so only the
+ * file's presence matters, not its content.
+ */
+void
+send_message(const char *file)
+{
+    FILE *id;
+
+    id = fopen(file, "w+");
+    /* Guard against fclose(NULL), which is undefined behavior: if the file
+     * could not be created the message is simply not sent and the peer will
+     * eventually time out */
+    if(id != NULL)
+        fclose(id);
+} /* send_message() */
+
+/*
+ *
+ * Repeatedly check for the message file.
+ * It will stop when the file exists or exceeds the timeout limit.
+ */
+static int
+wait_message(const char *file)
+{
+    FILE   *fp;    /* handle used to probe for the message file */
+    time_t  start; /* time at which polling began */
+    time_t  now;   /* current time, sampled each iteration */
+
+    /* Record when polling began so we can enforce the timeout */
+    time(&start);
+
+    /* Busy-poll until the message file appears or the timeout expires */
+    for(;;) {
+        if((fp = fopen(file, "r")) != NULL)
+            break;
+
+        time(&now);
+        /*
+         * If we have waited longer than MESSAGE_TIMEOUT the sender is
+         * unlikely to ever create the file, so fail instead of looping
+         * forever.
+         */
+        if(difftime(now, start) > MESSAGE_TIMEOUT)
+            return(FAIL);
+    }
+
+    /* Consume the message: close and remove the file */
+    fclose(fp);
+    unlink(file);
+    return(SUCCEED);
+} /* wait_message() */
+
+/*
+ ***********************************************************************
+ *
+ * Extending a two-dimensional dataset:
+ * dims[0] dims[1]
+ * ------- -------
+ * case #1: increase increase
+ * case #2: increase decrease
+ * case #3: increase same
+ * case #4: decrease increase
+ * case #5: decrease decrease (no action)
+ * case #6: decrease same (no action)
+ * case #7: same increase
+ * case #8: same decrease (no action)
+ * case #9: same same (no action)
+ *
+ * two_tests[TWO_NTESTS][2] = { {2,2}, {2,-11}, {2,0},
+ * {-7,2}, {-1,-1}, {-1,0},
+ * {0,2}, {0,-1}, {0,0} }
+ * varies from {4,10}->{6,12}->{8,1}->{10,1}->
+ * {3,3}->{2,2}->{1,2}->
+ * {1,4}->{1,3}->{1,3}
+ ***********************************************************************
+ */
+static int
+extend_dset_two(const char *file, char *dname)
+{
+    hid_t fid = -1; /* file id */
+    hid_t fapl = -1; /* file access property list id */
+    hid_t did = -1; /* dataset id */
+    hid_t sid = -1; /* dataspace id */
+    hid_t dtid = -1; /* dataset's datatype id */
+    int ndims; /* # of dimension sizes */
+    unsigned i, j; /* local index variable */
+    hsize_t ext_dims[2]; /* new dimension sizes after extension */
+    hsize_t cur_dims[2]; /* current dimension sizes */
+    size_t dtype_size; /* size of the dataset's datatype */
+    unsigned num_elmts; /* number of elements in the dataset */
+    int ibuf[TEST_BUF_SIZE]; /* buffer for storing retrieved elements (integer) */
+    set_t cbuf[TEST_BUF_SIZE]; /* buffer for storing retrieved elements (compound) */
+
+    /* Create a copy of file access property list */
+    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+        goto done;
+
+    /* Set to use the latest library format (required for SWMR access);
+     * previously this path returned -1 directly and leaked fapl */
+    if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+        goto done;
+
+    /* Open the file and dataset with SWMR write */
+    if((fid = H5Fopen(file, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+        goto done;
+
+    if((did = H5Dopen2(fid, dname, H5P_DEFAULT)) < 0)
+        goto done;
+
+    /* Send message to the test script to start "h5watch" */
+    send_message(WRITER_MESSAGE);
+
+    if((sid = H5Dget_space(did)) < 0)
+        goto done;
+
+    if((ndims = H5Sget_simple_extent_ndims(sid)) < 0)
+        goto done;
+
+    /* Get the size of the dataset's datatype (also validates the datatype) */
+    if((dtype_size = H5LDget_dset_type_size(did, NULL)) == 0)
+        goto done;
+
+    /* Get the dataset's data type */
+    if((dtid = H5Tget_native_type(H5Dget_type(did), H5T_DIR_DEFAULT)) < 0)
+        goto done;
+
+    /* Wait for message from the test script to start extending dataset */
+    if(wait_message(READER_MESSAGE) < 0)
+        goto done;
+
+    /* Loop through different variations of extending the dataset */
+    for(i = 0; i < TWO_NTESTS; i++) {
+
+        /* sleep to emulate about 2 seconds of application operation */
+        sleep(2);
+
+        /* Get current dimension sizes */
+        if(H5LDget_dset_dims(did, cur_dims) < 0)
+            goto done;
+
+        /* Set up the new extended dimension sizes */
+        ext_dims[0] = cur_dims[0] + (hsize_t)two_tests[i][0];
+        ext_dims[1] = cur_dims[1] + (hsize_t)two_tests[i][1];
+
+        /* Extend the dataset */
+        if(H5Dset_extent(did, ext_dims) < 0)
+            goto done;
+
+        num_elmts = 1;
+        for(j = 0; j < (unsigned)ndims; j++)
+            num_elmts *= (unsigned)ext_dims[j];
+
+        /* Defend against overrunning the fixed-size write buffers if the
+         * test tables are ever changed to grow the dataset further */
+        if(num_elmts > TEST_BUF_SIZE)
+            goto done;
+
+        /* Compound type */
+        if(!HDstrcmp(dname, DSET_CMPD_TWO)) {
+
+            /* Every member of every element carries the test number i+1 */
+            HDmemset(cbuf, 0, sizeof(cbuf));
+            for(j = 0; j < num_elmts; j++) {
+                cbuf[j].field1 = i + 1;
+                cbuf[j].field2.a = i + 1;
+                cbuf[j].field2.c = i + 1;
+                cbuf[j].field2.b.a = i + 1;
+                cbuf[j].field2.b.b = i + 1;
+                cbuf[j].field2.b.c = i + 1;
+                cbuf[j].field3 = i + 1;
+                cbuf[j].field4.a = i + 1;
+                cbuf[j].field4.b = i + 1;
+            }
+
+            /* Write to the dataset */
+            if(H5Dwrite(did, dtid, H5S_ALL, H5S_ALL, H5P_DEFAULT, cbuf) < 0)
+                goto done;
+        } else { /* Integer type */
+            HDmemset(ibuf, 0, sizeof(ibuf));
+            for(j = 0; j < num_elmts; j++)
+                ibuf[j] = (int)(i + 1);
+
+            /* Write to the dataset */
+            if(H5Dwrite(did, dtid, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf) < 0)
+                goto done;
+        }
+
+        /* Flush so the concurrent SWMR reader (h5watch) sees the new data */
+        if(H5Dflush(did) < 0)
+            goto done;
+
+    } /* end for TWO_NTESTS */
+
+    /* Closing */
+    if(H5Tclose(dtid) < 0) goto done;
+    if(H5Dclose(did) < 0) goto done;
+    if(H5Pclose(fapl) < 0) goto done;
+    if(H5Fclose(fid) < 0) goto done;
+
+    return(SUCCEED);
+
+done:
+    /* H5E_BEGIN_TRY suppresses errors from closing ids that are still -1 */
+    H5E_BEGIN_TRY
+        H5Tclose(dtid);
+        H5Dclose(did);
+        H5Pclose(fapl);
+        H5Fclose(fid);
+    H5E_END_TRY
+
+    return(FAIL);
+
+} /* extend_dset_two() */
+
+/*
+ ***********************************************************************
+ *
+ * Extending a one-dimensional dataset
+ * Test cases:
+ * #1: increase
+ * #2: decrease
+ * #3: same
+ * #4: decrease
+ * #5: increase
+ *
+ * one_tests[ONE_NTESTS] = {3, -1, 0, -11, 2}
+ * varies from 10->13->12->12->1->3
+ *
+ ***********************************************************************
+ */
+static int
+extend_dset_one(const char *file, char *dname)
+{
+    hid_t fid = -1; /* file id */
+    hid_t fapl = -1; /* file access property list id */
+    hid_t did = -1; /* dataset id */
+    hid_t dtid = -1; /* dataset's datatype id */
+    hid_t sid = -1; /* dataspace id */
+    hid_t mid = -1; /* memory space id */
+    unsigned i, j; /* local index variable */
+    int ibuf[TEST_BUF_SIZE]; /* buffer for storing retrieved elements (integer) */
+    set_t cbuf[TEST_BUF_SIZE]; /* buffer for storing retrieved elements (compound) */
+    hsize_t cur_dims[1]; /* current dimension sizes */
+    hsize_t ext_dims[1]; /* new dimension sizes after extension */
+    hsize_t offset[1]; /* starting offsets of appended data */
+    hsize_t count[1]; /* dimension sizes of appended data */
+    size_t dtype_size; /* size of the dataset's datatype */
+
+    /* Create a copy of file access property list */
+    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+        goto done;
+
+    /* Set to use the latest library format (required for SWMR access);
+     * previously this path returned -1 directly and leaked fapl */
+    if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+        goto done;
+
+    /* Open the file and dataset with SWMR write */
+    if((fid = H5Fopen(file, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+        goto done;
+
+    /* Send message to the test script to start "h5watch" */
+    send_message(WRITER_MESSAGE);
+
+    if((did = H5Dopen2(fid, dname, H5P_DEFAULT)) < 0)
+        goto done;
+
+    /* Get size of the dataset's datatype (also validates the datatype) */
+    if((dtype_size = H5LDget_dset_type_size(did, NULL)) == 0)
+        goto done;
+
+    /* Get dataset's datatype */
+    if((dtid = H5Tget_native_type(H5Dget_type(did), H5T_DIR_DEFAULT)) < 0)
+        goto done;
+
+    /* Wait for message from the test script to start extending dataset;
+     * previously the return value was ignored, silently proceeding after a
+     * timeout (extend_dset_two() checks it, so be consistent) */
+    if(wait_message(READER_MESSAGE) < 0)
+        goto done;
+
+    /* Loop through different variations of extending the dataset */
+    for(i = 0; i < ONE_NTESTS; i++) {
+
+        /* sleep to emulate about 2 seconds of application operation */
+        sleep(2);
+
+        /* Get current dimension sizes */
+        if(H5LDget_dset_dims(did, cur_dims) < 0)
+            goto done;
+
+        /* Set up the new extended dimension sizes */
+        ext_dims[0] = cur_dims[0] + (hsize_t)one_tests[i];
+
+        /* Extend the dataset */
+        if(H5Dset_extent(did, ext_dims) < 0)
+            goto done;
+
+        /* Write to the new appended region of the dataset (only when the
+         * dataset actually grew) */
+        if(one_tests[i] > 0) {
+
+            /* Select the extended region */
+            offset[0] = cur_dims[0];
+            count[0] = (hsize_t)one_tests[i];
+            if((sid = H5Dget_space(did)) < 0)
+                goto done;
+            if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, offset, NULL, count, NULL) < 0)
+                goto done;
+
+            /* Set up memory space matching the appended region */
+            if((mid = H5Screate_simple(1, count, NULL)) < 0)
+                goto done;
+
+            /* Initialize data for the extended region of the dataset */
+            /* Compound type */
+            if(!HDstrcmp(dname, DSET_CMPD) || !HDstrcmp(dname, DSET_CMPD_ESC)) {
+                HDmemset(cbuf, 0, sizeof(cbuf));
+                for(j = 0; j < (unsigned)one_tests[i]; j++) {
+                    cbuf[j].field1 = j + 1;
+                    cbuf[j].field2.a = j + 2;
+                    cbuf[j].field2.b.a = j + 2;
+                    cbuf[j].field2.b.b = j + 2;
+                    cbuf[j].field2.b.c = j + 2;
+                    cbuf[j].field2.c = j + 2;
+
+                    cbuf[j].field3 = j + 3;
+
+                    cbuf[j].field4.a = j + 4;
+                    cbuf[j].field4.b = j + 4;
+                } /* end for */
+
+                /* Write to the extended region of the dataset */
+                if(H5Dwrite(did, dtid, mid, sid, H5P_DEFAULT, cbuf) < 0)
+                    goto done;
+            } else { /* Integer type */
+                for(j = 0; j < (unsigned)one_tests[i]; j++)
+                    ibuf[j] = (int)j;
+
+                /* Write to the extended region of the dataset */
+                if(H5Dwrite(did, dtid, mid, sid, H5P_DEFAULT, ibuf) < 0)
+                    goto done;
+            }
+
+            /* Closing */
+            if(H5Sclose(sid) < 0) goto done;
+            if(H5Sclose(mid) < 0) goto done;
+        } /* end if */
+
+        /* Flush so the concurrent SWMR reader (h5watch) sees the new data */
+        if(H5Dflush(did) < 0)
+            goto done;
+
+    } /* end for ONE_NTESTS */
+
+    /* Closing */
+    if(H5Tclose(dtid) < 0) goto done;
+    if(H5Dclose(did) < 0) goto done;
+    if(H5Pclose(fapl) < 0) goto done;
+    if(H5Fclose(fid) < 0) goto done;
+
+    return(SUCCEED);
+
+done:
+    /* H5E_BEGIN_TRY suppresses errors from closing ids that are still -1
+     * or already closed */
+    H5E_BEGIN_TRY
+        H5Sclose(sid);
+        H5Sclose(mid);
+        H5Tclose(dtid);
+        H5Dclose(did);
+        H5Pclose(fapl);
+        H5Fclose(fid);
+    H5E_END_TRY
+
+    return(FAIL);
+} /* extend_dset_one() */
+
+/* Usage: extend_dset xx.h5 dname */
+int
+main(int argc, const char *argv[])
+{
+    char *dname = NULL;
+    char *fname = NULL;
+
+    if(argc != 3) {
+        fprintf(stderr, "Should have file name and dataset name to be extended...\n");
+        goto done;
+    }
+
+    /* Duplicate the file name and the dataset name to be extended;
+     * strdup() results were previously dereferenced without a NULL check */
+    if(NULL == (fname = strdup(argv[1])))
+        goto done;
+    if(NULL == (dname = strdup(argv[2])))
+        goto done;
+
+    /* Dispatch on the dataset name: all one-dimensional datasets (compound
+     * and integer) share extend_dset_one(); the two previously-duplicated
+     * branches calling it are merged here */
+    if(!HDstrcmp(dname, DSET_CMPD) || !HDstrcmp(dname, DSET_CMPD_ESC) ||
+            !HDstrcmp(dname, DSET_ONE) ||
+            !HDstrcmp(dname, DSET_ALLOC_LATE) ||
+            !HDstrcmp(dname, DSET_ALLOC_EARLY)) {
+        if(extend_dset_one(fname, dname) < 0)
+            goto done;
+    } else if(!HDstrcmp(dname, DSET_TWO) || !HDstrcmp(dname, DSET_CMPD_TWO)) {
+        if(extend_dset_two(fname, dname) < 0)
+            goto done;
+    } else {
+        fprintf(stdout, "Dataset cannot be extended...\n");
+        goto done;
+    }
+
+    /* Release the duplicated names before exiting (previously leaked on
+     * the success path) */
+    HDfree(dname);
+    HDfree(fname);
+    exit(EXIT_SUCCESS);
+
+done:
+    if(dname) HDfree(dname);
+    if(fname) HDfree(fname);
+    exit(EXIT_FAILURE);
+} /* main() */
diff --git a/hl/tools/h5watch/h5watch.c b/hl/tools/h5watch/h5watch.c
new file mode 100644
index 0000000..643b4e7
--- /dev/null
+++ b/hl/tools/h5watch/h5watch.c
@@ -0,0 +1,977 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+#include <unistd.h>
+#include <float.h>
+
+#include "h5tools.h"
+#include "h5tools_dump.h"
+#include "H5LDprivate.h"
+
+/*
+ * Note: This tool uses private routines
+ */
+#define PROGRAMNAME "h5watch" /* Name of tool */
+#define FIELD_SEP "," /* nested field separator */
+#define DEFAULT_RETRY 50 /* number of times to try opening the file */
+
+
+/*
+ * Note: (see comments in hl/src/H5LDprivate.h)
+ * This tool uses private routines H5LD_construct_vector() and H5LD_clean_vector()
+ * This tool uses H5LD_memb_t data structure declared in H5LDprivate.h
+ */
+
+const char *progname = "h5watch"; /* tool name */
+static char *g_list_of_fields = NULL; /* command line input for "list_of_fields" */
+static char *g_dup_fields = NULL; /* copy of "list_of_fields" */
+static H5LD_memb_t **g_listv = NULL; /* vector info for "list_of_fields" */
+
+static hbool_t g_monitor_size_only = FALSE; /* monitor changes in dataset dimension sizes */
+static unsigned g_polling_interval = 1; /* polling interval to check appended data */
+static hbool_t g_label = FALSE; /* label compound values */
+static int g_display_width = 80; /* output width in characters */
+static hbool_t g_simple_output = FALSE; /* make output more machine-readable */
+static unsigned g_retry = DEFAULT_RETRY; /* # of times to try opening the file if somehow file is unstable */
+static hbool_t g_display_hex = FALSE; /* display data in hexadecimal format : LATER */
+
+static herr_t doprint(hid_t did, hsize_t *start, hsize_t *block, int rank);
+static herr_t slicendump(hid_t did, hsize_t *prev_dims, hsize_t *cur_dims,
+ hsize_t *start, hsize_t *block, int rank, int subrank);
+static herr_t monitor_dataset(hid_t fid, char *dsetname);
+static herr_t process_cmpd_fields(hid_t fid, char *dsetname);
+static herr_t check_dataset(hid_t fid, char *dsetname);
+static void leave(int ret);
+static void usage(const char *prog);
+static void parse_command_line(int argc, const char *argv[]);
+
+
+/*
+ * Command-line options: The user can only specify long-named parameters.
+ * The long-named ones can be partially spelled. When
+ * adding more, make sure that they don't clash with each other.
+ */
+static const char *s_opts ="?";
+static struct long_options l_opts[] = {
+ { "help", no_arg, 'h' },
+ { "hel", no_arg, 'h' },
+ { "dim", no_arg, 'd' },
+ { "di", no_arg, 'd' },
+ { "label", no_arg, 'l' },
+ { "labe", no_arg, 'l' },
+ { "lab", no_arg, 'l' },
+ { "la", no_arg, 'l' },
+ { "simple", no_arg, 'S' },
+ { "simpl", no_arg, 'S' },
+ { "simp", no_arg, 'S' },
+ { "sim", no_arg, 'S' },
+ { "si", no_arg, 'S' },
+ { "hexdump", no_arg, 'x' },
+ { "hexdum", no_arg, 'x' },
+ { "hexdu", no_arg, 'x' },
+ { "hexd", no_arg, 'x' },
+ { "hex", no_arg, 'x' },
+ { "width", require_arg, 'w' },
+ { "widt", require_arg, 'w' },
+ { "wid", require_arg, 'w' },
+ { "wi", require_arg, 'w' },
+ { "polling", require_arg, 'p' },
+ { "pollin", require_arg, 'p' },
+ { "polli", require_arg, 'p' },
+ { "poll", require_arg, 'p' },
+ { "pol", require_arg, 'p' },
+ { "po", require_arg, 'p' },
+ { "fields", require_arg, 'f' },
+ { "field", require_arg, 'f' },
+ { "fiel", require_arg, 'f' },
+ { "fie", require_arg, 'f' },
+ { "fi", require_arg, 'f' },
+ { "version", no_arg, 'V' },
+ { "versio", no_arg, 'V' },
+ { "versi", no_arg, 'V' },
+ { "vers", no_arg, 'V' },
+ { "ver", no_arg, 'V' },
+ { "ve", no_arg, 'V' },
+ { NULL, 0, '\0' }
+};
+
+/*-------------------------------------------------------------------------
+ * Function: doprint()
+ *
+ * Purpose: Prepare to print the dataset's appended data.
+ * Call the tools library routine h5tools_dump_dset() to do the printing.
+ * (This routine is mostly copied from dump_dataset_values() in tools/h5ls/h5ls.c
+ * and modified accordingly).
+ *
+ * Return: 0 on success; negative on failure
+ *
+ * Programmer: Vailin Choi; August 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+doprint(hid_t did, hsize_t *start, hsize_t *block, int rank)
+{
+ h5tools_context_t ctx; /* print context */
+ h5tool_format_t info; /* Format info for the tools library */
+ static char fmt_double[16], fmt_float[16]; /* Format info */
+ struct subset_t subset; /* Subsetting info */
+ hsize_t ss_start[H5S_MAX_RANK]; /* Info for hyperslab */
+ hsize_t ss_stride[H5S_MAX_RANK]; /* Info for hyperslab */
+ hsize_t ss_block[H5S_MAX_RANK]; /* Info for hyperslab */
+ hsize_t ss_count[H5S_MAX_RANK]; /* Info for hyperslab */
+ int i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ /* Subsetting information for the tools library printing routines */
+ subset.start.data = ss_start;
+ subset.stride.data = ss_stride;
+ subset.block.data = ss_block;
+ subset.count.data = ss_count;
+
+ /* Initialize subsetting information */
+ for(i = 0; i < rank; i++) {
+ subset.stride.data[i] = 1;
+ subset.count.data[i] = 1;
+ subset.start.data[i] = start[i];
+ subset.block.data[i] = block[i];
+ }
+
+ HDmemset(&ctx, 0, sizeof(ctx));
+
+ /* Set to all default values and then override */
+ HDmemset(&info, 0, sizeof info);
+
+ if(g_simple_output) {
+ info.idx_fmt = "";
+ info.line_ncols = 65535; /*something big*/
+ info.line_per_line = 1;
+ info.line_multi_new = 0;
+ info.line_pre = " ";
+ info.line_cont = " ";
+
+ info.arr_pre = "";
+ info.arr_suf = "";
+ info.arr_sep = " ";
+
+ info.cmpd_pre = "";
+ info.cmpd_suf = "";
+ info.cmpd_sep = " ";
+
+ /* The "fields" selected by the user */
+ info.cmpd_listv = (const struct H5LD_memb_t **)g_listv;
+
+ if(g_label) info.cmpd_name = "%s=";
+
+ info.elmt_suf1 = " ";
+ info.str_locale = ESCAPE_HTML;
+
+ } else {
+ info.idx_fmt = "(%s)";
+ if(!g_display_width) {
+ info.line_ncols = 65535;
+ info.line_per_line = 1;
+ }
+ else
+ info.line_ncols = (unsigned)g_display_width;
+
+ info.line_multi_new = 1;
+
+ /* The "fields" selected by the user */
+ info.cmpd_listv = (const struct H5LD_memb_t **)g_listv;
+ if(g_label) info.cmpd_name = "%s=";
+ info.line_pre = " %s ";
+ info.line_cont = " %s ";
+ info.str_repeat = 8;
+ }
+
+ /* Floating point types should display full precision */
+ sprintf(fmt_float, "%%1.%dg", FLT_DIG);
+ info.fmt_float = fmt_float;
+ sprintf(fmt_double, "%%1.%dg", DBL_DIG);
+ info.fmt_double = fmt_double;
+
+ info.dset_format = "DSET-%s ";
+ info.dset_hidefileno = 0;
+
+ info.obj_format = "-%lu:"H5_PRINTF_HADDR_FMT;
+ info.obj_hidefileno = 0;
+
+ info.dset_blockformat_pre = "%sBlk%lu: ";
+ info.dset_ptformat_pre = "%sPt%lu: ";
+
+ info.line_indent = "";
+
+ if(g_display_hex) {
+ /* Print all data in hexadecimal format if the `-x' or `--hexdump'
+ * command line switch was given. */
+ info.raw = TRUE;
+ }
+
+ /* Print the values. */
+ if((ret_value = h5tools_dump_dset(stdout, &info, &ctx, did, &subset)) < 0)
+ error_msg("unable to print data\n");
+
+ HDfprintf(stdout, "\n");
+
+ return(ret_value);
+
+} /* doprint() */
+
+/*-------------------------------------------------------------------------
+ * Function: slicendump
+ *
+ * Purpose: To dump the slice for each dimension
+ * For example: prev_dims[2] = {5, 4}; cur_dims[2] = {7, 8}
+ * This routine will dump data as follows:
+ *			{0, 4} to {0, 7}	(1x4 elements)
+ *			{1, 4} to {1, 7}	(1x4 elements)
+ *			{2, 4} to {2, 7}	(1x4 elements)
+ *			{3, 4} to {3, 7}	(1x4 elements)
+ *			{4, 4} to {4, 7}	(1x4 elements)
+ * {5, 0} to {6, 7} (2x8 elements)
+ *
+ * Return: Non-negative on success
+ * Negative on failure
+ *
+ * Programmer: Vailin Choi; August 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+slicendump(hid_t did, hsize_t *prev_dims, hsize_t *cur_dims, hsize_t *start, hsize_t *block, int rank, int subrank)
+{
+ int i; /* Local index variable */
+ int ind; /* Index for the current rank */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ ind = rank - subrank;
+
+ if((subrank - 1) > 0) {
+ /* continue onto the next dimension */
+ for (i = 0; i < (hssize_t)MIN(prev_dims[ind], cur_dims[ind]); i++){
+ start[ind] = (hsize_t)i;
+ if((ret_value = slicendump(did, prev_dims, cur_dims, start, block, rank, subrank-1)) < 0)
+ goto done;
+ }
+ }
+
+ /* this dimension remains the same or shrinking */
+ if(cur_dims[ind] <= prev_dims[ind])
+ goto done;
+
+ /* select first the slice for the faster changing dimension */
+ /* select later the whole slice for the slower changing dimension */
+ start[ind] = prev_dims[ind];
+ block[ind] = cur_dims[ind] - prev_dims[ind];
+
+ for(i = ind + 1; i < rank; i++){
+ start[i] = 0;
+ block[i] = cur_dims[i];
+ }
+
+ /* Print the appended data */
+ ret_value = doprint(did, start, block, rank);
+
+done:
+ return(ret_value);
+} /* slicendump() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: monitor_dataset
+ *
+ * Purpose: To poll a dataset periodically for changes in dimension sizes.
+ * For dataset with unchanged and/or decreased dimension sizes:
+ * it just prints the dimension size changes
+ * For dataset with increase in at least one of its dimension sizes:
+ * it will print the new appended data to the dataset
+ *
+ * Return: Non-negative on success: dataset can be monitored
+ * Negative on failure: dataset cannot be monitored
+ *
+ * Programmer: Vailin Choi; August 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+monitor_dataset(hid_t fid, char *dsetname)
+{
+ hid_t did; /* dataset id */
+ hid_t sid; /* dataspace id */
+ int ndims; /* # of dimensions in the dataspace */
+ int i, u; /* local index variable */
+    hsize_t prev_dims[H5S_MAX_RANK];	/* previous dataspace dimensions */
+    hsize_t cur_dims[H5S_MAX_RANK];	/* current dataspace dimensions */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ HDfprintf(stdout, "Monitoring dataset %s...\n", dsetname);
+
+    /* Open the dataset for monitoring */
+ if((did = H5Dopen2(fid, dsetname, H5P_DEFAULT)) < 0) {
+ error_msg("error in opening dataset \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
+ }
+ if((sid = H5Dget_space(did)) < 0) {
+ error_msg("error in getting dataspace id for dataset \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
+ }
+
+ /* Get the dataset's dimension sizes */
+ if((ndims = H5Sget_simple_extent_dims(sid, prev_dims, NULL)) < 0) {
+ error_msg("unable to get dimensions sizes for \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
+ }
+
+ while(1) {
+
+ /* Refreshes the dataset */
+ if(H5Drefresh(did) < 0) {
+ ret_value = FAIL;
+ goto done;
+ }
+
+ /* Get the dataset's current dimension sizes */
+ if(H5LDget_dset_dims(did, cur_dims) < 0) {
+ error_msg("unable to get dimension sizes for \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
+ }
+
+ /* Check the dimension sizes */
+ for(i = 0; i < ndims; i++)
+ if(cur_dims[i] != prev_dims[i])
+ break;
+
+ /* at least one dimension has changed */
+ if(i != ndims) {
+ /* Printing changes in dimension sizes */
+ for(u = 0; u < ndims; u++) {
+ HDfprintf(stdout, "dimension %u: %Hu->%Hu", (unsigned)u, prev_dims[u], cur_dims[u]);
+ if(cur_dims[u] > prev_dims[u])
+ HDfprintf(stdout, " (increases)\n");
+ else if(cur_dims[u] < prev_dims[u])
+ HDfprintf(stdout, " (decreases)\n");
+ else
+ HDfprintf(stdout, " (unchanged)\n");
+ }
+
+	    /* Print any elements newly appended to the dataset */
+ if(!g_monitor_size_only) {
+
+ /* See if at least one dimension size has increased */
+ for(u = 0; u < ndims; u++) {
+ int j;
+ hsize_t start[H5S_MAX_RANK];
+ hsize_t block[H5S_MAX_RANK];
+
+ /* Print the new appended data to the dataset */
+ if(cur_dims[u] > prev_dims[u]) {
+ HDfprintf(stdout, " Data:\n");
+
+ for(j = 0; j < ndims; j++) {
+ start[j] = 0;
+ block[j] = 1;
+ }
+
+ if((ret_value = slicendump(did, prev_dims, cur_dims, start, block, ndims, ndims)) < 0)
+ goto done;
+ break;
+ }
+ } /* end for */
+ }
+ HDfflush(stdout);
+ }
+
+ /* Save the current dimension sizes */
+ HDmemcpy(prev_dims, cur_dims, (size_t)ndims * sizeof(hsize_t));
+
+ /* Sleep before next monitor */
+ HDsleep(g_polling_interval);
+ } /* end while */
+
+done:
+ /* Closing */
+ H5E_BEGIN_TRY
+ H5Dclose(did);
+ H5E_END_TRY
+
+ return(ret_value);
+} /* monitor_dataset() */
+
+/*-------------------------------------------------------------------------
+ * Function: process_cmpd_fields
+ *
+ * Purpose: To check whether the fields selected in "g_list_of_fields"
+ * are valid fields associated with the dataset.
+ *
+ * Return: 0 on success; negative on failure
+ *
+ * Programmer: Vailin Choi; August 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+process_cmpd_fields(hid_t fid, char *dsetname)
+{
+ hid_t did=-1; /* dataset id */
+ hid_t dtid=-1, tid=-1; /* dataset's data type id */
+ size_t len; /* number of comma-separated fields in "g_list_of_fields" */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ HDassert(g_list_of_fields && *g_list_of_fields);
+
+ /* Open the dataset */
+ if((did = H5Dopen2(fid, dsetname, H5P_DEFAULT)) < 0) {
+ error_msg("error in opening dataset \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
+ }
+
+ /* Get the dataset's datatype */
+ if(((dtid = H5Dget_type(did)) < 0) || (tid = H5Tget_native_type(dtid, H5T_DIR_DEFAULT)) < 0) {
+ error_msg("error in getting dataset's datatype\n");
+ ret_value = FAIL;
+ goto done;
+ }
+
+ /* Check to make sure that the dataset's datatype is compound type */
+ if(H5Tget_class(dtid) != H5T_COMPOUND) {
+ error_msg("dataset should be compound type for <list_of_fields>\n");
+ ret_value = FAIL;
+ goto done;
+ }
+
+ /* Make a copy of "g_list_of_fields" */
+ if((g_dup_fields = HDstrdup(g_list_of_fields)) == NULL) {
+ error_msg("error in duplicating g_list_of_fields\n");
+ ret_value = FAIL;
+ goto done;
+ }
+
+    /* Estimate the number of comma-separated fields in "g_list_of_fields" */
+ len = HDstrlen(g_list_of_fields)/2 + 2;
+
+ /* Allocate memory for a list vector of H5LD_memb_t structures to store "g_list_of_fields" info */
+ if((g_listv = (H5LD_memb_t **)HDcalloc(len, sizeof(H5LD_memb_t *))) == NULL) {
+ error_msg("error in allocating memory for H5LD_memb_t\n");
+ ret_value = FAIL;
+ goto done;
+ }
+
+ /* Process and store info for "g_listv" */
+ if(H5LD_construct_vector(g_dup_fields, g_listv, tid) < 0) {
+ error_msg("error in processing <list_of_fields>\n");
+ ret_value = FAIL;
+ goto done;
+ }
+
+ /* Will free memory for g_listv and g_dup_fields when exiting from h5watch */
+done:
+ /* Closing */
+ H5E_BEGIN_TRY
+ H5Tclose(dtid);
+ H5Tclose(tid);
+ H5Dclose(did);
+ H5E_END_TRY
+ return(ret_value);
+} /* process_cmpd_fields() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_dataset
+ *
+ * Purpose: To check whether a dataset can be monitored:
+ *		A chunked dataset with unlimited or max. dimension setting
+ *
+ * Return: Non-negative on success: dataset can be monitored
+ * Negative on failure: dataset cannot be monitored
+ *
+ * Programmer: Vailin Choi; August 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+check_dataset(hid_t fid, char *dsetname)
+{
+ hid_t did=-1; /* Dataset id */
+ hid_t dcp=-1; /* Dataset creation property */
+ hid_t sid=-1; /* Dataset's dataspace id */
+ int ndims; /* # of dimensions in the dataspace */
+ unsigned u; /* Local index variable */
+ hsize_t cur_dims[H5S_MAX_RANK]; /* size of dataspace dimensions */
+ hsize_t max_dims[H5S_MAX_RANK]; /* maximum size of dataspace dimensions */
+ hbool_t unlim_max_dims = FALSE; /* whether dataset has unlimited or max. dimension setting */
+ void *edata;
+ H5E_auto2_t func;
+ H5D_layout_t layout;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ /* Disable error reporting */
+ H5Eget_auto2(H5E_DEFAULT, &func, &edata);
+ H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+
+ /* Open the dataset */
+ if((did = H5Dopen2(fid, dsetname, H5P_DEFAULT)) < 0) {
+ error_msg("unable to open dataset \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
+ }
+
+ /* Get dataset's creation property list */
+ if((dcp = H5Dget_create_plist(did)) < 0) {
+ error_msg("unable to get dataset's creation property list \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
+ }
+
+ /* Query dataset's layout; the layout should be chunked or virtual */
+ if((layout = H5Pget_layout(dcp)) < 0) {
+ error_msg("unable to get dataset layout \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
+ }
+ if(layout != H5D_CHUNKED && layout != H5D_VIRTUAL) {
+ error_msg("\"%s\" should be a chunked or virtual dataset\n", dsetname);
+ ret_value = FAIL;
+ goto done;
+ }
+
+ HDmemset(cur_dims, 0, sizeof cur_dims);
+ HDmemset(max_dims, 0, sizeof max_dims);
+
+ /* Get dataset's dataspace */
+ if((sid = H5Dget_space(did)) < 0) {
+ error_msg("can't get dataset's dataspace\"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
+ }
+
+ /* Get dimension size of dataset's dataspace */
+ if((ndims = H5Sget_simple_extent_dims(sid, cur_dims, max_dims)) < 0) {
+ error_msg("can't get dataspace dimensions for dataset \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
+ }
+
+ /* Check whether dataset has unlimited dimension or max. dimension setting */
+ for(u = 0; u < (unsigned)ndims; u++)
+ if(max_dims[u] == H5S_UNLIMITED || cur_dims[u] != max_dims[u]) {
+ unlim_max_dims = TRUE;
+ break;
+ }
+
+ if(!unlim_max_dims) {
+ error_msg("\"%s\" should have unlimited or max. dimension setting\n", dsetname);
+ ret_value = FAIL;
+ }
+
+done:
+ H5Eset_auto2(H5E_DEFAULT, func, edata);
+
+ /* Closing */
+ H5E_BEGIN_TRY
+ H5Sclose(sid);
+ H5Pclose(dcp);
+ H5Dclose(did);
+ H5E_END_TRY
+
+ return(ret_value);
+} /* check_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: leave
+ *
+ * Purpose: Close the H5 Tools library and exit
+ *
+ * Return: Does not return
+ *
+ * Programmer: Vailin Choi; August 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+leave(int ret)
+{
+ h5tools_close();
+
+ exit(ret);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: usage
+ *
+ * Purpose: Print the usage message about h5watch (only long options)
+ *
+ * Return: void
+ *
+ * Programmer: Vailin Choi; August 2010
+ *
+ * Modifications:
+ *-------------------------------------------------------------------------
+ */
+static void
+usage(const char *prog)
+{
+ HDfflush(stdout);
+ HDfprintf(stdout, "Usage: %s [OPTIONS] [OBJECT]\n", prog);
+ HDfprintf(stdout, "\n");
+ HDfprintf(stdout, " OPTIONS\n");
+ HDfprintf(stdout, " --help Print a usage message and exit.\n");
+ HDfprintf(stdout, " --version Print version number and exit.\n");
+ HDfprintf(stdout, " --label Label members of compound typed dataset.\n");
+ HDfprintf(stdout, " --simple Use a machine-readable output format.\n");
+ HDfprintf(stdout, " --dim Monitor changes in size of dataset dimensions only.\n");
+ HDfprintf(stdout, " --width=N Set the number of columns to N for output.\n");
+ HDfprintf(stdout, " A value of 0 sets the number of columns to the\n");
+ HDfprintf(stdout, " maximum (65535). The default width is 80 columns.\n");
+ HDfprintf(stdout, " --polling=N Set the polling interval to N (in seconds) when the\n");
+ HDfprintf(stdout, " dataset will be checked for appended data. The default\n");
+ HDfprintf(stdout, " polling interval is 1.\n");
+ HDfprintf(stdout, " --fields=<list_of_fields>\n");
+ HDfprintf(stdout, " Display data for the fields specified in <list_of_fields>\n");
+ HDfprintf(stdout, " for a compound data type. <list_of_fields> can be\n");
+ HDfprintf(stdout, " specified as follows:\n");
+ HDfprintf(stdout, " 1) A comma-separated list of field names in a\n");
+ HDfprintf(stdout, " compound data type. \",\" is the separator\n");
+ HDfprintf(stdout, " for field names while \".\" is the separator\n");
+ HDfprintf(stdout, " for a nested field.\n");
+ HDfprintf(stdout, " 2) A single field name in a compound data type.\n");
+ HDfprintf(stdout, " Can use this option multiple times.\n");
+ HDfprintf(stdout, " Note that backslash is the escape character to avoid\n");
+ HDfprintf(stdout, " characters in field names that conflict with the tool's\n");
+ HDfprintf(stdout, " separators.\n");
+ HDfprintf(stdout, "\n");
+ HDfprintf(stdout, " OBJECT is specified as [<filename>/<path_to_dataset>/<dsetname>]\n");
+ HDfprintf(stdout, " <filename> Name of the HDF5 file. It may be preceded by path\n");
+ HDfprintf(stdout, " separated by slashes to the specified HDF5 file.\n");
+ HDfprintf(stdout, " <path_to_dataset> Path separated by slashes to the specified dataset\n");
+ HDfprintf(stdout, " <dsetname> Name of the dataset\n");
+ HDfprintf(stdout, "\n");
+ HDfprintf(stdout, " User can end the h5watch process by ctrl-C (SIGINT) or kill the process (SIGTERM).\n");
+
+} /* usage() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: parse_command_line
+ *
+ * Purpose: Parse the command line for h5watch (take only long options)
+ *
+ * Return: Success: Set the corresponding command flags and return void
+ * Failure: Exits program with EXIT_FAILURE value.
+ *
+ * Programmer: Vailin Choi; August 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+parse_command_line(int argc, const char *argv[])
+{
+ int opt; /* Command line option */
+ int tmp;
+
+ /* no arguments */
+ if (argc == 1) {
+ usage(h5tools_getprogname());
+ leave(EXIT_FAILURE);
+ }
+
+ /* parse command line options */
+ while ((opt = get_option(argc, argv, s_opts, l_opts)) != EOF) {
+ switch ((char)opt) {
+ case '?':
+ case 'h': /* --help */
+ usage(h5tools_getprogname());
+ leave(EXIT_SUCCESS);
+
+ case 'V': /* --version */
+ print_version(progname);
+ leave(EXIT_SUCCESS);
+ break;
+
+ case 'w': /* --width=N */
+ g_display_width = (int)HDstrtol(opt_arg, NULL, 0);
+ if(g_display_width < 0) {
+ usage(h5tools_getprogname());
+ leave(EXIT_FAILURE);
+ }
+ break;
+
+ case 'd': /* --dim */
+ g_monitor_size_only = TRUE;
+ break;
+
+ case 'S': /* --simple */
+ g_simple_output = TRUE;
+ break;
+
+ case 'l': /* --label */
+ g_label = TRUE;
+ break;
+
+ case 'p': /* --polling=N */
+ /* g_polling_interval = HDstrtod(opt_arg, NULL); */
+ if((tmp = (int)HDstrtol(opt_arg, NULL, 10)) <= 0) {
+ usage(h5tools_getprogname());
+ leave(EXIT_FAILURE);
+ }
+ g_polling_interval = (unsigned)tmp;
+ break;
+
+ case 'f': /* --fields=<list_of_fields> */
+ if(g_list_of_fields == NULL) {
+ if((g_list_of_fields = HDstrdup(opt_arg)) == NULL) {
+ error_msg("memory allocation failed (file %s:line %d)\n",
+ __FILE__, __LINE__);
+ leave(EXIT_FAILURE);
+ }
+ } else {
+ char *str;
+
+ if((str = HDstrdup(opt_arg)) == NULL) {
+ error_msg("memory allocation failed (file %s:line %d)\n",
+ __FILE__, __LINE__);
+ leave(EXIT_FAILURE);
+ }
+ if((g_list_of_fields = (char *)HDrealloc(g_list_of_fields, HDstrlen(g_list_of_fields) + HDstrlen(str) + 2)) == NULL) {
+ error_msg("memory allocation failed (file %s:line %d)\n",
+ __FILE__, __LINE__);
+ leave(EXIT_FAILURE);
+
+ }
+ HDstrcat(g_list_of_fields, FIELD_SEP);
+ HDstrcat(g_list_of_fields, str);
+ }
+
+ break;
+
+ default:
+ usage(h5tools_getprogname());
+ leave(EXIT_FAILURE);
+ }
+ }
+
+
+ /* check for object to be processed */
+ if (argc <= opt_ind) {
+ error_msg("missing dataset name\n");
+ usage(h5tools_getprogname());
+ leave(EXIT_FAILURE);
+ }
+} /* parse_command_line() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: catch_signal
+ *
+ * Purpose: The signal handler to catch the signals:
+ * SIGTERM and SIGINT and exit from h5watch
+ *
+ * Return: No return
+ *
+ * Programmer: Vailin Choi; November 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static void catch_signal(int H5_ATTR_UNUSED signo)
+{
+ /* Exit from h5watch */
+ leave(EXIT_SUCCESS);
+
+} /* catch_signal() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: h5watch
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi; August 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(int argc, const char *argv[])
+{
+ char drivername[50];
+ char *fname = NULL;
+ char *dname = NULL;
+ void *edata;
+ H5E_auto2_t func;
+ char *x;
+ hid_t fid = -1;
+ hid_t fapl = -1;
+
+ /* Set up tool name and exit status */
+ h5tools_setprogname(PROGRAMNAME);
+ h5tools_setstatus(EXIT_SUCCESS);
+
+ /* Disable error reporting */
+ H5Eget_auto2(H5E_DEFAULT, &func, &edata);
+ H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+
+ /* Initialize h5tools lib */
+ h5tools_init();
+
+ /* To exit from h5watch for SIGTERM signal */
+ if(HDsignal(SIGTERM, catch_signal) == SIG_ERR) {
+ error_msg("An error occurred while setting a signal handler.\n");
+ leave(EXIT_FAILURE);
+ }
+
+ /* To exit from h5watch for SIGINT signal */
+ if(HDsignal(SIGINT, catch_signal) == SIG_ERR) {
+ error_msg("An error occurred while setting a signal handler.\n");
+ leave(EXIT_FAILURE);
+ }
+
+ /* parse command line options */
+ parse_command_line(argc, argv);
+
+ if(argc <= opt_ind) {
+ error_msg("missing dataset name\n");
+ usage(h5tools_getprogname());
+ leave(EXIT_FAILURE);
+ }
+
+ /* Mostly copied from tools/h5ls coding & modified accordingly */
+ /*
+ * [OBJECT] is specified as
+ * [<filename>/<path_to_dataset>/<dsetname>]
+ *
+ * Example: ../dir1/foo/bar/dset
+ * \_________/\______/
+ * file obj
+ *
+ * The dichotomy is determined by calling H5Fopen() repeatedly until it
+ * succeeds. The first call uses the entire name and each subsequent call
+ * chops off the last component. If we reach the beginning of the name
+ * then there must have been something wrong with the file (perhaps it
+ * doesn't exist).
+ */
+ if((fname = HDstrdup(argv[opt_ind])) == NULL) {
+ error_msg("memory allocation failed (file %s:line %d)\n",
+ __FILE__, __LINE__);
+ h5tools_setstatus(EXIT_FAILURE);
+ }
+
+ /* Create a copy of file access property list */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ return -1;
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+ do {
+ while(fname && *fname) {
+ fid = h5tools_fopen(fname, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl, NULL, drivername, sizeof drivername);
+
+ if(fid >= 0) {
+ HDfprintf(stdout, "Opened \"%s\" with %s driver.\n", fname, drivername);
+ break; /*success*/
+ } /* end if */
+
+ /* Shorten the file name; lengthen the object name */
+ x = dname;
+ dname = HDstrrchr(fname, '/');
+ if(x)
+ *x = '/';
+ if(!dname)
+ break;
+ *dname = '\0';
+ } /* end while */
+ /* Try opening the file again if somehow unstable */
+ } while(g_retry-- > 0 && fid == FAIL);
+
+ if(fid < 0) {
+ error_msg("unable to open file \"%s\"\n", fname);
+ if(fname) HDfree(fname);
+ if(fapl >= 0) H5Pclose(fapl);
+ leave(EXIT_FAILURE);
+ }
+
+ if(!dname) {
+ error_msg("no dataset specified\n");
+ h5tools_setstatus(EXIT_FAILURE);
+ } else {
+ *dname = '/';
+ x = dname;
+ if((dname = HDstrdup(dname)) == NULL) {
+ error_msg("memory allocation failed (file %s:line %d)\n",
+ __FILE__, __LINE__);
+ h5tools_setstatus(EXIT_FAILURE);
+ } else {
+ *x = '\0';
+ /* Validate dataset */
+ if(check_dataset(fid, dname) < 0)
+ h5tools_setstatus(EXIT_FAILURE);
+ /* Validate input "fields" */
+ else if(g_list_of_fields && *g_list_of_fields)
+ if(process_cmpd_fields(fid, dname) < 0)
+ h5tools_setstatus(EXIT_FAILURE);
+ }
+ }
+
+    /* If everything is fine, start monitoring the dataset */
+ if(h5tools_getstatus() != EXIT_FAILURE)
+ if(monitor_dataset(fid, dname) < 0)
+ h5tools_setstatus(EXIT_FAILURE);
+
+ /* Free spaces */
+ if(fname) HDfree(fname);
+ if(dname) HDfree(dname);
+ if(g_list_of_fields) HDfree(g_list_of_fields);
+ if(g_listv) {
+ H5LD_clean_vector(g_listv);
+ HDfree(g_listv);
+ }
+ if(g_dup_fields) HDfree(g_dup_fields);
+
+ /* Close the file access property list */
+ if(fapl >= 0 && H5Pclose(fapl) < 0) {
+ error_msg("unable to close file access property list\n");
+ h5tools_setstatus(EXIT_FAILURE);
+ }
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0) {
+ error_msg("unable to close file\n");
+ h5tools_setstatus(EXIT_FAILURE);
+ }
+
+ H5Eset_auto2(H5E_DEFAULT, func, edata);
+ /* exit */
+ leave(h5tools_getstatus());
+} /* main() */
diff --git a/hl/tools/h5watch/h5watchgentest.c b/hl/tools/h5watch/h5watchgentest.c
new file mode 100644
index 0000000..d70a690
--- /dev/null
+++ b/hl/tools/h5watch/h5watchgentest.c
@@ -0,0 +1,355 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "hdf5.h"
+#include "H5HLprivate2.h"
+#include <time.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <string.h>
+
+/*
+ * WATCH.h5: file with various types of datasets for testing--
+ *
+ * The following datasets are chunked, H5D_ALLOC_TIME_INCR, max. dimensional setting:
+ * DSET_ONE: one-dimensional dataset
+ * DSET_TWO: two-dimensional dataset
+ * DSET_CMPD: one-dimensional dataset with compound type
+ * DSET_CMPD_ESC: one-dimensional dataset with compound type and member names with
+ * escape/separator characters
+ * DSET_CMPD_TWO: two-dimensional dataset with compound type
+ *
+ * The following datasets are one-dimensional, chunked, max. dimension setting:
+ * DSET_ALLOC_EARLY: dataset with H5D_ALLOC_TIME_EARLY
+ * DSET_ALLOC_LATE: dataset H5D_ALLOC_TIME_LATE
+ *
+ * The following datasets are one-dimensional:
+ * DSET_NONE: fixed dimension setting, contiguous, H5D_ALLOC_TIME_LATE
+ * DSET_NOMAX: fixed dimension setting, chunked, H5D_ALLOC_TIME_INCR
+ */
+#define ONE_DIMS0 10
+#define MAX_ONE_DIMS0 100
+
+#define DSET_ONE "DSET_ONE"
+#define DSET_NONE "DSET_NONE"
+#define DSET_NOMAX "DSET_NOMAX"
+#define DSET_ALLOC_LATE "DSET_ALLOC_LATE"
+#define DSET_ALLOC_EARLY "DSET_ALLOC_EARLY"
+#define DSET_CMPD "DSET_CMPD"
+#define DSET_CMPD_ESC "DSET_CMPD_ESC"
+
+#define TWO_DIMS0 4
+#define TWO_DIMS1 10
+#define MAX_TWO_DIMS0 60
+#define MAX_TWO_DIMS1 100
+
+#define DSET_TWO "DSET_TWO"
+#define DSET_CMPD_TWO "DSET_CMPD_TWO"
+
+#define CHUNK_SIZE 2
+
+#define FILE "WATCH.h5"
+
+/* Data structures for datasets with compound types */
+typedef struct sub22_t {
+ unsigned int a;
+ unsigned int b;
+ unsigned int c;
+} sub22_t;
+
+typedef struct sub2_t {
+ unsigned int a;
+ sub22_t b;
+ unsigned int c;
+} sub2_t;
+
+typedef struct sub4_t {
+ unsigned int a;
+ unsigned int b;
+} sub4_t;
+
+typedef struct set_t {
+ unsigned int field1;
+ sub2_t field2;
+ double field3;
+ sub4_t field4;
+} set_t;
+
+/*
+ **************************************************************************************
+ *
+ * Create a dataset with the given input parameters
+ * Write to the dataset with the given "data"
+ *
+ **************************************************************************************
+ */
+static int
+generate_dset(hid_t fid, const char *dname, int ndims, hsize_t *dims, hsize_t *maxdims, hid_t dtid, void *data)
+{
+ hid_t dcpl=-1; /* Dataset creation property */
+ hid_t did=-1; /* Dataset id */
+ hid_t sid=-1; /* Dataspace id */
+ int i; /* Local index variable */
+ hsize_t chunk_dims[H5S_MAX_RANK]; /* Dimension sizes for chunks */
+
+ /* Create the dataspace */
+ if((sid = H5Screate_simple(ndims, dims, maxdims)) < 0)
+ goto done;
+
+ /* Set up dataset's creation properties */
+ if(!HDstrcmp(dname, DSET_NONE))
+ dcpl = H5P_DEFAULT;
+ else {
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ goto done;
+ for(i = 0; i < ndims; i++)
+ chunk_dims[i] = CHUNK_SIZE;
+ if(H5Pset_chunk(dcpl, ndims, chunk_dims) < 0)
+ goto done;
+ }
+
+ if(!HDstrcmp(dname, DSET_ALLOC_LATE)) {
+ if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE) < 0)
+ goto done;
+ } else if(!HDstrcmp(dname, DSET_ALLOC_EARLY)) {
+ if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0)
+ goto done;
+ }
+
+ /* Create the dataset */
+ if((did = H5Dcreate2(fid, dname, dtid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ goto done;
+
+ /* Write to the dataset */
+ if(H5Dwrite(did, dtid, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ goto done;
+
+ /* Closing */
+ if(H5Pclose(dcpl) < 0) goto done;
+ if(H5Sclose(sid) < 0) goto done;
+ if(H5Dclose(did) < 0) goto done;
+
+ return(SUCCEED);
+
+done:
+ H5E_BEGIN_TRY
+ H5Sclose(sid);
+ H5Pclose(dcpl);
+ H5Dclose(did);
+ H5E_END_TRY
+
+ return(FAIL);
+} /* generate_dset() */
+
+int
+main(void)
+{
+ hid_t fid=-1; /* File id */
+ hid_t fapl=-1; /* File access property list id */
+ hsize_t cur_dims[1]; /* Dimension sizes */
+ hsize_t max_dims[1]; /* Maximum dimension sizes */
+ hsize_t cur2_dims[2]; /* Current dimension sizes */
+ hsize_t max2_dims[2]; /* Maximum dimension sizes */
+ hid_t set_tid=-1, esc_set_tid=-1; /* Compound type id */
+ hid_t sub22_tid=-1; /* Compound type id */
+ hid_t sub2_tid=-1, esc_sub2_tid=-1; /* Compound type id */
+ hid_t sub4_tid=-1, esc_sub4_tid=-1; /* Compound type id */
+ int one_data[ONE_DIMS0]; /* Buffer for data */
+ int two_data[TWO_DIMS0*TWO_DIMS1]; /* Buffer for data */
+ set_t one_cbuf[ONE_DIMS0]; /* Buffer for data with compound type */
+ set_t two_cbuf[TWO_DIMS0*TWO_DIMS1]; /* Buffer for data with compound type */
+ int i; /* Local index variable */
+
+ /* Create a copy of file access property list */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ return -1;
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+ /* Create a file with the latest format */
+ if((fid = H5Fcreate(FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ goto done;
+
+ /* Initialization for one-dimensional dataset */
+ cur_dims[0] = ONE_DIMS0;
+ max_dims[0] = MAX_ONE_DIMS0;
+ for(i = 0; i < ONE_DIMS0; i++)
+ one_data[i] = i;
+
+    /* Generate DSET_ONE, DSET_NONE, DSET_NOMAX, DSET_ALLOC_LATE, DSET_ALLOC_EARLY */
+ if(generate_dset(fid, DSET_ONE, 1, cur_dims, max_dims, H5T_NATIVE_INT, one_data) < 0)
+ goto done;
+ if(generate_dset(fid, DSET_NONE, 1, cur_dims, NULL, H5T_NATIVE_INT, one_data) < 0)
+ goto done;
+ if(generate_dset(fid, DSET_NOMAX, 1, cur_dims, NULL, H5T_NATIVE_INT, one_data) < 0)
+ goto done;
+ if(generate_dset(fid, DSET_ALLOC_LATE, 1, cur_dims, max_dims, H5T_NATIVE_INT, one_data) < 0)
+ goto done;
+ if(generate_dset(fid, DSET_ALLOC_EARLY, 1, cur_dims, max_dims, H5T_NATIVE_INT, one_data) < 0)
+ goto done;
+
+ /* Initialization for two-dimensional dataset */
+ cur2_dims[0] = TWO_DIMS0;
+ cur2_dims[1] = TWO_DIMS1;
+ max2_dims[0] = MAX_TWO_DIMS0;
+ max2_dims[1] = MAX_TWO_DIMS1;
+
+ for(i = 0; i < (TWO_DIMS0 * TWO_DIMS1); i++)
+ two_data[i] = i;
+
+ /* Generate DSET_TWO */
+ if(generate_dset(fid, DSET_TWO, 2, cur2_dims, max2_dims, H5T_NATIVE_INT, two_data) < 0)
+ goto done;
+
+ /* Initialization for one-dimensional compound typed dataset */
+ cur_dims[0] = ONE_DIMS0;
+ max_dims[0] = MAX_ONE_DIMS0;
+
+ for (i = 0; i < ONE_DIMS0; i++) {
+ one_cbuf[i].field1 = 1;
+ one_cbuf[i].field2.a = 2;
+ one_cbuf[i].field2.c = 4;
+ one_cbuf[i].field2.b.a = 20;
+ one_cbuf[i].field2.b.b = 40;
+ one_cbuf[i].field2.b.c = 80;
+ one_cbuf[i].field3 = 3.0F;
+ one_cbuf[i].field4.a = 4;
+ one_cbuf[i].field4.b = 8;
+ }
+
+ /* Create the compound type */
+ if((sub22_tid = H5Tcreate(H5T_COMPOUND, sizeof(sub22_t))) < 0)
+ goto done;
+ if(H5Tinsert(sub22_tid, "a", HOFFSET(sub22_t, a), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(sub22_tid, "b", HOFFSET(sub22_t, b), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(sub22_tid, "c", HOFFSET(sub22_t, c), H5T_NATIVE_INT) < 0)
+ goto done;
+
+ if((sub2_tid = H5Tcreate(H5T_COMPOUND, sizeof(sub2_t))) < 0)
+ goto done;
+ if(H5Tinsert(sub2_tid, "a", HOFFSET(sub2_t, a), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(sub2_tid, "b", HOFFSET(sub2_t, b), sub22_tid) < 0)
+ goto done;
+ if(H5Tinsert(sub2_tid, "c", HOFFSET(sub2_t, c), H5T_NATIVE_INT) < 0)
+ goto done;
+
+ if((sub4_tid = H5Tcreate(H5T_COMPOUND, sizeof(sub4_t))) < 0)
+ goto done;
+ if(H5Tinsert(sub4_tid, "a", HOFFSET(sub4_t, a), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(sub4_tid, "b", HOFFSET(sub4_t, b), H5T_NATIVE_INT) < 0)
+ goto done;
+
+ if((set_tid = H5Tcreate(H5T_COMPOUND, sizeof(set_t))) < 0)
+ goto done;
+ if(H5Tinsert(set_tid, "field1", HOFFSET(set_t, field1), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(set_tid, "field2", HOFFSET(set_t, field2), sub2_tid) < 0)
+ goto done;
+ if(H5Tinsert(set_tid, "field3", HOFFSET(set_t, field3), H5T_NATIVE_DOUBLE) < 0)
+ goto done;
+ if(H5Tinsert(set_tid, "field4", HOFFSET(set_t, field4), sub4_tid) < 0)
+ goto done;
+
+ /* Create the compound type with escape/separator characters */
+ if((esc_sub2_tid = H5Tcreate(H5T_COMPOUND, sizeof(sub2_t))) < 0)
+ goto done;
+ if(H5Tinsert(esc_sub2_tid, ".a", HOFFSET(sub2_t, a), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(esc_sub2_tid, ",b", HOFFSET(sub2_t, b), sub22_tid) < 0)
+ goto done;
+ if(H5Tinsert(esc_sub2_tid, "\\K", HOFFSET(sub2_t, c), H5T_NATIVE_INT) < 0)
+ goto done;
+
+ if((esc_sub4_tid = H5Tcreate(H5T_COMPOUND, sizeof(sub4_t))) < 0)
+ goto done;
+ if(H5Tinsert(esc_sub4_tid, "a.", HOFFSET(sub4_t, a), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(esc_sub4_tid, "b,", HOFFSET(sub4_t, b), H5T_NATIVE_INT) < 0)
+ goto done;
+
+ if((esc_set_tid = H5Tcreate(H5T_COMPOUND, sizeof(set_t))) < 0)
+ goto done;
+ if(H5Tinsert(esc_set_tid, "field,1", HOFFSET(set_t, field1), H5T_NATIVE_INT) < 0)
+ goto done;
+ if(H5Tinsert(esc_set_tid, "field2.", HOFFSET(set_t, field2), esc_sub2_tid) < 0)
+ goto done;
+ if(H5Tinsert(esc_set_tid, "field\\3", HOFFSET(set_t, field3), H5T_NATIVE_DOUBLE) < 0)
+ goto done;
+ if(H5Tinsert(esc_set_tid, "field4,", HOFFSET(set_t, field4), esc_sub4_tid) < 0)
+ goto done;
+
+ /* Generate DSET_CMPD, DSET_CMPD_ESC */
+ if(generate_dset(fid, DSET_CMPD, 1, cur_dims, max_dims, set_tid, one_cbuf) < 0)
+ goto done;
+ if(generate_dset(fid, DSET_CMPD_ESC, 1, cur_dims, max_dims, esc_set_tid, one_cbuf) < 0)
+ goto done;
+
+ /* Initialization for two-dimensional compound typed dataset */
+ cur2_dims[0] = TWO_DIMS0;
+ cur2_dims[1] = TWO_DIMS1;
+ max2_dims[0] = MAX_TWO_DIMS0;
+    max2_dims[1] = MAX_TWO_DIMS1;
+
+ for (i = 0; i < (TWO_DIMS0 * TWO_DIMS1); i++) {
+ two_cbuf[i].field1 = 1;
+ two_cbuf[i].field2.a = 2;
+ two_cbuf[i].field2.c = 4;
+ two_cbuf[i].field2.b.a = 20;
+ two_cbuf[i].field2.b.b = 40;
+ two_cbuf[i].field2.b.c = 80;
+ two_cbuf[i].field3 = 3.0F;
+ two_cbuf[i].field4.a = 4;
+ two_cbuf[i].field4.b = 8;
+ }
+
+ /* Generate DSET_CMPD_TWO */
+ if(generate_dset(fid, DSET_CMPD_TWO, 2, cur2_dims, max2_dims, set_tid, two_cbuf) < 0)
+ goto done;
+
+ /* Closing */
+ if(H5Tclose(sub22_tid) < 0) goto done;
+ if(H5Tclose(sub2_tid) < 0) goto done;
+ if(H5Tclose(sub4_tid) < 0) goto done;
+ if(H5Tclose(set_tid) < 0) goto done;
+ if(H5Tclose(esc_sub2_tid) < 0) goto done;
+ if(H5Tclose(esc_sub4_tid) < 0) goto done;
+ if(H5Tclose(esc_set_tid) < 0) goto done;
+ if(H5Pclose(fapl) < 0) goto done;
+ if(H5Fclose(fid) < 0) goto done;
+
+ exit(EXIT_SUCCESS);
+
+done:
+ H5E_BEGIN_TRY
+ H5Tclose(sub22_tid);
+ H5Tclose(sub2_tid);
+ H5Tclose(sub4_tid);
+ H5Tclose(set_tid);
+ H5Tclose(esc_sub2_tid);
+ H5Tclose(esc_sub4_tid);
+ H5Tclose(esc_set_tid);
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ H5E_END_TRY
+
+ exit(EXIT_FAILURE);
+} /* main() */
diff --git a/hl/tools/h5watch/swmr_check_compat_vfd.c b/hl/tools/h5watch/swmr_check_compat_vfd.c
new file mode 100644
index 0000000..87b87c4
--- /dev/null
+++ b/hl/tools/h5watch/swmr_check_compat_vfd.c
@@ -0,0 +1,59 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Purpose: This is a small program that checks if the HDF5_DRIVER
+ * environment variable is set to a value that supports SWMR.
+ *
+ * It is intended for use in shell scripts.
+ */
+
+#include <stdlib.h>
+
+#include "H5private.h"
+
+/* This file needs to access the file driver testing code */
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_TESTING
+#include "H5FDpkg.h" /* File drivers */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Inspects the HDF5_DRIVER environment variable, which
+ * determines the VFD that the test harness will use with
+ * the majority of the tests.
+ *
+ * Return: VFD supports SWMR: EXIT_SUCCESS
+ *
+ * VFD does not support SWMR
+ * or failure: EXIT_FAILURE
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ char *driver = NULL;
+
+ driver = HDgetenv("HDF5_DRIVER");
+
+ if(H5FD_supports_swmr_test(driver))
+ return EXIT_SUCCESS;
+ else
+ return EXIT_FAILURE;
+
+} /* end main() */
+
diff --git a/hl/tools/h5watch/testh5watch.sh.in b/hl/tools/h5watch/testh5watch.sh.in
new file mode 100644
index 0000000..ff31768
--- /dev/null
+++ b/hl/tools/h5watch/testh5watch.sh.in
@@ -0,0 +1,395 @@
+#! /bin/sh
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+#
+# Tests for the h5watch tool
+#
+
+# Check to see if the VFD specified by the HDF5_DRIVER environment variable
+# supports SWMR.
+./swmr_check_compat_vfd
+rc=$?
+if [ $rc -ne 0 ] ; then
+ echo
+ echo "The VFD specified by the HDF5_DRIVER environment variable"
+ echo "does not support SWMR."
+ echo
+ echo "h5watch tests skipped"
+ echo
+ exit 0
+fi
+
+H5WATCH=h5watch # The tool name
+H5WATCH_BIN=`pwd`/$H5WATCH # The path of H5WATCH
+EXTEND_DSET=extend_dset # Routine to extend the dataset when watching
+EXTEND_BIN=`pwd`/$EXTEND_DSET # The path of EXTEND_DSET
+#
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+#
+GEN_TEST=h5watchgentest # Generate HDF5 file with various datasets
+GEN_TEST_BIN=`pwd`/$GEN_TEST # Path of the binary GEN_TEST
+WATCHFILE=`pwd`/WATCH.h5 # The HDF5 file generated to test h5watch
+TESTFILE=TEST.h5 # The temporary file (a copy of WATCH.h5) used by tests
+#
+# These 3 defines should be the same as the defines in ./extend_dset.c
+WRITER_MESSAGE=writer_message # The message file created by the "extend" process
+READER_MESSAGE=reader_message # The message file created by the "watch" process
+MESSAGE_TIMEOUT=300 # Message timeout length in secs
+#
+CMP='cmp -s'
+DIFF='diff -c'
+NLINES=20 # Max. lines of output to display if test fails
+#
+# Mac OS: just to make sure echo "backslash backslash" behaves properly
+if test `uname -s` = 'Darwin'; then
+ ECHO='/bin/echo'
+else
+ ECHO='echo'
+fi
+#
+# Global variables
+nerrors=0
+verbose=yes
+
+# The build (current) directory might be different than the source directory.
+if test -z "$srcdir"; then
+ srcdir=.
+fi
+test -d ../testfiles || mkdir ../testfiles
+
+# Print a line-line message left justified in a field of 70 characters
+# beginning with the word "Testing".
+TESTING() {
+ SPACES=" "
+ $ECHO "Testing $* $SPACES" |cut -c1-70 |tr -d '\012'
+}
+
+#
+# Overall algorithm:
+#
+# Run a test and print PASSED or FAILED
+# If a test did not return with the expected return code,
+# increment the `nerrors' global variable and (if $verbose is set) display up to $NLINES
+# lines of the actual output from the test.
+# If the test did return the expected code,
+# compare the actual output with the expected output;
+# If the outputs are the same, print PASSED,
+# Otherwise print FAILED and the difference between the two outputs.
+# The output files are not removed if $HDF5_NOCLEANUP has a non-zero value.
+#
+#
+# TOOLTEST():
+#
+# Arguments:
+#
+# $1 -- expected output
+# $2 -- expected return code
+# $3 and on -- arguments for h5watch
+TOOLTEST() {
+ expect="$srcdir/../testfiles/$1"
+ actual="../testfiles/`basename $1 .ddl`.out"
+ actual_err="../testfiles/`basename $1 .ddl`.err"
+ shift
+ retvalexpect=$1
+ shift
+ # Run test.
+ # Stderr is included in stdout so that the diff can detect
+ # any unexpected output from that stream too.
+ TESTING $H5WATCH $@
+ (
+ $ECHO "#############################"
+ $ECHO " output for '$H5WATCH $@'"
+ $ECHO "#############################"
+ $RUNSERIAL $H5WATCH_BIN "$@"
+ ) > $actual 2>$actual_err
+ exitcode=$?
+ cat $actual_err >> $actual
+ if [ $exitcode -ne $retvalexpect ]; then
+ $ECHO "*FAILED*"
+ nerrors="`expr $nerrors + 1`"
+ if [ yes = "$verbose" ]; then
+ $ECHO "test returned with exit code $exitcode"
+ $ECHO "test output: (up to $NLINES lines)"
+ head -$NLINES $actual
+ $ECHO "***end of test output***"
+ $ECHO ""
+ fi
+ elif $CMP $expect $actual; then
+ $ECHO " PASSED"
+ else
+ $ECHO "*FAILED*"
+ $ECHO " Expected result differs from actual result"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /'
+ fi
+
+ # Clean up output file
+ if test -z "$HDF5_NOCLEANUP"; then
+ rm -f $actual $actual_err
+ fi
+}
+#
+#
+#
+# TEST_WATCH():
+#
+# Arguments:
+# $1 -- the specified dataset to watch and to extend
+# $2 -- the options to h5watch (can be NULL)
+# $3 -- expected output from watching the object
+#
+TEST_WATCH() {
+ cp $WATCHFILE $TESTFILE # Copy the file being watched/extended to a temporary file
+ actual="../testfiles/`basename $3 .ddl`.out" # The actual output
+ expect="$srcdir/../testfiles/$3" # The expected output
+ #
+ # Set up options and object to h5watch
+ if test -z "$2"; then
+ OBJ="$TESTFILE/$1" # Empty options, just object to h5watch
+ else
+ OBJ="$2 $TESTFILE/$1" # Options + object to h5watch
+ fi
+ rm -f $WRITER_MESSAGE # Remove the file just to be sure
+ rm -f $READER_MESSAGE # Remove the file just to be sure
+ #
+ $EXTEND_BIN $TESTFILE $1 & # Extend the dataset; put in background
+ extend_pid=$! # Get "extend" process ID
+ #
+ # Wait for message from "extend_dset" process to start h5watch--
+ # To wait for the writer message file or till the maximum # of seconds is reached
+ # This performs similar function as the routine h5_wait_message() in test/h5test.c
+ mexist=0 # Indicate whether the message file is found
+ t0=`date +%s` # Get current time in seconds
+ difft=0 # Initialize the time difference
+ while [ $difft -lt $MESSAGE_TIMEOUT ] ; # Loop till message times out
+ do
+ t1=`date +%s` # Get current time in seconds
+ difft=`expr $t1 - $t0` # Calculate the time difference
+ if [ -e $WRITER_MESSAGE ]; then # If message file is found:
+ mexist=1 # indicate the message file is found
+ rm $WRITER_MESSAGE # remove the message file
+ break # get out of the while loop
+ fi
+ done;
+ #
+ # If message file from "extend" process is found--
+ # start h5watch
+ # send message to "extend" process to start work
+ # wait for "extend" process to complete, then kill h5watch
+ # determine test result
+ # If message file from "extend" process is not found--
+ # there is some problem; the test fails
+ #
+ if test $mexist -eq 0; then
+ $ECHO "*FAILED*"
+ $ECHO "Problem with extend_dset...this test failed."
+ else
+ #
+ # Run h5watch; put in background; collect output to a file
+ TESTING $H5WATCH $OBJ
+ head -n 3 $expect > $actual # copy the first 3 lines from $expect (just the command line)
+ $RUNSERIAL $H5WATCH_BIN $2 "$TESTFILE/$1" >> $actual &
+ watch_pid=$! # Get h5watch process ID
+ cp /dev/null $READER_MESSAGE # Send message to "extend" process to start work
+ wait $extend_pid # Wait for "extend" process to complete
+ extend_exit=$? # Collect "extend" process' exit code
+ sleep 1 # Sleep to make sure output is flushed
+ kill $watch_pid # Kill h5watch
+ wait $watch_pid # Wait for "h5watch" process to complete
+ #
+ if [ $extend_exit -ne 0 ]; then # Error returned from "extend" process
+ $ECHO "*FAILED*"
+ nerrors="`expr $nerrors + 1`"
+ if [ yes = "$verbose" ]; then
+ $ECHO "extend test returned with exit code $extend_exit"
+ $ECHO "test output: (up to $NLINES lines)"
+ head -$NLINES $actual
+ $ECHO "***end of test output***"
+ $ECHO ""
+ fi
+ elif $CMP $expect $actual; then # Compare actual output with expected output
+ $ECHO " PASSED"
+ else
+ $ECHO "*FAILED*" # Actual and expected outputs are different
+ $ECHO " Expected result differs from actual result"
+ nerrors="`expr $nerrors + 1`"
+ if test yes = "$verbose"; then
+ $DIFF $expect $actual |sed 's/^/ /'
+ fi
+ fi
+ #
+ # Cleaning
+ rm -f $TESTFILE
+ if test -z "$HDF5_NOCLEANUP"; then
+ rm -f $actual
+ fi
+ fi
+}
+
+##############################################################################
+##############################################################################
+### T H E T E S T S ###
+##############################################################################
+##############################################################################
+#
+#
+#################################################################################################
+# #
+# WATCH.h5: file with various types of datasets for testing-- #
+# The following datasets are chunked, H5D_ALLOC_TIME_INCR, max. dimensional setting: #
+# DSET_ONE: one-dimensional dataset #
+# DSET_TWO: two-dimensional dataset #
+# DSET_CMPD: one-dimensional dataset with compound type #
+# DSET_CMPD_ESC: one-dimensional dataset with compound type & escape/separator characters #
+# DSET_CMPD_TWO: two-dimensional dataset with compound type #
+# #
+# The following datasets are one-dimensional, chunked, max. dimension setting: #
+# DSET_ALLOC_EARLY: dataset with H5D_ALLOC_TIME_EARLY #
+# DSET_ALLOC_LATE: dataset H5D_ALLOC_TIME_LATE #
+# #
+# The following datasets are one-dimensional: #
+# DSET_NONE: fixed dimension setting, contiguous, H5D_ALLOC_TIME_LATE #
+# DSET_NOMAX: fixed dimension setting, chunked, H5D_ALLOC_TIME_INCR #
+# #
+#################################################################################################
+#
+#
+#################################################################################################
+# #
+# Tests on expected failures: #
+# Invalid file name #
+# Unable to find dataset, invalid dataset #
+# DSET_NONE and DSET_NOMAX #
+# Invalid input to options --width and --polling #
+# Invalid field names for -f option #
+# #
+#################################################################################################
+#
+# Generate file with various types of datasets
+$GEN_TEST_BIN
+# Test on --help options
+TOOLTEST w-help1.ddl 0 --help
+#
+# Tests on expected failures
+TOOLTEST w-err-dset1.ddl 1 WATCH.h5
+TOOLTEST w-err-dset2.ddl 1 WATCH.h5/group/DSET_CMPD
+TOOLTEST w-err-dset-none.ddl 1 WATCH.h5/DSET_NONE
+TOOLTEST w-err-dset-nomax.ddl 1 WATCH.h5/DSET_NOMAX
+TOOLTEST w-err-file.ddl 1 ../WATCH.h5/DSET_CMPD
+TOOLTEST w-err-width.ddl 1 --width=-8 WATCH.h5/DSET_ONE
+TOOLTEST w-err-poll.ddl 1 --polling=-8 WATCH.h5/DSET_ONE
+TOOLTEST w-err-poll0.ddl 1 --polling=0 WATCH.h5/DSET_ONE
+#
+# Tests on invalid field names via --fields option for a compound typed dataset: DSET_CMPD
+TOOLTEST w-err-cmpd1.ddl 1 --fields=fieldx WATCH.h5/DSET_CMPD
+TOOLTEST w-err-cmpd2.ddl 1 --fields=field1,field2. WATCH.h5/DSET_CMPD
+TOOLTEST w-err-cmpd3.ddl 1 --fields=field1,field2, WATCH.h5/DSET_CMPD
+TOOLTEST w-err-cmpd4.ddl 1 --fields=field1,field2.b.k WATCH.h5/DSET_CMPD
+TOOLTEST w-err-cmpd5.ddl 1 --fields=field1 --fields=field2.b.k WATCH.h5/DSET_CMPD
+#
+echo "DONE WITH 1st SET OF TESTS"
+#
+#
+#
+#################################
+# Tests without options #
+#################################
+#
+# Generate file WATCH.h5 with various types of datasets,
+$GEN_TEST_BIN
+#
+# Watching and extending: (TEST.h5 is a copy of WATCH.h5)
+# TEST.h5/DSET_ONE
+# TEST.h5/DSET_ALLOC_EARLY
+# TEST.h5/DSET_ALLOC_LATE
+# TEST.h5/DSET_CMPD
+# TEST.h5/DSET_TWO
+# TEST.h5/DSET_CMPD_TWO
+# TEST.h5/DSET_CMPD_ESC
+#
+TEST_WATCH DSET_ONE '' w-ext-one.ddl
+TEST_WATCH DSET_ALLOC_EARLY '' w-ext-early.ddl
+TEST_WATCH DSET_ALLOC_LATE '' w-ext-late.ddl
+TEST_WATCH DSET_CMPD '' w-ext-cmpd.ddl
+TEST_WATCH DSET_TWO '' w-ext-two.ddl
+TEST_WATCH DSET_CMPD_TWO '' w-ext-cmpd-two.ddl
+TEST_WATCH DSET_CMPD_ESC '' w-ext-cmpd-esc.ddl
+#
+echo "DONE WITH 2nd SET OF TESTS"
+#
+#
+#
+#################################
+# Tests on --fields option #
+#################################
+#
+# Watching and extending: (TEST.h5 is a copy of WATCH.h5)
+# TEST.h5/DSET_CMPD with --fields=field1,field2
+# TEST.h5/DSET_CMPD with --fields=field2.b,field4
+# TEST.h5/DSET_CMPD with --fields=field2.b.a --fields=field2.c
+TEST_WATCH DSET_CMPD --fields=field1,field2 w-ext-cmpd-f1.ddl
+TEST_WATCH DSET_CMPD --fields=field2.b,field4 w-ext-cmpd-f2.ddl
+TEST_WATCH DSET_CMPD '--fields=field2.b.a --fields=field2.c' w-ext-cmpd-ff3.ddl
+#
+#
+# TEST.h5/DSET_CMP_TWO with --fields=field1,field2
+# TEST.h5/DSET_CMPD_TWO with --fields=field2.b --fields=field4
+# TEST.h5/DSET_CMPD_TWO with --fields=field2.b.a,field2.c
+TEST_WATCH DSET_CMPD_TWO --fields=field1,field2 w-ext-cmpd-two-f1.ddl
+TEST_WATCH DSET_CMPD_TWO '--fields=field2.b --fields=field4' w-ext-cmpd-two-ff2.ddl
+TEST_WATCH DSET_CMPD_TWO --fields=field2.b.a,field2.c w-ext-cmpd-two-f3.ddl
+#
+#
+# TEST.h5/DSET_CMPD_ESC with --fields=field\,1,field2\.
+# TEST.h5/DSET_CMPD_ESC with --fields=field2\..\,b --fields=field4\,
+# TEST.h5/DSET_CMPD_ESC with --fields=field2\..\,b.a,field2\..\\K
+TEST_WATCH DSET_CMPD_ESC '--fields=field\,1,field2\.' w-ext-cmpd-esc-f1.ddl
+TEST_WATCH DSET_CMPD_ESC '--fields=field2\..\,b --fields=field4\,' w-ext-cmpd-esc-ff2.ddl
+TEST_WATCH DSET_CMPD_ESC '--fields=field2\..\,b.a,field2\..\\K' w-ext-cmpd-esc-f3.ddl
+#
+#
+echo "DONE WITH 3rd SET OF TESTS"
+#
+#
+#
+#################################################
+# Tests on options: #
+# --dim #
+# --width, --label, --simple, --help #
+#################################################
+#
+# Watching and extending: (TEST.h5 is a copy of WATCH.h5)
+# TEST.h5/DSET_ONE with -d option
+# TEST.h5/DSET_TWO with --dim option
+# TEST.h5/DSET_TWO with --width=60 option
+# TEST.h5/DSET_CMPD with --label option
+# TEST.h5/DSET_ONE with --simple option
+TEST_WATCH DSET_ONE --dim w-ext-one-d.ddl
+TEST_WATCH DSET_TWO --dim w-ext-two-d.ddl
+TEST_WATCH DSET_TWO --width=30 w-ext-two-width.ddl
+TEST_WATCH DSET_CMPD --label w-ext-cmpd-label.ddl
+TEST_WATCH DSET_ONE --simple w-ext-one-simple.ddl
+#
+echo "DONE WITH 4th SET OF TESTS"
+#
+#
+#
+if test $nerrors -eq 0 ; then
+ $ECHO "All h5watch tests passed."
+ exit $EXIT_SUCCESS
+else
+ $ECHO "h5watch tests failed with $nerrors errors."
+ exit $EXIT_FAILURE
+fi
diff --git a/hl/tools/testfiles/w-err-cmpd1.ddl b/hl/tools/testfiles/w-err-cmpd1.ddl
new file mode 100644
index 0000000..2a3f796
--- /dev/null
+++ b/hl/tools/testfiles/w-err-cmpd1.ddl
@@ -0,0 +1,5 @@
+#############################
+ output for 'h5watch --fields=fieldx WATCH.h5/DSET_CMPD'
+#############################
+Opened "WATCH.h5" with sec2 driver.
+h5watch error: error in processing <list_of_fields>
diff --git a/hl/tools/testfiles/w-err-cmpd2.ddl b/hl/tools/testfiles/w-err-cmpd2.ddl
new file mode 100644
index 0000000..4de3fe2
--- /dev/null
+++ b/hl/tools/testfiles/w-err-cmpd2.ddl
@@ -0,0 +1,5 @@
+#############################
+ output for 'h5watch --fields=field1,field2. WATCH.h5/DSET_CMPD'
+#############################
+Opened "WATCH.h5" with sec2 driver.
+h5watch error: error in processing <list_of_fields>
diff --git a/hl/tools/testfiles/w-err-cmpd3.ddl b/hl/tools/testfiles/w-err-cmpd3.ddl
new file mode 100644
index 0000000..f2f462e
--- /dev/null
+++ b/hl/tools/testfiles/w-err-cmpd3.ddl
@@ -0,0 +1,5 @@
+#############################
+ output for 'h5watch --fields=field1,field2, WATCH.h5/DSET_CMPD'
+#############################
+Opened "WATCH.h5" with sec2 driver.
+h5watch error: error in processing <list_of_fields>
diff --git a/hl/tools/testfiles/w-err-cmpd4.ddl b/hl/tools/testfiles/w-err-cmpd4.ddl
new file mode 100644
index 0000000..b11277b
--- /dev/null
+++ b/hl/tools/testfiles/w-err-cmpd4.ddl
@@ -0,0 +1,5 @@
+#############################
+ output for 'h5watch --fields=field1,field2.b.k WATCH.h5/DSET_CMPD'
+#############################
+Opened "WATCH.h5" with sec2 driver.
+h5watch error: error in processing <list_of_fields>
diff --git a/hl/tools/testfiles/w-err-cmpd5.ddl b/hl/tools/testfiles/w-err-cmpd5.ddl
new file mode 100644
index 0000000..7174d14
--- /dev/null
+++ b/hl/tools/testfiles/w-err-cmpd5.ddl
@@ -0,0 +1,5 @@
+#############################
+ output for 'h5watch --fields=field1 --fields=field2.b.k WATCH.h5/DSET_CMPD'
+#############################
+Opened "WATCH.h5" with sec2 driver.
+h5watch error: error in processing <list_of_fields>
diff --git a/hl/tools/testfiles/w-err-dset-nomax.ddl b/hl/tools/testfiles/w-err-dset-nomax.ddl
new file mode 100644
index 0000000..c2fd65f
--- /dev/null
+++ b/hl/tools/testfiles/w-err-dset-nomax.ddl
@@ -0,0 +1,5 @@
+#############################
+ output for 'h5watch WATCH.h5/DSET_NOMAX'
+#############################
+Opened "WATCH.h5" with sec2 driver.
+h5watch error: "/DSET_NOMAX" should have unlimited or max. dimension setting
diff --git a/hl/tools/testfiles/w-err-dset-none.ddl b/hl/tools/testfiles/w-err-dset-none.ddl
new file mode 100644
index 0000000..f9165aa
--- /dev/null
+++ b/hl/tools/testfiles/w-err-dset-none.ddl
@@ -0,0 +1,5 @@
+#############################
+ output for 'h5watch WATCH.h5/DSET_NONE'
+#############################
+Opened "WATCH.h5" with sec2 driver.
+h5watch error: "/DSET_NONE" should be a chunked or virtual dataset
diff --git a/hl/tools/testfiles/w-err-dset1.ddl b/hl/tools/testfiles/w-err-dset1.ddl
new file mode 100644
index 0000000..0594562
--- /dev/null
+++ b/hl/tools/testfiles/w-err-dset1.ddl
@@ -0,0 +1,5 @@
+#############################
+ output for 'h5watch WATCH.h5'
+#############################
+Opened "WATCH.h5" with sec2 driver.
+h5watch error: no dataset specified
diff --git a/hl/tools/testfiles/w-err-dset2.ddl b/hl/tools/testfiles/w-err-dset2.ddl
new file mode 100644
index 0000000..ce26109
--- /dev/null
+++ b/hl/tools/testfiles/w-err-dset2.ddl
@@ -0,0 +1,5 @@
+#############################
+ output for 'h5watch WATCH.h5/group/DSET_CMPD'
+#############################
+Opened "WATCH.h5" with sec2 driver.
+h5watch error: unable to open dataset "/group/DSET_CMPD"
diff --git a/hl/tools/testfiles/w-err-file.ddl b/hl/tools/testfiles/w-err-file.ddl
new file mode 100644
index 0000000..416fd44
--- /dev/null
+++ b/hl/tools/testfiles/w-err-file.ddl
@@ -0,0 +1,4 @@
+#############################
+ output for 'h5watch ../WATCH.h5/DSET_CMPD'
+#############################
+h5watch error: unable to open file "../WATCH.h5/DSET_CMPD"
diff --git a/hl/tools/testfiles/w-err-poll.ddl b/hl/tools/testfiles/w-err-poll.ddl
new file mode 100644
index 0000000..56c8d89
--- /dev/null
+++ b/hl/tools/testfiles/w-err-poll.ddl
@@ -0,0 +1,38 @@
+#############################
+ output for 'h5watch --polling=-8 WATCH.h5/DSET_ONE'
+#############################
+Usage: h5watch [OPTIONS] [OBJECT]
+
+ OPTIONS
+ --help Print a usage message and exit.
+ --version Print version number and exit.
+ --label Label members of compound typed dataset.
+ --simple Use a machine-readable output format.
+ --dim Monitor changes in size of dataset dimensions only.
+ --width=N Set the number of columns to N for output.
+ A value of 0 sets the number of columns to the
+ maximum (65535). The default width is 80 columns.
+ --polling=N Set the polling interval to N (in seconds) when the
+ dataset will be checked for appended data. The default
+ polling interval is 1.
+ --fields=<list_of_fields>
+ Display data for the fields specified in <list_of_fields>
+ for a compound data type. <list_of_fields> can be
+ specified as follows:
+ 1) A comma-separated list of field names in a
+ compound data type. "," is the separator
+ for field names while "." is the separator
+ for a nested field.
+ 2) A single field name in a compound data type.
+ Can use this option multiple times.
+ Note that backslash is the escape character to avoid
+ characters in field names that conflict with the tool's
+ separators.
+
+ OBJECT is specified as [<filename>/<path_to_dataset>/<dsetname>]
+ <filename> Name of the HDF5 file. It may be preceded by path
+ separated by slashes to the specified HDF5 file.
+ <path_to_dataset> Path separated by slashes to the specified dataset
+ <dsetname> Name of the dataset
+
+ User can end the h5watch process by ctrl-C (SIGINT) or kill the process (SIGTERM).
diff --git a/hl/tools/testfiles/w-err-poll0.ddl b/hl/tools/testfiles/w-err-poll0.ddl
new file mode 100644
index 0000000..ff6f322
--- /dev/null
+++ b/hl/tools/testfiles/w-err-poll0.ddl
@@ -0,0 +1,38 @@
+#############################
+ output for 'h5watch --polling=0 WATCH.h5/DSET_ONE'
+#############################
+Usage: h5watch [OPTIONS] [OBJECT]
+
+ OPTIONS
+ --help Print a usage message and exit.
+ --version Print version number and exit.
+ --label Label members of compound typed dataset.
+ --simple Use a machine-readable output format.
+ --dim Monitor changes in size of dataset dimensions only.
+ --width=N Set the number of columns to N for output.
+ A value of 0 sets the number of columns to the
+ maximum (65535). The default width is 80 columns.
+ --polling=N Set the polling interval to N (in seconds) when the
+ dataset will be checked for appended data. The default
+ polling interval is 1.
+ --fields=<list_of_fields>
+ Display data for the fields specified in <list_of_fields>
+ for a compound data type. <list_of_fields> can be
+ specified as follows:
+ 1) A comma-separated list of field names in a
+ compound data type. "," is the separator
+ for field names while "." is the separator
+ for a nested field.
+ 2) A single field name in a compound data type.
+ Can use this option multiple times.
+ Note that backslash is the escape character to avoid
+ characters in field names that conflict with the tool's
+ separators.
+
+ OBJECT is specified as [<filename>/<path_to_dataset>/<dsetname>]
+ <filename> Name of the HDF5 file. It may be preceded by path
+ separated by slashes to the specified HDF5 file.
+ <path_to_dataset> Path separated by slashes to the specified dataset
+ <dsetname> Name of the dataset
+
+ User can end the h5watch process by ctrl-C (SIGINT) or kill the process (SIGTERM).
diff --git a/hl/tools/testfiles/w-err-width.ddl b/hl/tools/testfiles/w-err-width.ddl
new file mode 100644
index 0000000..bf405a4
--- /dev/null
+++ b/hl/tools/testfiles/w-err-width.ddl
@@ -0,0 +1,38 @@
+#############################
+ output for 'h5watch --width=-8 WATCH.h5/DSET_ONE'
+#############################
+Usage: h5watch [OPTIONS] [OBJECT]
+
+ OPTIONS
+ --help Print a usage message and exit.
+ --version Print version number and exit.
+ --label Label members of compound typed dataset.
+ --simple Use a machine-readable output format.
+ --dim Monitor changes in size of dataset dimensions only.
+ --width=N Set the number of columns to N for output.
+ A value of 0 sets the number of columns to the
+ maximum (65535). The default width is 80 columns.
+ --polling=N Set the polling interval to N (in seconds) when the
+ dataset will be checked for appended data. The default
+ polling interval is 1.
+ --fields=<list_of_fields>
+ Display data for the fields specified in <list_of_fields>
+ for a compound data type. <list_of_fields> can be
+ specified as follows:
+ 1) A comma-separated list of field names in a
+ compound data type. "," is the separator
+ for field names while "." is the separator
+ for a nested field.
+ 2) A single field name in a compound data type.
+ Can use this option multiple times.
+ Note that backslash is the escape character to avoid
+ characters in field names that conflict with the tool's
+ separators.
+
+ OBJECT is specified as [<filename>/<path_to_dataset>/<dsetname>]
+ <filename> Name of the HDF5 file. It may be preceded by path
+ separated by slashes to the specified HDF5 file.
+ <path_to_dataset> Path separated by slashes to the specified dataset
+ <dsetname> Name of the dataset
+
+ User can end the h5watch process by ctrl-C (SIGINT) or kill the process (SIGTERM).
diff --git a/hl/tools/testfiles/w-ext-cmpd-esc-f1.ddl b/hl/tools/testfiles/w-ext-cmpd-esc-f1.ddl
new file mode 100644
index 0000000..6e1d2b5
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd-esc-f1.ddl
@@ -0,0 +1,14 @@
+#############################
+ output for 'h5watch --fields=field\,1,field2\. TEST.h5/DSET_CMPD_ESC'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD_ESC...
+dimension 0: 10->13 (increases)
+ Data:
+ (10) {1, {2, {2, 2, 2}, 2}}, {2, {3, {3, 3, 3}, 3}},
+ (12) {3, {4, {4, 4, 4}, 4}}
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ (1) {1, {2, {2, 2, 2}, 2}}, {2, {3, {3, 3, 3}, 3}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-esc-f3.ddl b/hl/tools/testfiles/w-ext-cmpd-esc-f3.ddl
new file mode 100644
index 0000000..7623f3f
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd-esc-f3.ddl
@@ -0,0 +1,13 @@
+#############################
+ output for 'h5watch --fields=field2\..\,b.a,field2\..\\K TEST.h5/DSET_CMPD_ESC'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD_ESC...
+dimension 0: 10->13 (increases)
+ Data:
+ (10) {{{2}}, {2}}, {{{3}}, {3}}, {{{4}}, {4}}
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ (1) {{{2}}, {2}}, {{{3}}, {3}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-esc-ff2.ddl b/hl/tools/testfiles/w-ext-cmpd-esc-ff2.ddl
new file mode 100644
index 0000000..db331f0
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd-esc-ff2.ddl
@@ -0,0 +1,14 @@
+#############################
+ output for 'h5watch --fields=field2\..\,b --fields=field4\, TEST.h5/DSET_CMPD_ESC'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD_ESC...
+dimension 0: 10->13 (increases)
+ Data:
+ (10) {{{2, 2, 2}}, {4, 4}}, {{{3, 3, 3}}, {5, 5}}, {{{4, 4, 4}}, {6,
+ (12) 6}}
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ (1) {{{2, 2, 2}}, {4, 4}}, {{{3, 3, 3}}, {5, 5}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-esc.ddl b/hl/tools/testfiles/w-ext-cmpd-esc.ddl
new file mode 100644
index 0000000..66eb48c
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd-esc.ddl
@@ -0,0 +1,16 @@
+#############################
+ output for 'h5watch TEST.h5/DSET_CMPD_ESC'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD_ESC...
+dimension 0: 10->13 (increases)
+ Data:
+ (10) {1, {2, {2, 2, 2}, 2}, 3, {4, 4}},
+ (11) {2, {3, {3, 3, 3}, 3}, 4, {5, 5}},
+ (12) {3, {4, {4, 4, 4}, 4}, 5, {6, 6}}
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ (1) {1, {2, {2, 2, 2}, 2}, 3, {4, 4}}, {2, {3, {3, 3, 3}, 3}, 4, {5,
+ (2) 5}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-f1.ddl b/hl/tools/testfiles/w-ext-cmpd-f1.ddl
new file mode 100644
index 0000000..a722f68
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd-f1.ddl
@@ -0,0 +1,14 @@
+#############################
+ output for 'h5watch --fields=field1,field2 TEST.h5/DSET_CMPD'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD...
+dimension 0: 10->13 (increases)
+ Data:
+ (10) {1, {2, {2, 2, 2}, 2}}, {2, {3, {3, 3, 3}, 3}},
+ (12) {3, {4, {4, 4, 4}, 4}}
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ (1) {1, {2, {2, 2, 2}, 2}}, {2, {3, {3, 3, 3}, 3}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-f2.ddl b/hl/tools/testfiles/w-ext-cmpd-f2.ddl
new file mode 100644
index 0000000..b6561c5
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd-f2.ddl
@@ -0,0 +1,14 @@
+#############################
+ output for 'h5watch --fields=field2.b,field4 TEST.h5/DSET_CMPD'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD...
+dimension 0: 10->13 (increases)
+ Data:
+ (10) {{{2, 2, 2}}, {4, 4}}, {{{3, 3, 3}}, {5, 5}}, {{{4, 4, 4}}, {6,
+ (12) 6}}
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ (1) {{{2, 2, 2}}, {4, 4}}, {{{3, 3, 3}}, {5, 5}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-ff3.ddl b/hl/tools/testfiles/w-ext-cmpd-ff3.ddl
new file mode 100644
index 0000000..c3d22f5
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd-ff3.ddl
@@ -0,0 +1,13 @@
+#############################
+ output for 'h5watch --fields=field2.b.a --fields=field2.c TEST.h5/DSET_CMPD'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD...
+dimension 0: 10->13 (increases)
+ Data:
+ (10) {{{2}}, {2}}, {{{3}}, {3}}, {{{4}}, {4}}
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ (1) {{{2}}, {2}}, {{{3}}, {3}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-label.ddl b/hl/tools/testfiles/w-ext-cmpd-label.ddl
new file mode 100644
index 0000000..63ac47e
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd-label.ddl
@@ -0,0 +1,21 @@
+#############################
+ output for 'h5watch --label TEST.h5/DSET_CMPD'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD...
+dimension 0: 10->13 (increases)
+ Data:
+ (10) {field1=1, field2={a=2, b={a=2, b=2, c=2}, c=2}, field3=3,
+ (10) field4={a=4, b=4}},
+ (11) {field1=2, field2={a=3, b={a=3, b=3, c=3}, c=3}, field3=4,
+ (11) field4={a=5, b=5}},
+ (12) {field1=3, field2={a=4, b={a=4, b=4, c=4}, c=4}, field3=5,
+ (12) field4={a=6, b=6}}
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ (1) {field1=1, field2={a=2, b={a=2, b=2, c=2}, c=2}, field3=3,
+ (1) field4={a=4, b=4}},
+ (2) {field1=2, field2={a=3, b={a=3, b=3, c=3}, c=3}, field3=4,
+ (2) field4={a=5, b=5}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-two-f1.ddl b/hl/tools/testfiles/w-ext-cmpd-two-f1.ddl
new file mode 100644
index 0000000..a49f9f1
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd-two-f1.ddl
@@ -0,0 +1,50 @@
+#############################
+ output for 'h5watch --fields=field1,field2 TEST.h5/DSET_CMPD_TWO'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD_TWO...
+dimension 0: 4->6 (increases)
+dimension 1: 10->12 (increases)
+ Data:
+ (0,10) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}}
+ (1,10) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}}
+ (2,10) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}}
+ (3,10) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}}
+ (4,0) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
+ (4,2) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
+ (4,4) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
+ (4,6) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
+ (4,8) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
+ (4,10) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
+ (5,0) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
+ (5,2) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
+ (5,4) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
+ (5,6) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
+ (5,8) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
+ (5,10) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}}
+dimension 0: 6->8 (increases)
+dimension 1: 12->1 (decreases)
+ Data:
+ (6,0) {2, {2, {2, 2, 2}, 2}},
+ (7,0) {2, {2, {2, 2, 2}, 2}}
+dimension 0: 8->10 (increases)
+dimension 1: 1->1 (unchanged)
+ Data:
+ (8,0) {3, {3, {3, 3, 3}, 3}},
+ (9,0) {3, {3, {3, 3, 3}, 3}}
+dimension 0: 10->3 (decreases)
+dimension 1: 1->3 (increases)
+ Data:
+ (0,1) {4, {4, {4, 4, 4}, 4}}, {4, {4, {4, 4, 4}, 4}}
+ (1,1) {4, {4, {4, 4, 4}, 4}}, {4, {4, {4, 4, 4}, 4}}
+ (2,1) {4, {4, {4, 4, 4}, 4}}, {4, {4, {4, 4, 4}, 4}}
+dimension 0: 3->2 (decreases)
+dimension 1: 3->2 (decreases)
+dimension 0: 2->1 (decreases)
+dimension 1: 2->2 (unchanged)
+dimension 0: 1->1 (unchanged)
+dimension 1: 2->4 (increases)
+ Data:
+ (0,2) {7, {7, {7, 7, 7}, 7}}, {7, {7, {7, 7, 7}, 7}}
+dimension 0: 1->1 (unchanged)
+dimension 1: 4->3 (decreases)
diff --git a/hl/tools/testfiles/w-ext-cmpd-two-f3.ddl b/hl/tools/testfiles/w-ext-cmpd-two-f3.ddl
new file mode 100644
index 0000000..a0ab9ac
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd-two-f3.ddl
@@ -0,0 +1,44 @@
+#############################
+ output for 'h5watch --fields=field2.b.a,field2.c TEST.h5/DSET_CMPD_TWO'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD_TWO...
+dimension 0: 4->6 (increases)
+dimension 1: 10->12 (increases)
+ Data:
+ (0,10) {{{1}}, {1}}, {{{1}}, {1}}
+ (1,10) {{{1}}, {1}}, {{{1}}, {1}}
+ (2,10) {{{1}}, {1}}, {{{1}}, {1}}
+ (3,10) {{{1}}, {1}}, {{{1}}, {1}}
+ (4,0) {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}},
+ (4,4) {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}},
+ (4,8) {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}},
+ (5,0) {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}},
+ (5,4) {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}},
+ (5,8) {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}
+dimension 0: 6->8 (increases)
+dimension 1: 12->1 (decreases)
+ Data:
+ (6,0) {{{2}}, {2}},
+ (7,0) {{{2}}, {2}}
+dimension 0: 8->10 (increases)
+dimension 1: 1->1 (unchanged)
+ Data:
+ (8,0) {{{3}}, {3}},
+ (9,0) {{{3}}, {3}}
+dimension 0: 10->3 (decreases)
+dimension 1: 1->3 (increases)
+ Data:
+ (0,1) {{{4}}, {4}}, {{{4}}, {4}}
+ (1,1) {{{4}}, {4}}, {{{4}}, {4}}
+ (2,1) {{{4}}, {4}}, {{{4}}, {4}}
+dimension 0: 3->2 (decreases)
+dimension 1: 3->2 (decreases)
+dimension 0: 2->1 (decreases)
+dimension 1: 2->2 (unchanged)
+dimension 0: 1->1 (unchanged)
+dimension 1: 2->4 (increases)
+ Data:
+ (0,2) {{{7}}, {7}}, {{{7}}, {7}}
+dimension 0: 1->1 (unchanged)
+dimension 1: 4->3 (decreases)
diff --git a/hl/tools/testfiles/w-ext-cmpd-two-ff2.ddl b/hl/tools/testfiles/w-ext-cmpd-two-ff2.ddl
new file mode 100644
index 0000000..e32a818
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd-two-ff2.ddl
@@ -0,0 +1,50 @@
+#############################
+ output for 'h5watch --fields=field2.b --fields=field4 TEST.h5/DSET_CMPD_TWO'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD_TWO...
+dimension 0: 4->6 (increases)
+dimension 1: 10->12 (increases)
+ Data:
+ (0,10) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}}
+ (1,10) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}}
+ (2,10) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}}
+ (3,10) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}}
+ (4,0) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
+ (4,2) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
+ (4,4) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
+ (4,6) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
+ (4,8) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
+ (4,10) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
+ (5,0) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
+ (5,2) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
+ (5,4) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
+ (5,6) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
+ (5,8) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
+ (5,10) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}}
+dimension 0: 6->8 (increases)
+dimension 1: 12->1 (decreases)
+ Data:
+ (6,0) {{{2, 2, 2}}, {2, 2}},
+ (7,0) {{{2, 2, 2}}, {2, 2}}
+dimension 0: 8->10 (increases)
+dimension 1: 1->1 (unchanged)
+ Data:
+ (8,0) {{{3, 3, 3}}, {3, 3}},
+ (9,0) {{{3, 3, 3}}, {3, 3}}
+dimension 0: 10->3 (decreases)
+dimension 1: 1->3 (increases)
+ Data:
+ (0,1) {{{4, 4, 4}}, {4, 4}}, {{{4, 4, 4}}, {4, 4}}
+ (1,1) {{{4, 4, 4}}, {4, 4}}, {{{4, 4, 4}}, {4, 4}}
+ (2,1) {{{4, 4, 4}}, {4, 4}}, {{{4, 4, 4}}, {4, 4}}
+dimension 0: 3->2 (decreases)
+dimension 1: 3->2 (decreases)
+dimension 0: 2->1 (decreases)
+dimension 1: 2->2 (unchanged)
+dimension 0: 1->1 (unchanged)
+dimension 1: 2->4 (increases)
+ Data:
+ (0,2) {{{7, 7, 7}}, {7, 7}}, {{{7, 7, 7}}, {7, 7}}
+dimension 0: 1->1 (unchanged)
+dimension 1: 4->3 (decreases)
diff --git a/hl/tools/testfiles/w-ext-cmpd-two.ddl b/hl/tools/testfiles/w-ext-cmpd-two.ddl
new file mode 100644
index 0000000..90775e3
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd-two.ddl
@@ -0,0 +1,70 @@
+#############################
+ output for 'h5watch TEST.h5/DSET_CMPD_TWO'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD_TWO...
+dimension 0: 4->6 (increases)
+dimension 1: 10->12 (increases)
+ Data:
+ (0,10) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (0,11) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}}
+ (1,10) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (1,11) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}}
+ (2,10) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (2,11) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}}
+ (3,10) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (3,11) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}}
+ (4,0) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (4,1) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (4,2) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (4,3) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (4,4) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (4,5) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (4,6) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (4,7) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (4,8) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (4,9) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (4,10) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (4,11) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (5,0) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (5,1) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (5,2) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (5,3) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (5,4) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (5,5) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (5,6) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (5,7) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (5,8) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (5,9) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (5,10) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
+ (5,11) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}}
+dimension 0: 6->8 (increases)
+dimension 1: 12->1 (decreases)
+ Data:
+ (6,0) {2, {2, {2, 2, 2}, 2}, 2, {2, 2}},
+ (7,0) {2, {2, {2, 2, 2}, 2}, 2, {2, 2}}
+dimension 0: 8->10 (increases)
+dimension 1: 1->1 (unchanged)
+ Data:
+ (8,0) {3, {3, {3, 3, 3}, 3}, 3, {3, 3}},
+ (9,0) {3, {3, {3, 3, 3}, 3}, 3, {3, 3}}
+dimension 0: 10->3 (decreases)
+dimension 1: 1->3 (increases)
+ Data:
+ (0,1) {4, {4, {4, 4, 4}, 4}, 4, {4, 4}},
+ (0,2) {4, {4, {4, 4, 4}, 4}, 4, {4, 4}}
+ (1,1) {4, {4, {4, 4, 4}, 4}, 4, {4, 4}},
+ (1,2) {4, {4, {4, 4, 4}, 4}, 4, {4, 4}}
+ (2,1) {4, {4, {4, 4, 4}, 4}, 4, {4, 4}},
+ (2,2) {4, {4, {4, 4, 4}, 4}, 4, {4, 4}}
+dimension 0: 3->2 (decreases)
+dimension 1: 3->2 (decreases)
+dimension 0: 2->1 (decreases)
+dimension 1: 2->2 (unchanged)
+dimension 0: 1->1 (unchanged)
+dimension 1: 2->4 (increases)
+ Data:
+ (0,2) {7, {7, {7, 7, 7}, 7}, 7, {7, 7}},
+ (0,3) {7, {7, {7, 7, 7}, 7}, 7, {7, 7}}
+dimension 0: 1->1 (unchanged)
+dimension 1: 4->3 (decreases)
diff --git a/hl/tools/testfiles/w-ext-cmpd.ddl b/hl/tools/testfiles/w-ext-cmpd.ddl
new file mode 100644
index 0000000..5512d17
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-cmpd.ddl
@@ -0,0 +1,16 @@
+#############################
+ output for 'h5watch TEST.h5/DSET_CMPD'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_CMPD...
+dimension 0: 10->13 (increases)
+ Data:
+ (10) {1, {2, {2, 2, 2}, 2}, 3, {4, 4}},
+ (11) {2, {3, {3, 3, 3}, 3}, 4, {5, 5}},
+ (12) {3, {4, {4, 4, 4}, 4}, 5, {6, 6}}
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ (1) {1, {2, {2, 2, 2}, 2}, 3, {4, 4}}, {2, {3, {3, 3, 3}, 3}, 4, {5,
+ (2) 5}}
diff --git a/hl/tools/testfiles/w-ext-early.ddl b/hl/tools/testfiles/w-ext-early.ddl
new file mode 100644
index 0000000..a822540
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-early.ddl
@@ -0,0 +1,13 @@
+#############################
+ output for 'h5watch TEST.h5/DSET_ALLOC_EARLY'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_ALLOC_EARLY...
+dimension 0: 10->13 (increases)
+ Data:
+ (10) 0, 1, 2
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ (1) 0, 1
diff --git a/hl/tools/testfiles/w-ext-late.ddl b/hl/tools/testfiles/w-ext-late.ddl
new file mode 100644
index 0000000..724562f
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-late.ddl
@@ -0,0 +1,13 @@
+#############################
+ output for 'h5watch TEST.h5/DSET_ALLOC_LATE'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_ALLOC_LATE...
+dimension 0: 10->13 (increases)
+ Data:
+ (10) 0, 1, 2
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ (1) 0, 1
diff --git a/hl/tools/testfiles/w-ext-one-d.ddl b/hl/tools/testfiles/w-ext-one-d.ddl
new file mode 100644
index 0000000..55d55ca
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-one-d.ddl
@@ -0,0 +1,9 @@
+#############################
+ output for 'h5watch --dim TEST.h5/DSET_ONE'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_ONE...
+dimension 0: 10->13 (increases)
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
diff --git a/hl/tools/testfiles/w-ext-one-simple.ddl b/hl/tools/testfiles/w-ext-one-simple.ddl
new file mode 100644
index 0000000..f657748
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-one-simple.ddl
@@ -0,0 +1,16 @@
+#############################
+ output for 'h5watch --simple TEST.h5/DSET_ONE'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_ONE...
+dimension 0: 10->13 (increases)
+ Data:
+ 0
+ 1
+ 2
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ 0
+ 1
diff --git a/hl/tools/testfiles/w-ext-one.ddl b/hl/tools/testfiles/w-ext-one.ddl
new file mode 100644
index 0000000..b46392e
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-one.ddl
@@ -0,0 +1,13 @@
+#############################
+ output for 'h5watch TEST.h5/DSET_ONE'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_ONE...
+dimension 0: 10->13 (increases)
+ Data:
+ (10) 0, 1, 2
+dimension 0: 13->12 (decreases)
+dimension 0: 12->1 (decreases)
+dimension 0: 1->3 (increases)
+ Data:
+ (1) 0, 1
diff --git a/hl/tools/testfiles/w-ext-two-d.ddl b/hl/tools/testfiles/w-ext-two-d.ddl
new file mode 100644
index 0000000..dda09e6
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-two-d.ddl
@@ -0,0 +1,21 @@
+#############################
+ output for 'h5watch --dim TEST.h5/DSET_TWO'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_TWO...
+dimension 0: 4->6 (increases)
+dimension 1: 10->12 (increases)
+dimension 0: 6->8 (increases)
+dimension 1: 12->1 (decreases)
+dimension 0: 8->10 (increases)
+dimension 1: 1->1 (unchanged)
+dimension 0: 10->3 (decreases)
+dimension 1: 1->3 (increases)
+dimension 0: 3->2 (decreases)
+dimension 1: 3->2 (decreases)
+dimension 0: 2->1 (decreases)
+dimension 1: 2->2 (unchanged)
+dimension 0: 1->1 (unchanged)
+dimension 1: 2->4 (increases)
+dimension 0: 1->1 (unchanged)
+dimension 1: 4->3 (decreases)
diff --git a/hl/tools/testfiles/w-ext-two-width.ddl b/hl/tools/testfiles/w-ext-two-width.ddl
new file mode 100644
index 0000000..47e9221
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-two-width.ddl
@@ -0,0 +1,44 @@
+#############################
+ output for 'h5watch --width=30 TEST.h5/DSET_TWO'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_TWO...
+dimension 0: 4->6 (increases)
+dimension 1: 10->12 (increases)
+ Data:
+ (0,10) 1, 1
+ (1,10) 1, 1
+ (2,10) 1, 1
+ (3,10) 1, 1
+ (4,0) 1, 1, 1, 1, 1,
+ (4,5) 1, 1, 1, 1, 1,
+ (4,10) 1, 1,
+ (5,0) 1, 1, 1, 1, 1,
+ (5,5) 1, 1, 1, 1, 1,
+ (5,10) 1, 1
+dimension 0: 6->8 (increases)
+dimension 1: 12->1 (decreases)
+ Data:
+ (6,0) 2,
+ (7,0) 2
+dimension 0: 8->10 (increases)
+dimension 1: 1->1 (unchanged)
+ Data:
+ (8,0) 3,
+ (9,0) 3
+dimension 0: 10->3 (decreases)
+dimension 1: 1->3 (increases)
+ Data:
+ (0,1) 4, 4
+ (1,1) 4, 4
+ (2,1) 4, 4
+dimension 0: 3->2 (decreases)
+dimension 1: 3->2 (decreases)
+dimension 0: 2->1 (decreases)
+dimension 1: 2->2 (unchanged)
+dimension 0: 1->1 (unchanged)
+dimension 1: 2->4 (increases)
+ Data:
+ (0,2) 7, 7
+dimension 0: 1->1 (unchanged)
+dimension 1: 4->3 (decreases)
diff --git a/hl/tools/testfiles/w-ext-two.ddl b/hl/tools/testfiles/w-ext-two.ddl
new file mode 100644
index 0000000..a98d333
--- /dev/null
+++ b/hl/tools/testfiles/w-ext-two.ddl
@@ -0,0 +1,40 @@
+#############################
+ output for 'h5watch TEST.h5/DSET_TWO'
+#############################
+Opened "TEST.h5" with sec2 driver.
+Monitoring dataset /DSET_TWO...
+dimension 0: 4->6 (increases)
+dimension 1: 10->12 (increases)
+ Data:
+ (0,10) 1, 1
+ (1,10) 1, 1
+ (2,10) 1, 1
+ (3,10) 1, 1
+ (4,0) 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ (5,0) 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+dimension 0: 6->8 (increases)
+dimension 1: 12->1 (decreases)
+ Data:
+ (6,0) 2,
+ (7,0) 2
+dimension 0: 8->10 (increases)
+dimension 1: 1->1 (unchanged)
+ Data:
+ (8,0) 3,
+ (9,0) 3
+dimension 0: 10->3 (decreases)
+dimension 1: 1->3 (increases)
+ Data:
+ (0,1) 4, 4
+ (1,1) 4, 4
+ (2,1) 4, 4
+dimension 0: 3->2 (decreases)
+dimension 1: 3->2 (decreases)
+dimension 0: 2->1 (decreases)
+dimension 1: 2->2 (unchanged)
+dimension 0: 1->1 (unchanged)
+dimension 1: 2->4 (increases)
+ Data:
+ (0,2) 7, 7
+dimension 0: 1->1 (unchanged)
+dimension 1: 4->3 (decreases)
diff --git a/hl/tools/testfiles/w-help1.ddl b/hl/tools/testfiles/w-help1.ddl
new file mode 100644
index 0000000..8e75242
--- /dev/null
+++ b/hl/tools/testfiles/w-help1.ddl
@@ -0,0 +1,38 @@
+#############################
+ output for 'h5watch --help'
+#############################
+Usage: h5watch [OPTIONS] [OBJECT]
+
+ OPTIONS
+ --help Print a usage message and exit.
+ --version Print version number and exit.
+ --label Label members of compound typed dataset.
+ --simple Use a machine-readable output format.
+ --dim Monitor changes in size of dataset dimensions only.
+ --width=N Set the number of columns to N for output.
+ A value of 0 sets the number of columns to the
+ maximum (65535). The default width is 80 columns.
+ --polling=N Set the polling interval to N (in seconds) when the
+ dataset will be checked for appended data. The default
+ polling interval is 1.
+ --fields=<list_of_fields>
+ Display data for the fields specified in <list_of_fields>
+ for a compound data type. <list_of_fields> can be
+ specified as follows:
+ 1) A comma-separated list of field names in a
+ compound data type. "," is the separator
+ for field names while "." is the separator
+ for a nested field.
+ 2) A single field name in a compound data type.
+ Can use this option multiple times.
+ Note that backslash is the escape character to avoid
+ characters in field names that conflict with the tool's
+ separators.
+
+ OBJECT is specified as [<filename>/<path_to_dataset>/<dsetname>]
+ <filename> Name of the HDF5 file. It may be preceded by path
+ separated by slashes to the specified HDF5 file.
+ <path_to_dataset> Path separated by slashes to the specified dataset
+ <dsetname> Name of the dataset
+
+ User can end the h5watch process by ctrl-C (SIGINT) or kill the process (SIGTERM).
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 2c9ca8a..0fbd32b 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -44,6 +44,7 @@ IDE_GENERATED_PROPERTIES ("H5A" "${H5A_HDRS}" "${H5A_SRCS}" )
set (H5AC_SRCS
${HDF5_SRC_DIR}/H5AC.c
+ ${HDF5_SRC_DIR}/H5AClog.c
${HDF5_SRC_DIR}/H5ACmpio.c
)
@@ -103,20 +104,25 @@ IDE_GENERATED_PROPERTIES ("H5CS" "${H5CS_HDRS}" "${H5CS_SRCS}" )
set (H5D_SRCS
${HDF5_SRC_DIR}/H5D.c
${HDF5_SRC_DIR}/H5Dbtree.c
+ ${HDF5_SRC_DIR}/H5Dbtree2.c
${HDF5_SRC_DIR}/H5Dchunk.c
${HDF5_SRC_DIR}/H5Dcompact.c
${HDF5_SRC_DIR}/H5Dcontig.c
${HDF5_SRC_DIR}/H5Ddbg.c
${HDF5_SRC_DIR}/H5Ddeprec.c
+ ${HDF5_SRC_DIR}/H5Dearray.c
${HDF5_SRC_DIR}/H5Defl.c
+ ${HDF5_SRC_DIR}/H5Dfarray.c
${HDF5_SRC_DIR}/H5Dfill.c
${HDF5_SRC_DIR}/H5Dint.c
${HDF5_SRC_DIR}/H5Dio.c
${HDF5_SRC_DIR}/H5Dlayout.c
${HDF5_SRC_DIR}/H5Dmpio.c
+ ${HDF5_SRC_DIR}/H5Dnone.c
${HDF5_SRC_DIR}/H5Doh.c
${HDF5_SRC_DIR}/H5Dscatgath.c
${HDF5_SRC_DIR}/H5Dselect.c
+ ${HDF5_SRC_DIR}/H5Dsingle.c
${HDF5_SRC_DIR}/H5Dtest.c
${HDF5_SRC_DIR}/H5Dvirtual.c
)
@@ -196,6 +202,7 @@ set (H5FA_SRCS
${HDF5_SRC_DIR}/H5FAdblkpage.c
${HDF5_SRC_DIR}/H5FAdblock.c
${HDF5_SRC_DIR}/H5FAhdr.c
+ ${HDF5_SRC_DIR}/H5FAint.c
${HDF5_SRC_DIR}/H5FAstat.c
${HDF5_SRC_DIR}/H5FAtest.c
)
@@ -218,6 +225,7 @@ set (H5FD_SRCS
${HDF5_SRC_DIR}/H5FDsec2.c
${HDF5_SRC_DIR}/H5FDspace.c
${HDF5_SRC_DIR}/H5FDstdio.c
+ ${HDF5_SRC_DIR}/H5FDtest.c
${HDF5_SRC_DIR}/H5FDwindows.c
)
@@ -258,6 +266,7 @@ set (H5FS_SRCS
${HDF5_SRC_DIR}/H5FS.c
${HDF5_SRC_DIR}/H5FScache.c
${HDF5_SRC_DIR}/H5FSdbg.c
+ ${HDF5_SRC_DIR}/H5FSint.c
${HDF5_SRC_DIR}/H5FSsection.c
${HDF5_SRC_DIR}/H5FSstat.c
${HDF5_SRC_DIR}/H5FStest.c
@@ -306,6 +315,7 @@ set (H5HF_SRCS
${HDF5_SRC_DIR}/H5HFhdr.c
${HDF5_SRC_DIR}/H5HFhuge.c
${HDF5_SRC_DIR}/H5HFiblock.c
+ ${HDF5_SRC_DIR}/H5HFint.c
${HDF5_SRC_DIR}/H5HFiter.c
${HDF5_SRC_DIR}/H5HFman.c
${HDF5_SRC_DIR}/H5HFsection.c
@@ -428,6 +438,7 @@ set (H5O_SRCS
${HDF5_SRC_DIR}/H5Odtype.c
${HDF5_SRC_DIR}/H5Oefl.c
${HDF5_SRC_DIR}/H5Ofill.c
+ ${HDF5_SRC_DIR}/H5Oflush.c
${HDF5_SRC_DIR}/H5Ofsinfo.c
${HDF5_SRC_DIR}/H5Oginfo.c
${HDF5_SRC_DIR}/H5Olayout.c
@@ -438,6 +449,7 @@ set (H5O_SRCS
${HDF5_SRC_DIR}/H5Oname.c
${HDF5_SRC_DIR}/H5Onull.c
${HDF5_SRC_DIR}/H5Opline.c
+ ${HDF5_SRC_DIR}/H5Oproxy.c
${HDF5_SRC_DIR}/H5Orefcount.c
${HDF5_SRC_DIR}/H5Osdspace.c
${HDF5_SRC_DIR}/H5Oshared.c
diff --git a/src/H5AC.c b/src/H5AC.c
index 93f1ee4..16b3880 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -32,7 +32,8 @@
/****************/
#include "H5ACmodule.h" /* This source code file is part of the H5AC module */
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5C_FRIEND /* Suppress error about including H5Cpkg */
+#define H5F_FRIEND /* Suppress error about including H5Fpkg */
/***********/
@@ -40,12 +41,13 @@
/***********/
#include "H5private.h" /* Generic Functions */
#include "H5ACpkg.h" /* Metadata cache */
-#include "H5Cprivate.h" /* Cache */
+#include "H5Cpkg.h" /* Cache */
#include "H5Eprivate.h" /* Error handling */
#include "H5Fpkg.h" /* Files */
#include "H5FDprivate.h" /* File drivers */
#include "H5Iprivate.h" /* IDs */
#include "H5Pprivate.h" /* Property lists */
+#include "H5SLprivate.h" /* Skip Lists */
/****************/
@@ -66,6 +68,9 @@ static herr_t H5AC__check_if_write_permitted(const H5F_t *f,
hbool_t *write_permitted_ptr);
static herr_t H5AC__ext_config_2_int_config(H5AC_cache_config_t *ext_conf_ptr,
H5C_auto_size_ctl_t *int_conf_ptr);
+#if H5AC_DO_TAGGING_SANITY_CHECKS
+static herr_t H5AC__verify_tag(hid_t dxpl_id, const H5AC_class_t * type);
+#endif /* H5AC_DO_TAGGING_SANITY_CHECKS */
/*********************/
@@ -105,6 +110,7 @@ static const char *H5AC_entry_type_names[H5AC_NTYPES] =
"global heaps",
"object headers",
"object header chunks",
+ "object header proxies",
"v2 B-tree headers",
"v2 B-tree internal nodes",
"v2 B-tree leaf nodes",
@@ -412,6 +418,16 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr)
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "H5C_set_prefix() failed")
#endif /* H5_HAVE_PARALLEL */
+ /* Turn on metadata cache logging, if being used */
+ if(f->shared->use_mdc_logging) {
+ if(H5C_set_up_logging(f->shared->cache, f->shared->mdc_log_location, f->shared->start_mdc_log_on_access) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "mdc logging setup failed")
+
+ /* Write the log header regardless of current logging status */
+ if(H5AC__write_create_cache_log_msg(f->shared->cache) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+ } /* end if */
+
/* Set the cache parameters */
if(H5AC_set_cache_auto_resize_config(f->shared->cache, config_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "auto resize configuration failed")
@@ -471,10 +487,13 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
H5AC_stats(f);
#endif /* H5AC_DUMP_STATS_ON_CLOSE */
-#if H5AC__TRACE_FILE_ENABLED
- if(H5AC_close_trace_file(f->shared->cache) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_close_trace_file() failed.")
-#endif /* H5AC__TRACE_FILE_ENABLED */
+ if(f->shared->use_mdc_logging) {
+ /* Write the log footer regardless of current logging status */
+ if(H5AC__write_destroy_cache_log_msg(f->shared->cache) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+ if(H5C_tear_down_logging(f->shared->cache) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "mdc logging tear-down failed")
+ } /* end if */
#ifdef H5_HAVE_PARALLEL
aux_ptr = H5C_get_aux_ptr(f->shared->cache);
@@ -511,6 +530,51 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5AC_evict
+ *
+ * Purpose: Evict all entries except the pinned entries
+ * in the cache.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Dec 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_evict(H5F_t *f, hid_t dxpl_id)
+{
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(f->shared->cache, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
+ /* Evict all entries in the cache except the pinned superblock entry */
+ if(H5C_evict(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "can't evict cache")
+
+done:
+
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_evict_cache_log_msg(f->shared->cache, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_evict() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5AC_expunge_entry
*
* Purpose: Expunge the target entry from the cache without writing it
@@ -532,6 +596,8 @@ H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
char trace[128] = "";
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -544,6 +610,10 @@ H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
HDassert(type->serialize);
HDassert(H5F_addr_defined(addr));
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(f->shared->cache, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
#if H5AC__TRACE_FILE_ENABLED
{
H5AC_t * cache_ptr = f->shared->cache;
@@ -566,6 +636,11 @@ done:
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_expunge_entry_log_msg(f->shared->cache, addr, type->id, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_expunge_entry() */
@@ -597,6 +672,8 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id)
char trace[128] = "";
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -606,6 +683,10 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id)
HDassert(f->shared);
HDassert(f->shared->cache);
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(f->shared->cache, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
#if H5AC__TRACE_FILE_ENABLED
/* For the flush, only the flags are really necessary in the trace file.
* Write the result to catch occult errors.
@@ -631,6 +712,11 @@ done:
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_flush_cache_log_msg(f->shared->cache, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_flush() */
@@ -663,6 +749,7 @@ H5AC_get_entry_status(const H5F_t *f, haddr_t addr, unsigned *status)
hbool_t is_dirty; /* Entry @ addr is in the cache and dirty */
hbool_t is_protected; /* Entry @ addr is in the cache and protected */
hbool_t is_pinned; /* Entry @ addr is in the cache and pinned */
+ hbool_t is_corked;
hbool_t is_flush_dep_child; /* Entry @ addr is in the cache and is a flush dependency child */
hbool_t is_flush_dep_parent; /* Entry @ addr is in the cache and is a flush dependency parent */
herr_t ret_value = SUCCEED; /* Return value */
@@ -673,7 +760,7 @@ H5AC_get_entry_status(const H5F_t *f, haddr_t addr, unsigned *status)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry.")
if(H5C_get_entry_status(f, addr, NULL, &in_cache, &is_dirty,
- &is_protected, &is_pinned, &is_flush_dep_parent, &is_flush_dep_child) < 0)
+ &is_protected, &is_pinned, &is_corked, &is_flush_dep_parent, &is_flush_dep_child) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_status() failed.")
if(in_cache) {
@@ -684,6 +771,8 @@ H5AC_get_entry_status(const H5F_t *f, haddr_t addr, unsigned *status)
*status |= H5AC_ES__IS_PROTECTED;
if(is_pinned)
*status |= H5AC_ES__IS_PINNED;
+ if(is_corked)
+ *status |= H5AC_ES__IS_CORKED;
if(is_flush_dep_parent)
*status |= H5AC_ES__IS_FLUSH_DEP_PARENT;
if(is_flush_dep_child)
@@ -721,6 +810,8 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
size_t trace_entry_size = 0;
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -734,6 +825,10 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
HDassert(H5F_addr_defined(addr));
HDassert(thing);
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(f->shared->cache, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
/* Check for invalid access request */
if(0 == (H5F_INTENT(f) & H5F_ACC_RDWR))
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "no write intent on file")
@@ -751,6 +846,11 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
flags);
#endif /* H5AC__TRACE_FILE_ENABLED */
+#if H5AC_DO_TAGGING_SANITY_CHECKS
+ if (!f->shared->cache->ignore_tags && (H5AC__verify_tag(dxpl_id, type) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Bad tag value")
+#endif /* H5AC_DO_TAGGING_SANITY_CHECKS */
+
/* Insert entry into metadata cache */
if(H5C_insert_entry(f, dxpl_id, type, addr, thing, flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C_insert_entry() failed")
@@ -783,6 +883,10 @@ done:
if(trace_file_ptr != NULL)
HDfprintf(trace_file_ptr, "%s %d %d\n", trace, (int)trace_entry_size, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_insert_entry_log_msg(f->shared->cache, addr, type->id, flags, ((H5C_cache_entry_t *)thing)->size, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_insert_entry() */
@@ -808,6 +912,10 @@ H5AC_mark_entry_dirty(void *thing)
char trace[128] = "";
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
+ H5AC_info_t *entry_ptr = NULL; /* Pointer to the cache entry */
+ H5C_t *cache_ptr = NULL; /* Pointer to the entry's associated metadata cache */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -825,10 +933,15 @@ H5AC_mark_entry_dirty(void *thing)
(unsigned long)(((H5C_cache_entry_t *)thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
+ entry_ptr = (H5AC_info_t *)thing;
+ cache_ptr = entry_ptr->cache_ptr;
+
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(cache_ptr, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
#ifdef H5_HAVE_PARALLEL
{
- H5AC_info_t *entry_ptr = (H5AC_info_t *)thing;
- H5C_t *cache_ptr = entry_ptr->cache_ptr;
H5AC_aux_t *aux_ptr;
aux_ptr = H5C_get_aux_ptr(cache_ptr);
@@ -848,6 +961,11 @@ done:
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_mark_dirty_entry_log_msg(cache_ptr, entry_ptr, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_mark_entry_dirty() */
@@ -876,6 +994,8 @@ H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t ne
#ifdef H5_HAVE_PARALLEL
H5AC_aux_t *aux_ptr;
#endif /* H5_HAVE_PARALLEL */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -888,6 +1008,10 @@ H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t ne
HDassert(H5F_addr_defined(new_addr));
HDassert(H5F_addr_ne(old_addr, new_addr));
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(f->shared->cache, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
#if H5AC__TRACE_FILE_ENABLED
/* For the move call, only the old addr and new addr are really
* necessary in the trace file. Include the type id so we don't have to
@@ -921,6 +1045,11 @@ done:
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_move_entry_log_msg(f->shared->cache, old_addr, new_addr, type->id, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_move_entry() */
@@ -945,6 +1074,10 @@ H5AC_pin_protected_entry(void *thing)
char trace[128] = "";
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
+ H5AC_info_t *entry_ptr = NULL; /* Pointer to the cache entry */
+ H5C_t *cache_ptr = NULL; /* Pointer to the entry's associated metadata cache */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -961,6 +1094,16 @@ H5AC_pin_protected_entry(void *thing)
(unsigned long)(((H5C_cache_entry_t *)thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
+ entry_ptr = (H5AC_info_t *)thing;
+ cache_ptr = entry_ptr->cache_ptr;
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(cache_ptr, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
/* Pin entry */
if(H5C_pin_protected_entry(thing) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "can't pin entry")
@@ -971,6 +1114,11 @@ done:
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_pin_entry_log_msg(cache_ptr, entry_ptr, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_pin_protected_entry() */
@@ -995,6 +1143,10 @@ H5AC_create_flush_dependency(void * parent_thing, void * child_thing)
char trace[128] = "";
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
+ H5AC_info_t *entry_ptr = NULL; /* Pointer to the cache entry */
+ H5C_t *cache_ptr = NULL; /* Pointer to the entry's associated metadata cache */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1010,6 +1162,16 @@ H5AC_create_flush_dependency(void * parent_thing, void * child_thing)
(unsigned long)(((H5C_cache_entry_t *)child_thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
+ entry_ptr = (H5AC_info_t *)parent_thing;
+ cache_ptr = entry_ptr->cache_ptr;
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(cache_ptr, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
/* Create the flush dependency */
if(H5C_create_flush_dependency(parent_thing, child_thing) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "H5C_create_flush_dependency() failed.")
@@ -1020,6 +1182,11 @@ done:
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_create_fd_log_msg(cache_ptr, (H5AC_info_t *)parent_thing, (H5AC_info_t *)child_thing, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_create_flush_dependency() */
@@ -1057,8 +1224,10 @@ H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
size_t trace_entry_size = 0;
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
- void * thing = NULL; /* Pointer to native data structure for entry */
- void * ret_value = NULL; /* Return value */
+ void *thing = NULL; /* Pointer to native data structure for entry */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
+ void *ret_value = NULL; /* Return value */
FUNC_ENTER_NOAPI(NULL)
@@ -1070,6 +1239,10 @@ H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
HDassert(type->serialize);
HDassert(H5F_addr_defined(addr));
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(f->shared->cache, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "unable to get logging status")
+
/* Check for unexpected flags -- H5C__FLUSH_COLLECTIVELY_FLAG
* only permitted in the parallel case.
*/
@@ -1096,6 +1269,11 @@ H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
(int)(type->id), flags);
#endif /* H5AC__TRACE_FILE_ENABLED */
+#if H5AC_DO_TAGGING_SANITY_CHECKS
+ if (!f->shared->cache->ignore_tags && (H5AC__verify_tag(dxpl_id, type) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Bad tag value")
+#endif /* H5AC_DO_TAGGING_SANITY_CHECKS */
+
if(NULL == (thing = H5C_protect(f, dxpl_id, type, addr, udata, flags)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_protect() failed.")
@@ -1114,6 +1292,14 @@ done:
HDfprintf(trace_file_ptr, "%s %d %d\n", trace, (int)trace_entry_size, (int)(ret_value != NULL));
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging) {
+ herr_t fake_ret_value = (NULL == ret_value) ? FAIL : SUCCEED;
+
+ if(H5AC__write_protect_entry_log_msg(f->shared->cache, (H5AC_info_t *)thing, flags, fake_ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "unable to emit log message")
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_protect() */
@@ -1137,6 +1323,10 @@ H5AC_resize_entry(void *thing, size_t new_size)
char trace[128] = "";
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
+ H5AC_info_t *entry_ptr = NULL; /* Pointer to the cache entry */
+ H5C_t *cache_ptr = NULL; /* Pointer to the entry's associated metadata cache */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1155,14 +1345,22 @@ H5AC_resize_entry(void *thing, size_t new_size)
(int)new_size);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ entry_ptr = (H5AC_info_t *)thing;
+ cache_ptr = entry_ptr->cache_ptr;
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(cache_ptr, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
/* Resize the entry */
if(H5C_resize_entry(thing, new_size) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "can't resize entry")
#ifdef H5_HAVE_PARALLEL
{
- H5AC_info_t * entry_ptr = (H5AC_info_t *)thing;
- H5C_t *cache_ptr = entry_ptr->cache_ptr;
H5AC_aux_t *aux_ptr;
aux_ptr = H5C_get_aux_ptr(cache_ptr);
@@ -1178,6 +1376,11 @@ done:
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_resize_entry_log_msg(cache_ptr, entry_ptr, new_size, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_resize_entry() */
@@ -1202,6 +1405,10 @@ H5AC_unpin_entry(void *thing)
char trace[128] = "";
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
+ H5AC_info_t *entry_ptr = NULL; /* Pointer to the cache entry */
+ H5C_t *cache_ptr = NULL; /* Pointer to the entry's associated metadata cache */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1218,6 +1425,16 @@ H5AC_unpin_entry(void *thing)
(unsigned long)(((H5C_cache_entry_t *)thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
+ entry_ptr = (H5AC_info_t *)thing;
+ cache_ptr = entry_ptr->cache_ptr;
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(cache_ptr, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
/* Unpin the entry */
if(H5C_unpin_entry(thing) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "can't unpin entry")
@@ -1228,6 +1445,11 @@ done:
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_unpin_entry_log_msg(cache_ptr, entry_ptr, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_unpin_entry() */
@@ -1251,6 +1473,10 @@ H5AC_destroy_flush_dependency(void * parent_thing, void * child_thing)
char trace[128] = "";
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
+ H5AC_info_t *entry_ptr = NULL; /* Pointer to the cache entry */
+ H5C_t *cache_ptr = NULL; /* Pointer to the entry's associated metadata cache */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1266,6 +1492,16 @@ H5AC_destroy_flush_dependency(void * parent_thing, void * child_thing)
(unsigned long long)(((H5C_cache_entry_t *)child_thing)->addr));
#endif /* H5AC__TRACE_FILE_ENABLED */
+ entry_ptr = (H5AC_info_t *)parent_thing;
+ cache_ptr = entry_ptr->cache_ptr;
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(cache_ptr, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
/* Destroy the flush dependency */
if(H5C_destroy_flush_dependency(parent_thing, child_thing) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "H5C_destroy_flush_dependency() failed.")
@@ -1276,6 +1512,11 @@ done:
HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_destroy_fd_log_msg(cache_ptr, (H5AC_info_t *)parent_thing, (H5AC_info_t *)child_thing, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_destroy_flush_dependency() */
@@ -1331,6 +1572,8 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
#ifdef H5_HAVE_PARALLEL
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1347,6 +1590,10 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
HDassert( ((H5AC_info_t *)thing)->addr == addr );
HDassert( ((H5AC_info_t *)thing)->type == type );
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(f->shared->cache, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
#if H5AC__TRACE_FILE_ENABLED
/* For the unprotect call, only the addr, type id, flags, and possible
* new size are really necessary in the trace file. Write the return
@@ -1403,6 +1650,11 @@ done:
HDfprintf(trace_file_ptr, "%s 0x%x %d\n", trace, (unsigned)flags, (int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_unprotect_entry_log_msg(f->shared->cache, (H5AC_info_t *)thing, type->id, flags, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_unprotect() */
@@ -1668,6 +1920,8 @@ H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, H5AC_cache_config_t *config
H5AC_cache_config_t trace_config = H5AC__DEFAULT_CACHE_CONFIG;
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
H5C_auto_size_ctl_t internal_config;
herr_t ret_value = SUCCEED; /* Return value */
@@ -1676,6 +1930,10 @@ H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, H5AC_cache_config_t *config
/* Sanity checks */
HDassert(cache_ptr);
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(cache_ptr, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to get logging status")
+
#if H5AC__TRACE_FILE_ENABLED
/* Make note of the new configuration. Don't look up the trace file
* pointer, as that may change before we use it.
@@ -1784,6 +2042,11 @@ done:
(int)ret_value);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_set_cache_config_log_msg(cache_ptr, config_ptr, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_set_cache_auto_resize_config() */
@@ -2264,6 +2527,7 @@ done:
herr_t
H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t *prev_tag)
{
+ H5C_tag_t tag; /* Tag structure */
H5P_genplist_t *dxpl; /* Dataset transfer property list */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2274,12 +2538,32 @@ H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t *prev_tag)
HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "not a property list")
/* Get the current tag value and return that (if prev_tag is NOT null) */
- if(prev_tag)
- if((H5P_get(dxpl, "H5AC_metadata_tag", prev_tag)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "unable to query dxpl")
+ if(prev_tag) {
+ if((H5P_get(dxpl, "H5C_tag", &tag)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to query dxpl")
+ *prev_tag = tag.value;
+ } /* end if */
- /* Set the provided tag value in the dxpl_id. */
- if(H5P_set(dxpl, "H5AC_metadata_tag", &metadata_tag) < 0)
+ /* Add metadata_tag to tag structure */
+ tag.value = metadata_tag;
+
+ /* Determine globality of tag */
+ switch(metadata_tag) {
+ case H5AC__SUPERBLOCK_TAG:
+ case H5AC__SOHM_TAG:
+ case H5AC__GLOBALHEAP_TAG:
+ tag.globality = H5C_GLOBALITY_MAJOR;
+ break;
+ case H5AC__FREESPACE_TAG:
+ tag.globality = H5C_GLOBALITY_MINOR;
+ break;
+ default:
+ tag.globality = H5C_GLOBALITY_NONE;
+ break;
+ } /* end switch */
+
+ /* Set the provided tag in the dxpl_id. */
+ if(H5P_set(dxpl, "H5C_tag", &tag) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set property in dxpl")
done:
@@ -2310,13 +2594,216 @@ H5AC_retag_copied_metadata(const H5F_t *f, haddr_t metadata_tag)
HDassert(f);
HDassert(f->shared);
- /* Call cache-level function to retag entries */
- H5C_retag_copied_metadata(f->shared->cache, metadata_tag);
+ /* Call cache-level function to re-tag entries with the COPIED tag */
+ H5C_retag_entries(f->shared->cache, H5AC__COPIED_TAG, metadata_tag);
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5AC_retag_copied_metadata */
+/*------------------------------------------------------------------------------
+ * Function: H5AC_flush_tagged_metadata()
+ *
+ * Purpose: Wrapper for cache level function which flushes all metadata
+ * that contains the specific tag.
+ *
+ * Return: SUCCEED on success, FAIL otherwise.
+ *
+ * Programmer: Mike McGreevy
+ * May 19, 2010
+ *
+ *------------------------------------------------------------------------------
+ */
+herr_t
+H5AC_flush_tagged_metadata(H5F_t * f, haddr_t metadata_tag, hid_t dxpl_id)
+{
+ /* Variable Declarations */
+ herr_t ret_value = SUCCEED;
+
+ /* Function Enter Macro */
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Assertions */
+ HDassert(f);
+ HDassert(f->shared);
+
+ /* Call cache level function to flush metadata entries with specified tag */
+ if(H5C_flush_tagged_entries(f, dxpl_id, metadata_tag) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cannot flush metadata")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_flush_tagged_metadata */
+
+
+/*------------------------------------------------------------------------------
+ * Function: H5AC_evict_tagged_metadata()
+ *
+ * Purpose: Wrapper for cache level function which evicts all metadata
+ * that contains the specific tag.
+ *
+ * Return: SUCCEED on success, FAIL otherwise.
+ *
+ * Programmer: Mike McGreevy
+ * May 19, 2010
+ *
+ *------------------------------------------------------------------------------
+ */
+herr_t
+H5AC_evict_tagged_metadata(H5F_t * f, haddr_t metadata_tag, hid_t dxpl_id)
+{
+ /* Variable Declarations */
+ herr_t ret_value = SUCCEED;
+
+ /* Function Enter Macro */
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Assertions */
+ HDassert(f);
+ HDassert(f->shared);
+
+ /* Call cache level function to evict metadata entries with specified tag */
+ if(H5C_evict_tagged_entries(f, dxpl_id, metadata_tag)<0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cannot evict metadata")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5AC_evict_tagged_metadata */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_cork
+ *
+ * Purpose: To cork/uncork/get cork status for an object
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Jan 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_cork(H5F_t *f, haddr_t obj_addr, unsigned action, hbool_t *corked)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ HDassert(H5F_addr_defined(obj_addr));
+ HDassert(action == H5AC__SET_CORK || action == H5AC__UNCORK || action == H5AC__GET_CORKED);
+
+ if(action == H5AC__GET_CORKED)
+ HDassert(corked);
+
+ if(H5C_cork(f->shared->cache, obj_addr, action, corked) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Cannot perform the cork action")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_cork() */
+
+#if H5AC_DO_TAGGING_SANITY_CHECKS
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC__verify_tag
+ *
+ * Purpose: Performs sanity checking on an entry type and tag value
+ * stored in a supplied dxpl_id.
+ *
+ * Return: SUCCEED or FAIL.
+ *
+ * Programmer: Mike McGreevy
+ * October 20, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5AC__verify_tag(hid_t dxpl_id, const H5AC_class_t * type)
+{
+ H5C_tag_t tag;
+ H5P_genplist_t * dxpl;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ /* Get the dataset transfer property list */
+ if(NULL == (dxpl = (H5P_genplist_t *)H5I_object_verify(dxpl_id, H5I_GENPROP_LST)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
+
+ /* Get the tag from the DXPL */
+ if( (H5P_get(dxpl, "H5C_tag", &tag)) < 0 )
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to query property value");
+
+ /* Perform some sanity checks on tag value. Certain entry
+ * types require certain tag values, so check that these
+ * constraints are met. */
+ if(tag.value == H5AC__IGNORE_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "cannot ignore a tag while doing verification.")
+ else if(tag.value == H5AC__INVALID_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "no metadata tag provided")
+ else {
+
+ /* Perform some sanity checks on tag value. Certain entry
+ * types require certain tag values, so check that these
+ * constraints are met. */
+
+ /* Superblock */
+ if(type->id == H5AC_SUPERBLOCK_ID || type->id == H5AC_DRVRINFO_ID) {
+ if(tag.value != H5AC__SUPERBLOCK_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "superblock/driver-info not tagged with H5AC__SUPERBLOCK_TAG")
+ if(tag.globality != H5C_GLOBALITY_MAJOR)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "superblock/driver-info globality not marked with H5C_GLOBALITY_MAJOR")
+ }
+ else {
+ if(tag.value == H5AC__SUPERBLOCK_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "H5AC__SUPERBLOCK_TAG applied to non-superblock entry")
+ }
+
+ /* Free Space Manager */
+ if((type->id == H5AC_FSPACE_HDR_ID) || (type->id == H5AC_FSPACE_SINFO_ID)) {
+ if(tag.value != H5AC__FREESPACE_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "freespace entry not tagged with H5AC__FREESPACE_TAG")
+ if(tag.globality != H5C_GLOBALITY_MINOR)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "freespace entry globality not marked with H5C_GLOBALITY_MINOR")
+ }
+ else {
+ if(tag.value == H5AC__FREESPACE_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "H5AC__FREESPACE_TAG applied to non-freespace entry")
+ }
+
+ /* SOHM */
+ if((type->id == H5AC_SOHM_TABLE_ID) || (type->id == H5AC_SOHM_LIST_ID)) {
+ if(tag.value != H5AC__SOHM_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "sohm entry not tagged with H5AC__SOHM_TAG")
+ if(tag.globality != H5C_GLOBALITY_MAJOR)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "sohm entry globality not marked with H5C_GLOBALITY_MAJOR")
+ }
+
+ /* Global Heap */
+ if(type->id == H5AC_GHEAP_ID) {
+ if(tag.value != H5AC__GLOBALHEAP_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "global heap not tagged with H5AC__GLOBALHEAP_TAG")
+ if(tag.globality != H5C_GLOBALITY_MAJOR)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "global heap entry globality not marked with H5C_GLOBALITY_MAJOR")
+ }
+ else {
+ if(tag.value == H5AC__GLOBALHEAP_TAG)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "H5AC__GLOBALHEAP_TAG applied to non-globalheap entry")
+ }
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__verify_tag */
+#endif /* H5AC_DO_TAGGING_SANITY_CHECKS */
+
+
/*-------------------------------------------------------------------------
* Function: H5AC_get_entry_ring
*
diff --git a/src/H5AClog.c b/src/H5AClog.c
new file mode 100644
index 0000000..11579d8
--- /dev/null
+++ b/src/H5AClog.c
@@ -0,0 +1,892 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created:     H5AClog.c
+ *
+ * Purpose:     Functions for metadata cache logging in JSON format
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+#include "H5ACmodule.h" /* This source code file is part of the H5AC module */
+#define H5C_FRIEND /* Suppress error about including H5Cpkg */
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5ACpkg.h" /* Metadata cache */
+#include "H5Cpkg.h" /* Cache */
+#include "H5Eprivate.h" /* Error handling */
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define MSG_SIZE 128
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Package Typedefs */
+/********************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_create_cache_log_msg
+ *
+ * Purpose: Write a log message for cache creation.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_create_cache_log_msg(H5AC_t *cache)
+{
+    char msg[MSG_SIZE];
+    hbool_t orig_state; /* saved "current logging" flag state */
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+    /* Create the log message string
+     * (opens the top-level JSON object and the "messages" array;
+     * MSG_SIZE (128) bounds the buffer and HDsnprintf() truncates safely)
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\n\
+\"create_time\":%lld,\n\
+\"messages\":\n\
+[\n\
+"
+    , (long long)time(NULL));
+
+    /* Since we're about to override the current logging flag,
+     * check the "log enabled" flag to see if we didn't get here
+     * by mistake.
+     */
+    if(!(cache->logging_enabled))
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "attempt to write opening log message when logging is disabled")
+
+    /* Write the log message to the file
+     * Have to temporarily enable logging for this.
+     */
+    orig_state = cache->currently_logging;
+    cache->currently_logging = TRUE;
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+    cache->currently_logging = orig_state;
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_create_cache_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_destroy_cache_log_msg
+ *
+ * Purpose: Write a log message for cache destruction.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_destroy_cache_log_msg(H5AC_t *cache)
+{
+    char msg[MSG_SIZE];
+    hbool_t orig_state; /* saved "current logging" flag state */
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+    /* Create the log message string
+     * (closes the "messages" array and the top-level JSON object)
+     * NOTE(review): the comma after the "close_time" value, just before
+     * the closing brace, is not strict JSON -- confirm consumers tolerate it.
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+],\n\
+\"close_time\":%lld,\n\
+}\n\
+"
+    , (long long)time(NULL));
+
+    /* Since we're about to override the current logging flag,
+     * check the "log enabled" flag to see if we didn't get here
+     * by mistake.
+     */
+    if(!(cache->logging_enabled))
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "attempt to write closing log message when logging is disabled")
+
+    /* Write the log message to the file
+     * Have to temporarily enable logging for this.
+     */
+    orig_state = cache->currently_logging;
+    cache->currently_logging = TRUE;
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+    cache->currently_logging = orig_state;
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_destroy_cache_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_evict_cache_log_msg
+ *
+ * Purpose: Write a log message for eviction of cache entries.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_evict_cache_log_msg(const H5AC_t *cache,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+    /* Create the log message string
+     * (each record ends with ",\n" so it reads as an element of the
+     * "messages" array opened by the cache-creation message)
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"evict\",\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (int)fxn_ret_value);
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_evict_cache_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_expunge_entry_log_msg
+ *
+ * Purpose: Write a log message for expunge of cache entries.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_expunge_entry_log_msg(const H5AC_t *cache,
+    haddr_t address,
+    int type_id,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+    /* Create the log message string
+     * NOTE(review): haddr_t is printed via an (unsigned long) cast --
+     * on LLP64 platforms long is 32 bits, so addresses could truncate;
+     * TODO confirm/use a 64-bit format.
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"expunge\",\
+\"address\":0x%lx,\
+\"type_id\":%d,\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (unsigned long)address, (int)type_id, (int)fxn_ret_value);
+
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_expunge_entry_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_flush_cache_log_msg
+ *
+ * Purpose: Write a log message for cache flushes.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_flush_cache_log_msg(const H5AC_t *cache,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+    /* Create the log message string
+     * (records the flush event and the wrapped function's return value)
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"flush\",\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (int)fxn_ret_value);
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_flush_cache_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_insert_entry_log_msg
+ *
+ * Purpose: Write a log message for insertion of cache entries.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_insert_entry_log_msg(const H5AC_t *cache,
+    haddr_t address,
+    int type_id,
+    unsigned flags,
+    size_t size,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+
+    /* Create the log message string
+     * NOTE(review): size_t is narrowed to int for "%d" -- entries larger
+     * than INT_MAX would log a garbage size; TODO confirm acceptable.
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"insert\",\
+\"address\":0x%lx,\
+\"flags\":0x%x,\
+\"type_id\":%d,\
+\"size\":%d,\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (unsigned long)address, flags, type_id,
+      (int)size, (int)fxn_ret_value);
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_insert_entry_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_mark_dirty_entry_log_msg
+ *
+ * Purpose: Write a log message for marking cache entries as dirty.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_mark_dirty_entry_log_msg(const H5AC_t *cache,
+    const H5AC_info_t *entry,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+    HDassert(entry);
+
+    /* Create the log message string
+     * (identifies the entry by its cache address)
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"dirty\",\
+\"address\":0x%lx,\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (unsigned long)entry->addr, (int)fxn_ret_value);
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_mark_dirty_entry_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_move_entry_log_msg
+ *
+ * Purpose: Write a log message for moving a cache entry.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_move_entry_log_msg(const H5AC_t *cache,
+    haddr_t old_addr,
+    haddr_t new_addr,
+    int type_id,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+    /* Create the log message string
+     * (records both the old and new cache addresses of the moved entry)
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"move\",\
+\"old_address\":0x%lx,\
+\"new_address\":0x%lx,\
+\"type_id\":%d,\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (unsigned long)old_addr,
+      (unsigned long)new_addr, type_id, (int)fxn_ret_value);
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_move_entry_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_pin_entry_log_msg
+ *
+ * Purpose: Write a log message for pinning a cache entry.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_pin_entry_log_msg(const H5AC_t *cache,
+    const H5AC_info_t *entry,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+    HDassert(entry);
+
+    /* Create the log message string
+     * (identifies the pinned entry by its cache address)
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"pin\",\
+\"address\":0x%lx,\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (unsigned long)entry->addr,
+      (int)fxn_ret_value);
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_pin_entry_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_create_fd_log_msg
+ *
+ * Purpose: Write a log message for creating a flush dependency between
+ * two cache entries.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_create_fd_log_msg(const H5AC_t *cache,
+    const H5AC_info_t *parent,
+    const H5AC_info_t *child,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+    HDassert(parent);
+    HDassert(child);
+
+    /* Create the log message string
+     * (records the addresses of both ends of the new flush dependency)
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"create_fd\",\
+\"parent_addr\":0x%lx,\
+\"child_addr\":0x%lx,\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (unsigned long)parent->addr,
+      (unsigned long)child->addr, (int)fxn_ret_value);
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_create_fd_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_protect_entry_log_msg
+ *
+ * Purpose: Write a log message for protecting a cache entry.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_protect_entry_log_msg(const H5AC_t *cache,
+    const H5AC_info_t *entry,
+    unsigned flags,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    char rw_s[16];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+    HDassert(entry);
+
+    /* 'flags' is a bit field, so test the read-only bit instead of
+     * comparing for equality -- an equality test would mis-report a
+     * read-only protect as "WRITE" whenever any other flag bit is set.
+     */
+    if(flags & H5AC__READ_ONLY_FLAG)
+        HDstrcpy(rw_s, "READ");
+    else
+        HDstrcpy(rw_s, "WRITE");
+
+    /* Create the log message string */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"protect\",\
+\"address\":0x%lx,\
+\"readwrite\":\"%s\",\
+\"size\":%d,\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (unsigned long)entry->addr,
+      rw_s, (int)entry->size, (int)fxn_ret_value);
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_protect_entry_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_resize_entry_log_msg
+ *
+ * Purpose: Write a log message for resizing a cache entry.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_resize_entry_log_msg(const H5AC_t *cache,
+    const H5AC_info_t *entry,
+    size_t new_size,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+    HDassert(entry);
+
+    /* Create the log message string
+     * NOTE(review): new_size (size_t) is narrowed to int for "%d";
+     * sizes above INT_MAX would log incorrectly -- TODO confirm acceptable.
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"resize\",\
+\"address\":0x%lx,\
+\"new_size\":%d,\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (unsigned long)entry->addr,
+      (int)new_size, (int)fxn_ret_value);
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_resize_entry_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_unpin_entry_log_msg
+ *
+ * Purpose: Write a log message for unpinning a cache entry.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_unpin_entry_log_msg(const H5AC_t *cache,
+    const H5AC_info_t *entry,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+    HDassert(entry);
+
+    /* Create the log message string
+     * (identifies the unpinned entry by its cache address)
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"unpin\",\
+\"address\":0x%lx,\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (unsigned long)entry->addr,
+      (int)fxn_ret_value);
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_unpin_entry_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_destroy_fd_log_msg
+ *
+ * Purpose: Write a log message for destroying a flush dependency
+ * between two cache entries.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_destroy_fd_log_msg(const H5AC_t *cache,
+    const H5AC_info_t *parent,
+    const H5AC_info_t *child,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+    HDassert(parent);
+    HDassert(child);
+
+    /* Create the log message string
+     * (records the addresses of both ends of the destroyed flush dependency)
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"destroy_fd\",\
+\"parent_addr\":0x%lx,\
+\"child_addr\":0x%lx,\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (unsigned long)parent->addr,
+      (unsigned long)child->addr, (int)fxn_ret_value);
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_destroy_fd_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_unprotect_entry_log_msg
+ *
+ * Purpose: Write a log message for unprotecting a cache entry.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_unprotect_entry_log_msg(const H5AC_t *cache,
+    const H5AC_info_t *entry,
+    int type_id,
+    unsigned flags,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+    HDassert(entry);
+
+    /* Create the log message string.
+     *
+     * NOTE: a stray HDsnprintf(msg, MSG_SIZE, " ") used to follow this
+     * call, overwriting the record with a single space before it was
+     * written -- every unprotect event was logged blank.  Removed.
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"unprotect\",\
+\"address\":0x%lx,\
+\"id\":%d,\
+\"flags\":%x,\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (unsigned long)entry->addr,
+      type_id, flags, (int)fxn_ret_value);
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_unprotect_entry_log_msg() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_set_cache_config_log_msg
+ *
+ * Purpose: Write a log message for setting the cache configuration.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_set_cache_config_log_msg(const H5AC_t *cache,
+    const H5AC_cache_config_t *config,
+    herr_t fxn_ret_value)
+{
+    char msg[MSG_SIZE];
+    herr_t ret_value = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(cache);
+    HDassert(cache->magic == H5C__H5C_T_MAGIC);
+    HDassert(config);
+
+    /* Create the log message string
+     * NOTE(review): 'config' is only sanity-checked, never logged --
+     * the record says a config change happened but not what it was.
+     */
+    HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"set_config\",\
+\"returned\":%d\
+},\n\
+"
+    , (long long)time(NULL), (int)fxn_ret_value);
+
+
+    /* Write the log message to the file */
+    if(H5C_write_log_message(cache, msg) < 0)
+        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_set_cache_config_log_msg() */
+
diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c
index fdad19a..8655a6f 100644
--- a/src/H5ACmpio.c
+++ b/src/H5ACmpio.c
@@ -1016,7 +1016,7 @@ H5AC__log_moved_entry(const H5F_t *f, haddr_t old_addr, haddr_t new_addr)
/* get entry status, size, etc here */
if(H5C_get_entry_status(f, old_addr, &entry_size, &entry_in_cache,
- &entry_dirty, NULL, NULL, NULL, NULL) < 0)
+ &entry_dirty, NULL, NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get entry status.")
if(!entry_in_cache)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry not in cache.")
diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h
index 1051373..82e500e 100644
--- a/src/H5ACpkg.h
+++ b/src/H5ACpkg.h
@@ -420,5 +420,65 @@ H5_DLL herr_t H5AC__set_write_done_callback(H5C_t * cache_ptr,
#endif /* H5_HAVE_PARALLEL */
+/******************************/
+/* Package Private Prototypes */
+/******************************/
+
+/* Cache logging routines */
+H5_DLL herr_t H5AC__write_create_cache_log_msg(H5AC_t *cache);
+H5_DLL herr_t H5AC__write_destroy_cache_log_msg(H5AC_t *cache);
+H5_DLL herr_t H5AC__write_evict_cache_log_msg(const H5AC_t *cache,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_expunge_entry_log_msg(const H5AC_t *cache,
+ haddr_t address,
+ int type_id,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_flush_cache_log_msg(const H5AC_t *cache,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_insert_entry_log_msg(const H5AC_t *cache,
+ haddr_t address,
+ int type_id,
+ unsigned flags,
+ size_t size,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_mark_dirty_entry_log_msg(const H5AC_t *cache,
+ const H5AC_info_t *entry,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_move_entry_log_msg(const H5AC_t *cache,
+ haddr_t old_addr,
+ haddr_t new_addr,
+ int type_id,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_pin_entry_log_msg(const H5AC_t *cache,
+ const H5AC_info_t *entry,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_create_fd_log_msg(const H5AC_t *cache,
+ const H5AC_info_t *parent,
+ const H5AC_info_t *child,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_protect_entry_log_msg(const H5AC_t *cache,
+ const H5AC_info_t *entry,
+ unsigned flags,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_resize_entry_log_msg(const H5AC_t *cache,
+ const H5AC_info_t *entry,
+ size_t new_size,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_unpin_entry_log_msg(const H5AC_t *cache,
+ const H5AC_info_t *entry,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_destroy_fd_log_msg(const H5AC_t *cache,
+ const H5AC_info_t *parent,
+ const H5AC_info_t *child,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_unprotect_entry_log_msg(const H5AC_t *cache,
+ const H5AC_info_t *entry,
+ int type_id,
+ unsigned flags,
+ herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_set_cache_config_log_msg(const H5AC_t *cache,
+ const H5AC_cache_config_t *config,
+ herr_t fxn_ret_value);
+
#endif /* _H5ACpkg_H */
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index e634a45..1c81976 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -53,35 +53,36 @@
/* Types of metadata objects cached */
typedef enum {
- H5AC_BT_ID = 0, /*B-tree nodes */
- H5AC_SNODE_ID, /*symbol table nodes */
- H5AC_LHEAP_PRFX_ID, /*local heap prefix */
- H5AC_LHEAP_DBLK_ID, /*local heap data block */
- H5AC_GHEAP_ID, /*global heap */
- H5AC_OHDR_ID, /*object header */
- H5AC_OHDR_CHK_ID, /*object header chunk */
- H5AC_BT2_HDR_ID, /*v2 B-tree header */
- H5AC_BT2_INT_ID, /*v2 B-tree internal node */
- H5AC_BT2_LEAF_ID, /*v2 B-tree leaf node */
- H5AC_FHEAP_HDR_ID, /*fractal heap header */
- H5AC_FHEAP_DBLOCK_ID, /*fractal heap direct block */
- H5AC_FHEAP_IBLOCK_ID, /*fractal heap indirect block */
- H5AC_FSPACE_HDR_ID, /*free space header */
- H5AC_FSPACE_SINFO_ID,/*free space sections */
- H5AC_SOHM_TABLE_ID, /*shared object header message master table */
- H5AC_SOHM_LIST_ID, /*shared message index stored as a list */
- H5AC_EARRAY_HDR_ID, /*extensible array header */
- H5AC_EARRAY_IBLOCK_ID, /*extensible array index block */
- H5AC_EARRAY_SBLOCK_ID, /*extensible array super block */
- H5AC_EARRAY_DBLOCK_ID, /*extensible array data block */
- H5AC_EARRAY_DBLK_PAGE_ID, /*extensible array data block page */
- H5AC_FARRAY_HDR_ID, /*fixed array header */
- H5AC_FARRAY_DBLOCK_ID, /*fixed array data block */
- H5AC_FARRAY_DBLK_PAGE_ID, /*fixed array data block page */
- H5AC_SUPERBLOCK_ID, /* file superblock */
- H5AC_DRVRINFO_ID, /* driver info block (supplements superblock)*/
- H5AC_TEST_ID, /*test entry -- not used for actual files */
- H5AC_NTYPES /* Number of types, must be last */
+ H5AC_BT_ID = 0, /* ( 0) B-tree nodes */
+ H5AC_SNODE_ID, /* ( 1) symbol table nodes */
+ H5AC_LHEAP_PRFX_ID, /* ( 2) local heap prefix */
+ H5AC_LHEAP_DBLK_ID, /* ( 3) local heap data block */
+ H5AC_GHEAP_ID, /* ( 4) global heap */
+ H5AC_OHDR_ID, /* ( 5) object header */
+ H5AC_OHDR_CHK_ID, /* ( 6) object header chunk */
+ H5AC_OHDR_PROXY_ID, /* ( 7) object header proxy */
+ H5AC_BT2_HDR_ID, /* ( 8) v2 B-tree header */
+ H5AC_BT2_INT_ID, /* ( 9) v2 B-tree internal node */
+ H5AC_BT2_LEAF_ID, /* (10) v2 B-tree leaf node */
+ H5AC_FHEAP_HDR_ID, /* (11) fractal heap header */
+ H5AC_FHEAP_DBLOCK_ID, /* (12) fractal heap direct block */
+ H5AC_FHEAP_IBLOCK_ID, /* (13) fractal heap indirect block */
+ H5AC_FSPACE_HDR_ID, /* (14) free space header */
+ H5AC_FSPACE_SINFO_ID, /* (15) free space sections */
+ H5AC_SOHM_TABLE_ID, /* (16) shared object header message master table */
+ H5AC_SOHM_LIST_ID, /* (17) shared message index stored as a list */
+ H5AC_EARRAY_HDR_ID, /* (18) extensible array header */
+ H5AC_EARRAY_IBLOCK_ID, /* (19) extensible array index block */
+ H5AC_EARRAY_SBLOCK_ID, /* (20) extensible array super block */
+ H5AC_EARRAY_DBLOCK_ID, /* (21) extensible array data block */
+ H5AC_EARRAY_DBLK_PAGE_ID, /* (22) extensible array data block page */
+ H5AC_FARRAY_HDR_ID, /* (23) fixed array header */
+ H5AC_FARRAY_DBLOCK_ID, /* (24) fixed array data block */
+ H5AC_FARRAY_DBLK_PAGE_ID, /* (25) fixed array data block page */
+ H5AC_SUPERBLOCK_ID, /* (26) file superblock */
+ H5AC_DRVRINFO_ID, /* (27) driver info block (supplements superblock)*/
+ H5AC_TEST_ID, /* (28) test entry -- not used for actual files */
+ H5AC_NTYPES /* Number of types, must be last */
} H5AC_type_t;
/* H5AC_DUMP_STATS_ON_CLOSE should always be FALSE when
@@ -121,6 +122,12 @@ typedef enum {
#define H5AC__DEFAULT_MAX_CACHE_SIZE H5C__DEFAULT_MAX_CACHE_SIZE
#define H5AC__DEFAULT_MIN_CLEAN_SIZE H5C__DEFAULT_MIN_CLEAN_SIZE
+/* Check if we are sanity checking tagging */
+#if H5C_DO_TAGGING_SANITY_CHECKS
+#define H5AC_DO_TAGGING_SANITY_CHECKS 1
+#else
+#define H5AC_DO_TAGGING_SANITY_CHECKS 0
+#endif
/*
* Class methods pertaining to caching. Each type of cached object will
@@ -132,6 +139,11 @@ typedef enum {
#define H5AC__SERIALIZE_MOVED_FLAG H5C__SERIALIZE_MOVED_FLAG
#define H5AC__SERIALIZE_COMPRESSED_FLAG H5C__SERIALIZE_COMPRESSED_FLAG
+/* Cork actions: cork/uncork/get cork status of an object */
+#define H5AC__SET_CORK H5C__SET_CORK
+#define H5AC__UNCORK H5C__UNCORK
+#define H5AC__GET_CORKED H5C__GET_CORKED
+
/* Aliases for the "ring" type and values */
typedef H5C_ring_t H5AC_ring_t;
#define H5AC_RING_INV H5C_RING_UNDEFINED
@@ -153,11 +165,13 @@ typedef H5C_notify_action_t H5AC_notify_action_t;
#define H5AC__CLASS_COMPRESSED_FLAG H5C__CLASS_COMPRESSED_FLAG
/* The following flags should only appear in test code */
+/* The H5AC__CLASS_SKIP_READS & H5AC__CLASS_SKIP_WRITES flags are used in H5Oproxy.c */
#define H5AC__CLASS_NO_IO_FLAG H5C__CLASS_NO_IO_FLAG
#define H5AC__CLASS_SKIP_READS H5C__CLASS_SKIP_READS
#define H5AC__CLASS_SKIP_WRITES H5C__CLASS_SKIP_WRITES
typedef H5C_get_load_size_func_t H5AC_get_load_size_func_t;
+typedef H5C_verify_chksum_func_t H5AC_verify_chksum_func_t;
typedef H5C_deserialize_func_t H5AC_deserialize_func_t;
typedef H5C_image_len_func_t H5AC_image_len_func_t;
@@ -190,10 +204,6 @@ typedef H5C_t H5AC_t;
#define H5AC_COLLECTIVE_META_WRITE_DEF 0
#endif /* H5_HAVE_PARALLEL */
-#define H5AC_METADATA_TAG_NAME "H5AC_metadata_tag"
-#define H5AC_METADATA_TAG_SIZE sizeof(haddr_t)
-#define H5AC_METADATA_TAG_DEF H5AC__INVALID_TAG
-
#define H5AC_RING_NAME "H5AC_ring_type"
/* Dataset transfer property list for flush calls */
@@ -312,6 +322,7 @@ H5_DLLVAR hid_t H5AC_ind_dxpl_id;
#define H5AC__TAKE_OWNERSHIP_FLAG H5C__TAKE_OWNERSHIP_FLAG
#define H5AC__FLUSH_LAST_FLAG H5C__FLUSH_LAST_FLAG
#define H5AC__FLUSH_COLLECTIVELY_FLAG H5C__FLUSH_COLLECTIVELY_FLAG
+#define H5AC__EVICT_ALLOW_LAST_PINS_FLAG H5C__EVICT_ALLOW_LAST_PINS_FLAG
/* #defines of flags used to report entry status in the
@@ -324,6 +335,7 @@ H5_DLLVAR hid_t H5AC_ind_dxpl_id;
#define H5AC_ES__IS_PINNED 0x0008
#define H5AC_ES__IS_FLUSH_DEP_PARENT 0x0010
#define H5AC_ES__IS_FLUSH_DEP_CHILD 0x0020
+#define H5AC_ES__IS_CORKED 0x0040
/* external function declarations: */
@@ -348,6 +360,7 @@ H5_DLL herr_t H5AC_mark_entry_dirty(void *thing);
H5_DLL herr_t H5AC_move_entry(H5F_t *f, const H5AC_class_t *type,
haddr_t old_addr, haddr_t new_addr);
H5_DLL herr_t H5AC_dest(H5F_t *f, hid_t dxpl_id);
+H5_DLL herr_t H5AC_evict(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id,
const H5AC_class_t *type, haddr_t addr, unsigned flags);
H5_DLL herr_t H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
@@ -364,8 +377,11 @@ H5_DLL herr_t H5AC_open_trace_file(H5AC_t *cache_ptr, const char *trace_file_nam
/* Tag & Ring routines */
H5_DLL herr_t H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t *prev_tag);
+H5_DLL herr_t H5AC_flush_tagged_metadata(H5F_t * f, haddr_t metadata_tag, hid_t dxpl_id);
+H5_DLL herr_t H5AC_evict_tagged_metadata(H5F_t * f, haddr_t metadata_tag, hid_t dxpl_id);
H5_DLL herr_t H5AC_retag_copied_metadata(const H5F_t *f, haddr_t metadata_tag);
H5_DLL herr_t H5AC_ignore_tags(const H5F_t *f);
+H5_DLL herr_t H5AC_cork(H5F_t *f, haddr_t obj_addr, unsigned action, hbool_t *corked);
H5_DLL herr_t H5AC_get_entry_ring(const H5F_t *f, haddr_t addr, H5AC_ring_t *ring);
H5_DLL herr_t H5AC_set_ring(hid_t dxpl_id, H5AC_ring_t ring, H5P_genplist_t **dxpl,
H5AC_ring_t *orig_ring);
diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h
index 4e5502d..dd16764 100644
--- a/src/H5ACpublic.h
+++ b/src/H5ACpublic.h
@@ -78,6 +78,8 @@ extern "C" {
* open_trace_file: Boolean field indicating whether the trace_file_name
* field should be used to open a trace file for the cache.
*
+ * *** DEPRECATED *** Use H5Fstart/stop logging functions instead
+ *
* The trace file is a debuging feature that allow the capture of
* top level metadata cache requests for purposes of debugging and/or
* optimization. This field should normally be set to FALSE, as
@@ -91,6 +93,8 @@ extern "C" {
* close_trace_file: Boolean field indicating whether the current trace
* file (if any) should be closed.
*
+ * *** DEPRECATED *** Use H5Fstart/stop logging functions instead
+ *
* See the above comments on the open_trace_file field. This field
* should be set to FALSE unless there is an open trace file on the
* cache that you wish to close.
@@ -98,6 +102,8 @@ extern "C" {
* trace_file_name: Full path of the trace file to be opened if the
* open_trace_file field is TRUE.
*
+ * *** DEPRECATED *** Use H5Fstart/stop logging functions instead
+ *
* In the parallel case, an ascii representation of the mpi rank of
* the process will be appended to the file name to yield a unique
* trace file name for each process.
diff --git a/src/H5Adense.c b/src/H5Adense.c
index 3dc3a42..8bc0e05 100644
--- a/src/H5Adense.c
+++ b/src/H5Adense.c
@@ -126,6 +126,7 @@ typedef struct H5A_bt2_ud_rm_t {
/* downward */
H5A_bt2_ud_common_t common; /* Common info for B-tree user data (must be first) */
haddr_t corder_bt2_addr; /* v2 B-tree address of creation order index */
+ void *parent; /* Flush dependency parent */
} H5A_bt2_ud_rm_t;
/*
@@ -140,6 +141,7 @@ typedef struct H5A_bt2_ud_rmbi_t {
H5HF_t *shared_fheap; /* Fractal heap handle for shared messages */
H5_index_t idx_type; /* Index type for operation */
haddr_t other_bt2_addr; /* v2 B-tree address of "other" index */
+ void *parent; /* Flush dependency parent */
} H5A_bt2_ud_rmbi_t;
@@ -183,7 +185,7 @@ typedef struct H5A_bt2_ud_rmbi_t {
*-------------------------------------------------------------------------
*/
herr_t
-H5A_dense_create(H5F_t *f, hid_t dxpl_id, H5O_ainfo_t *ainfo)
+H5A_dense_create(H5F_t *f, hid_t dxpl_id, H5O_ainfo_t *ainfo, void *parent)
{
H5HF_create_t fheap_cparam; /* Fractal heap creation parameters */
H5B2_create_t bt2_cparam; /* v2 B-tree creation parameters */
@@ -246,7 +248,7 @@ HDfprintf(stderr, "%s: fheap_id_len = %Zu\n", FUNC, fheap_id_len);
H5O_FHEAP_ID_LEN; /* Fractal heap ID */
bt2_cparam.split_percent = H5A_NAME_BT2_SPLIT_PERC;
bt2_cparam.merge_percent = H5A_NAME_BT2_MERGE_PERC;
- if(NULL == (bt2_name = H5B2_create(f, dxpl_id, &bt2_cparam, NULL)))
+ if(NULL == (bt2_name = H5B2_create(f, dxpl_id, &bt2_cparam, NULL, parent)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, FAIL, "unable to create v2 B-tree for name index")
/* Retrieve the v2 B-tree's address in the file */
@@ -267,7 +269,7 @@ HDfprintf(stderr, "%s: ainfo->name_bt2_addr = %a\n", FUNC, ainfo->name_bt2_addr)
H5O_FHEAP_ID_LEN; /* Fractal heap ID */
bt2_cparam.split_percent = H5A_CORDER_BT2_SPLIT_PERC;
bt2_cparam.merge_percent = H5A_CORDER_BT2_MERGE_PERC;
- if(NULL == (bt2_corder = H5B2_create(f, dxpl_id, &bt2_cparam, NULL)))
+ if(NULL == (bt2_corder = H5B2_create(f, dxpl_id, &bt2_cparam, NULL, parent)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, FAIL, "unable to create v2 B-tree for creation order index")
/* Retrieve the v2 B-tree's address in the file */
@@ -339,7 +341,8 @@ H5A__dense_fnd_cb(const H5A_t *attr, hbool_t *took_ownership, void *_user_attr)
*-------------------------------------------------------------------------
*/
H5A_t *
-H5A_dense_open(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, const char *name)
+H5A_dense_open(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
+ const char *name, void *parent)
{
H5A_bt2_ud_common_t udata; /* User data for v2 B-tree modify */
H5HF_t *fheap = NULL; /* Fractal heap handle */
@@ -383,7 +386,7 @@ H5A_dense_open(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, const char *na
} /* end if */
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL, parent)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, NULL, "unable to open v2 B-tree for name index")
/* Create the "udata" information for v2 B-tree record find */
@@ -442,6 +445,7 @@ H5A_dense_insert(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, H5A_t *attr)
uint8_t attr_buf[H5A_ATTR_BUF_SIZE]; /* Buffer for serializing message */
unsigned mesg_flags = 0; /* Flags for storing message */
htri_t attr_sharable; /* Flag indicating attributes are sharable */
+ H5O_proxy_t *oh_proxy = NULL; /* Attribute's object header proxy */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -527,8 +531,14 @@ H5A_dense_insert(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, H5A_t *attr)
HGOTO_ERROR(H5E_ATTR, H5E_CANTINSERT, FAIL, "unable to insert attribute into fractal heap")
} /* end else */
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy(&attr->oloc, dxpl_id)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin attribute object header proxy")
+
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL, oh_proxy)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Create the callback information for v2 B-tree record insertion */
@@ -552,7 +562,7 @@ H5A_dense_insert(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, H5A_t *attr)
if(ainfo->index_corder) {
/* Open the creation order index v2 B-tree */
HDassert(H5F_addr_defined(ainfo->corder_bt2_addr));
- if(NULL == (bt2_corder = H5B2_open(f, dxpl_id, ainfo->corder_bt2_addr, NULL)))
+ if(NULL == (bt2_corder = H5B2_open(f, dxpl_id, ainfo->corder_bt2_addr, NULL, oh_proxy)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for creation order index")
/* Insert the record into the creation order index v2 B-tree */
@@ -562,6 +572,8 @@ H5A_dense_insert(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, H5A_t *attr)
done:
/* Release resources */
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(shared_fheap && H5HF_close(shared_fheap, dxpl_id) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap")
if(fheap && H5HF_close(fheap, dxpl_id) < 0)
@@ -636,6 +648,7 @@ H5A__dense_write_bt2_cb(void *_record, void *_op_data, hbool_t *changed)
H5B2_t *bt2_corder = NULL; /* v2 B-tree handle for creation order index */
H5WB_t *wb = NULL; /* Wrapped buffer for attribute data */
uint8_t attr_buf[H5A_ATTR_BUF_SIZE]; /* Buffer for serializing attribute */
+ H5O_proxy_t *oh_proxy = NULL; /* Attribute's object header proxy */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -659,8 +672,14 @@ H5A__dense_write_bt2_cb(void *_record, void *_op_data, hbool_t *changed)
if(H5F_addr_defined(op_data->corder_bt2_addr)) {
H5A_bt2_ud_common_t udata; /* User data for v2 B-tree modify */
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(op_data->f) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy(&op_data->attr->oloc, op_data->dxpl_id)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin attribute object header proxy")
+
/* Open the creation order index v2 B-tree */
- if(NULL == (bt2_corder = H5B2_open(op_data->f, op_data->dxpl_id, op_data->corder_bt2_addr, NULL)))
+ if(NULL == (bt2_corder = H5B2_open(op_data->f, op_data->dxpl_id, op_data->corder_bt2_addr, NULL, oh_proxy)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for creation order index")
/* Create the "udata" information for v2 B-tree record modify */
@@ -721,6 +740,8 @@ H5A__dense_write_bt2_cb(void *_record, void *_op_data, hbool_t *changed)
done:
/* Release resources */
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(bt2_corder && H5B2_close(bt2_corder, op_data->dxpl_id) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for creation order index")
if(wb && H5WB_unwrap(wb) < 0)
@@ -752,6 +773,7 @@ H5A_dense_write(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, H5A_t *attr)
H5HF_t *shared_fheap = NULL; /* Fractal heap handle for shared header messages */
H5B2_t *bt2_name = NULL; /* v2 B-tree handle for name index */
htri_t attr_sharable; /* Flag indicating attributes are sharable */
+ H5O_proxy_t *oh_proxy = NULL; /* Attribute's object header proxy */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -789,8 +811,14 @@ H5A_dense_write(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, H5A_t *attr)
if(NULL == (fheap = H5HF_open(f, dxpl_id, ainfo->fheap_addr)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open fractal heap")
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy(&attr->oloc, dxpl_id)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin attribute object header proxy")
+
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL, oh_proxy)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Create the "udata" information for v2 B-tree record modify */
@@ -819,6 +847,8 @@ H5A_dense_write(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, H5A_t *attr)
done:
/* Release resources */
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(shared_fheap && H5HF_close(shared_fheap, dxpl_id) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close fractal heap")
if(fheap && H5HF_close(fheap, dxpl_id) < 0)
@@ -888,8 +918,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5A_dense_rename(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, const char *old_name,
- const char *new_name)
+H5A_dense_rename(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
+ const char *old_name, const char *new_name, void *parent)
{
H5A_bt2_ud_common_t udata; /* User data for v2 B-tree modify */
H5HF_t *fheap = NULL; /* Fractal heap handle */
@@ -936,7 +966,7 @@ H5A_dense_rename(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, const char *
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open fractal heap")
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL, parent)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Create the "udata" information for v2 B-tree record modify */
@@ -1011,7 +1041,7 @@ H5A_dense_rename(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, const char *
HGOTO_ERROR(H5E_ATTR, H5E_WRITEERROR, FAIL, "error determining if message should be shared")
/* Delete old attribute from dense storage */
- if(H5A_dense_remove(f, dxpl_id, ainfo, old_name) < 0)
+ if(H5A_dense_remove(f, dxpl_id, ainfo, old_name, parent) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTDELETE, FAIL, "unable to delete attribute in dense storage")
done:
@@ -1140,8 +1170,9 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5A_dense_iterate(H5F_t *f, hid_t dxpl_id, hid_t loc_id, const H5O_ainfo_t *ainfo,
- H5_index_t idx_type, H5_iter_order_t order, hsize_t skip, hsize_t *last_attr,
+H5A_dense_iterate(H5F_t *f, hid_t dxpl_id, hid_t loc_id,
+ const H5O_ainfo_t *ainfo, H5_index_t idx_type, H5_iter_order_t order,
+ hsize_t skip, void *parent, hsize_t *last_attr,
const H5A_attr_iter_op_t *attr_op, void *op_data)
{
H5HF_t *fheap = NULL; /* Fractal heap handle */
@@ -1215,7 +1246,7 @@ H5A_dense_iterate(H5F_t *f, hid_t dxpl_id, hid_t loc_id, const H5O_ainfo_t *ainf
} /* end if */
/* Open the index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, bt2_addr, NULL)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, bt2_addr, NULL, parent)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for index")
/* Construct the user data for v2 B-tree iterator callback */
@@ -1241,7 +1272,7 @@ H5A_dense_iterate(H5F_t *f, hid_t dxpl_id, hid_t loc_id, const H5O_ainfo_t *ainf
else {
/* Build the table of attributes for this object */
/* (build table using the name index, but sort according to idx_type) */
- if(H5A_dense_build_table(f, dxpl_id, ainfo, idx_type, order, &atable) < 0)
+ if(H5A_dense_build_table(f, dxpl_id, ainfo, idx_type, order, parent, &atable) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, FAIL, "error building table of attributes")
/* Iterate over attributes in table */
@@ -1291,7 +1322,7 @@ H5A__dense_remove_bt2_cb(const void *_record, void *_udata)
/* Check for removing the link from the creation order index */
if(H5F_addr_defined(udata->corder_bt2_addr)) {
/* Open the creation order index v2 B-tree */
- if(NULL == (bt2_corder = H5B2_open(udata->common.f, udata->common.dxpl_id, udata->corder_bt2_addr, NULL)))
+ if(NULL == (bt2_corder = H5B2_open(udata->common.f, udata->common.dxpl_id, udata->corder_bt2_addr, NULL, udata->parent)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for creation order index")
/* Set up the user data for the v2 B-tree 'record remove' callback */
@@ -1342,7 +1373,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5A_dense_remove(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, const char *name)
+H5A_dense_remove(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
+ const char *name, void *parent)
{
H5A_bt2_ud_rm_t udata; /* User data for v2 B-tree record removal */
H5HF_t *fheap = NULL; /* Fractal heap handle */
@@ -1386,7 +1418,7 @@ H5A_dense_remove(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, const char *
} /* end if */
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL, parent)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Set up the user data for the v2 B-tree 'record remove' callback */
@@ -1399,6 +1431,7 @@ H5A_dense_remove(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, const char *
udata.common.found_op = H5A__dense_fnd_cb; /* v2 B-tree comparison callback */
udata.common.found_op_data = &attr_copy;
udata.corder_bt2_addr = ainfo->corder_bt2_addr;
+ udata.parent = parent;
/* Remove the record from the name index v2 B-tree */
if(H5B2_remove(bt2_name, dxpl_id, &udata, H5A__dense_remove_bt2_cb, &udata) < 0)
@@ -1500,7 +1533,7 @@ H5A__dense_remove_by_idx_bt2_cb(const void *_record, void *_bt2_udata)
} /* end else */
/* Open the index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(bt2_udata->f, bt2_udata->dxpl_id, bt2_udata->other_bt2_addr, NULL)))
+ if(NULL == (bt2 = H5B2_open(bt2_udata->f, bt2_udata->dxpl_id, bt2_udata->other_bt2_addr, NULL, bt2_udata->parent)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for index")
/* Set the common information for the v2 B-tree remove operation */
@@ -1562,7 +1595,7 @@ done:
*/
herr_t
H5A_dense_remove_by_idx(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
- H5_index_t idx_type, H5_iter_order_t order, hsize_t n)
+ H5_index_t idx_type, H5_iter_order_t order, hsize_t n, void *parent)
{
H5HF_t *fheap = NULL; /* Fractal heap handle */
H5HF_t *shared_fheap = NULL; /* Fractal heap handle for shared header messages */
@@ -1632,7 +1665,7 @@ H5A_dense_remove_by_idx(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
} /* end if */
/* Open the index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, bt2_addr, NULL)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, bt2_addr, NULL, parent)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for index")
/* Set up the user data for the v2 B-tree 'record remove' callback */
@@ -1642,15 +1675,16 @@ H5A_dense_remove_by_idx(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
udata.shared_fheap = shared_fheap;
udata.idx_type = idx_type;
udata.other_bt2_addr = idx_type == H5_INDEX_NAME ? ainfo->corder_bt2_addr : ainfo->name_bt2_addr;
+ udata.parent = parent;
/* Remove the record from the name index v2 B-tree */
- if(H5B2_remove_by_idx(bt2, dxpl_id, order, n, H5A__dense_remove_by_idx_bt2_cb, &udata) < 0)
+ if(H5B2_remove_by_idx(bt2, dxpl_id, order, n, NULL, H5A__dense_remove_by_idx_bt2_cb, &udata) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTREMOVE, FAIL, "unable to remove attribute from v2 B-tree index")
} /* end if */
else {
/* Build the table of attributes for this object */
/* (build table using the name index, but sort according to idx_type) */
- if(H5A_dense_build_table(f, dxpl_id, ainfo, idx_type, order, &atable) < 0)
+ if(H5A_dense_build_table(f, dxpl_id, ainfo, idx_type, order, parent, &atable) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, FAIL, "error building table of attributes")
/* Check for skipping too many attributes */
@@ -1658,7 +1692,7 @@ H5A_dense_remove_by_idx(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid index specified")
/* Delete appropriate attribute from dense storage */
- if(H5A_dense_remove(f, dxpl_id, ainfo, ((atable.attrs[n])->shared)->name) < 0)
+ if(H5A_dense_remove(f, dxpl_id, ainfo, ((atable.attrs[n])->shared)->name, parent) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTDELETE, FAIL, "unable to delete attribute in dense storage")
} /* end else */
@@ -1692,7 +1726,8 @@ done:
*-------------------------------------------------------------------------
*/
htri_t
-H5A_dense_exists(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, const char *name)
+H5A_dense_exists(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
+ const char *name, void *parent)
{
H5A_bt2_ud_common_t udata; /* User data for v2 B-tree modify */
H5HF_t *fheap = NULL; /* Fractal heap handle */
@@ -1735,7 +1770,7 @@ H5A_dense_exists(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, const char *
} /* end if */
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL, parent)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Create the "udata" information for v2 B-tree record 'find' */
@@ -1846,7 +1881,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5A_dense_delete(H5F_t *f, hid_t dxpl_id, H5O_ainfo_t *ainfo)
+H5A_dense_delete(H5F_t *f, hid_t dxpl_id, H5O_ainfo_t *ainfo, void *parent)
{
H5A_bt2_ud_common_t udata; /* v2 B-tree user data for deleting attributes */
H5HF_t *fheap = NULL; /* Fractal heap handle */
@@ -1876,7 +1911,7 @@ H5A_dense_delete(H5F_t *f, hid_t dxpl_id, H5O_ainfo_t *ainfo)
udata.found_op_data = NULL;
/* Delete name index v2 B-tree */
- if(H5B2_delete(f, dxpl_id, ainfo->name_bt2_addr, NULL, H5A__dense_delete_bt2_cb, &udata) < 0)
+ if(H5B2_delete(f, dxpl_id, ainfo->name_bt2_addr, NULL, parent, H5A__dense_delete_bt2_cb, &udata) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTDELETE, FAIL, "unable to delete v2 B-tree for name index")
ainfo->name_bt2_addr = HADDR_UNDEF;
@@ -1888,7 +1923,7 @@ H5A_dense_delete(H5F_t *f, hid_t dxpl_id, H5O_ainfo_t *ainfo)
/* Check if we should delete the creation order index v2 B-tree */
if(H5F_addr_defined(ainfo->corder_bt2_addr)) {
/* Delete the creation order index, without adjusting the ref. count on the attributes */
- if(H5B2_delete(f, dxpl_id, ainfo->corder_bt2_addr, NULL, NULL, NULL) < 0)
+ if(H5B2_delete(f, dxpl_id, ainfo->corder_bt2_addr, NULL, parent, NULL, NULL) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTDELETE, FAIL, "unable to delete v2 B-tree for creation order index")
ainfo->corder_bt2_addr = HADDR_UNDEF;
} /* end if */
diff --git a/src/H5Aint.c b/src/H5Aint.c
index baa352c..bde64c4 100644
--- a/src/H5Aint.c
+++ b/src/H5Aint.c
@@ -207,7 +207,7 @@ H5A_create(const H5G_loc_t *loc, const char *name, const H5T_t *type,
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "invalid datatype location")
/* Set the latest format for datatype, if requested */
- if(H5F_USE_LATEST_FORMAT(loc->oloc->file))
+ if(H5F_USE_LATEST_FLAGS(loc->oloc->file, H5F_LATEST_DATATYPE))
if(H5T_set_latest_version(attr->shared->dt) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of datatype")
@@ -215,7 +215,7 @@ H5A_create(const H5G_loc_t *loc, const char *name, const H5T_t *type,
attr->shared->ds = H5S_copy(space, FALSE, TRUE);
/* Set the latest format for dataspace, if requested */
- if(H5F_USE_LATEST_FORMAT(loc->oloc->file))
+ if(H5F_USE_LATEST_FLAGS(loc->oloc->file, H5F_LATEST_DATASPACE))
if(H5S_set_latest_version(attr->shared->ds) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of dataspace")
@@ -1407,7 +1407,8 @@ done:
*/
herr_t
H5A_dense_build_table(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
- H5_index_t idx_type, H5_iter_order_t order, H5A_attr_table_t *atable)
+ H5_index_t idx_type, H5_iter_order_t order, void *parent,
+ H5A_attr_table_t *atable)
{
H5B2_t *bt2_name = NULL; /* v2 B-tree handle for name index */
hsize_t nrec; /* # of records in v2 B-tree */
@@ -1423,7 +1424,7 @@ H5A_dense_build_table(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
HDassert(atable);
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL, parent)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Retrieve # of records in "name" B-tree */
@@ -1454,7 +1455,7 @@ H5A_dense_build_table(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
/* Iterate over the links in the group, building a table of the link messages */
if(H5A_dense_iterate(f, dxpl_id, (hid_t)0, ainfo, H5_INDEX_NAME,
- H5_ITER_NATIVE, (hsize_t)0, NULL, &attr_op, &udata) < 0)
+ H5_ITER_NATIVE, (hsize_t)0, parent, NULL, &attr_op, &udata) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, FAIL, "error building attribute table")
/* Sort attribute table in correct iteration order */
@@ -1787,8 +1788,9 @@ done:
htri_t
H5A_get_ainfo(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_ainfo_t *ainfo)
{
- H5B2_t *bt2_name = NULL; /* v2 B-tree handle for name index */
- htri_t ret_value = FAIL; /* Return value */
+ H5B2_t *bt2_name = NULL; /* v2 B-tree handle for name index */
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
+ htri_t ret_value = FAIL; /* Return value */
FUNC_ENTER_NOAPI_TAG(dxpl_id, oh->cache_info.addr, FAIL)
@@ -1809,8 +1811,14 @@ H5A_get_ainfo(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_ainfo_t *ainfo)
if(ainfo->nattrs == HSIZET_MAX) {
/* Check if we are using "dense" attribute storage */
if(H5F_addr_defined(ainfo->fheap_addr)) {
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(f, dxpl_id, oh)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin object header proxy")
+
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo->name_bt2_addr, NULL, oh_proxy)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Retrieve # of records in "name" B-tree */
@@ -1826,6 +1834,8 @@ H5A_get_ainfo(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_ainfo_t *ainfo)
done:
/* Release resources */
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(bt2_name && H5B2_close(bt2_name, dxpl_id) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for name index")
@@ -1853,7 +1863,7 @@ herr_t
H5A_set_version(const H5F_t *f, H5A_t *attr)
{
hbool_t type_shared, space_shared; /* Flags to indicate that shared messages are used for this attribute */
- hbool_t use_latest_format; /* Flag indicating the newest file format should be used */
+ hbool_t use_latest_format; /* Flag indicating the latest attribute version support is enabled */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1862,8 +1872,8 @@ H5A_set_version(const H5F_t *f, H5A_t *attr)
HDassert(f);
HDassert(attr);
- /* Get the file's 'use the latest version of the format' flag */
- use_latest_format = H5F_USE_LATEST_FORMAT(f);
+ /* Get the file's 'use the latest attribute version support' flag */
+ use_latest_format = H5F_USE_LATEST_FLAGS(f, H5F_LATEST_ATTRIBUTE);
/* Check whether datatype and dataspace are shared */
if(H5O_msg_is_shared(H5O_DTYPE_ID, attr->shared->dt) > 0)
@@ -1878,7 +1888,7 @@ H5A_set_version(const H5F_t *f, H5A_t *attr)
/* Check which version to encode attribute with */
if(use_latest_format)
- attr->shared->version = H5O_ATTR_VERSION_LATEST; /* Write out latest version of format */
+ attr->shared->version = H5O_ATTR_VERSION_LATEST; /* Write out latest attribute version */
else if(attr->shared->encoding != H5T_CSET_ASCII)
attr->shared->version = H5O_ATTR_VERSION_3; /* Write version which includes the character encoding */
else if(type_shared || space_shared)
@@ -2354,9 +2364,9 @@ H5A_dense_post_copy_file_all(const H5O_loc_t *src_oloc, const H5O_ainfo_t *ainfo
attr_op.op_type = H5A_ATTR_OP_LIB;
attr_op.u.lib_op = H5A__dense_post_copy_file_cb;
-
- if(H5A_dense_iterate(src_oloc->file, dxpl_id, (hid_t)0, ainfo_src, H5_INDEX_NAME,
- H5_ITER_NATIVE, (hsize_t)0, NULL, &attr_op, &udata) < 0)
+ /*!FIXME must pass something for parent once SWMR works with H5Ocopy -NAF */
+ if(H5A_dense_iterate(src_oloc->file, dxpl_id, (hid_t)0, ainfo_src, H5_INDEX_NAME,
+ H5_ITER_NATIVE, (hsize_t)0, NULL, NULL, &attr_op, &udata) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, FAIL, "error building attribute table")
done:
diff --git a/src/H5Apkg.h b/src/H5Apkg.h
index b392497..ab036fa 100644
--- a/src/H5Apkg.h
+++ b/src/H5Apkg.h
@@ -207,26 +207,29 @@ H5_DLL herr_t H5A__read(const H5A_t *attr, const H5T_t *mem_type, void *buf, hid
H5_DLL ssize_t H5A__get_name(H5A_t *attr, size_t buf_size, char *buf);
/* Attribute "dense" storage routines */
-H5_DLL herr_t H5A_dense_create(H5F_t *f, hid_t dxpl_id, H5O_ainfo_t *ainfo);
+H5_DLL herr_t H5A_dense_create(H5F_t *f, hid_t dxpl_id, H5O_ainfo_t *ainfo,
+ void *parent);
H5_DLL H5A_t *H5A_dense_open(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
- const char *name);
+ const char *name, void *parent);
H5_DLL herr_t H5A_dense_insert(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
H5A_t *attr);
H5_DLL herr_t H5A_dense_write(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
H5A_t *attr);
H5_DLL herr_t H5A_dense_rename(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
- const char *old_name, const char *new_name);
+ const char *old_name, const char *new_name, void *parent);
H5_DLL herr_t H5A_dense_iterate(H5F_t *f, hid_t dxpl_id, hid_t loc_id,
const H5O_ainfo_t *ainfo, H5_index_t idx_type, H5_iter_order_t order,
- hsize_t skip, hsize_t *last_attr, const H5A_attr_iter_op_t *attr_op,
+ hsize_t skip, void *parent, hsize_t *last_attr, const H5A_attr_iter_op_t *attr_op,
void *op_data);
-H5_DLL herr_t H5A_dense_remove(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
- const char *name);
-H5_DLL herr_t H5A_dense_remove_by_idx(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
- H5_index_t idx_type, H5_iter_order_t order, hsize_t n);
+H5_DLL herr_t H5A_dense_remove(H5F_t *f, hid_t dxpl_id,
+ const H5O_ainfo_t *ainfo, const char *name, void *parent);
+H5_DLL herr_t H5A_dense_remove_by_idx(H5F_t *f, hid_t dxpl_id,
+ const H5O_ainfo_t *ainfo, H5_index_t idx_type, H5_iter_order_t order,
+ hsize_t n, void *parent);
H5_DLL htri_t H5A_dense_exists(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo,
- const char *name);
-H5_DLL herr_t H5A_dense_delete(H5F_t *f, hid_t dxpl_id, H5O_ainfo_t *ainfo);
+ const char *name, void *parent);
+H5_DLL herr_t H5A_dense_delete(H5F_t *f, hid_t dxpl_id, H5O_ainfo_t *ainfo,
+ void *parent);
/* Attribute table operations */
@@ -234,7 +237,7 @@ H5_DLL herr_t H5A_compact_build_table(H5F_t *f, hid_t dxpl_id, H5O_t *oh,
H5_index_t idx_type, H5_iter_order_t order, H5A_attr_table_t *atable);
H5_DLL herr_t H5A_dense_build_table(H5F_t *f, hid_t dxpl_id,
const H5O_ainfo_t *ainfo, H5_index_t idx_type, H5_iter_order_t order,
- H5A_attr_table_t *atable);
+ void *parent, H5A_attr_table_t *atable);
H5_DLL herr_t H5A_attr_iterate_table(const H5A_attr_table_t *atable,
hsize_t skip, hsize_t *last_attr, hid_t loc_id,
const H5A_attr_iter_op_t *attr_op, void *op_data);
diff --git a/src/H5B.c b/src/H5B.c
index 7f933cd..b000cbd 100644
--- a/src/H5B.c
+++ b/src/H5B.c
@@ -487,6 +487,7 @@ H5B__split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud, unsigned idx,
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create B-tree")
cache_udata.f = f;
cache_udata.type = shared->type;
+ cache_udata.parent = bt_ud->bt->parent;
cache_udata.rc_shared = bt_ud->bt->rc_shared;
if(NULL == (split_bt_ud->bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, split_bt_ud->addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree")
@@ -612,6 +613,8 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
/* Check if the root node split */
if(H5B_INS_NOOP == my_ins) {
+ /* The root node did not split - just update the flush dependency (if
+ * necessary) and exit */
HDassert(!split_bt_ud.bt);
HGOTO_DONE(SUCCEED)
} /* end if */
@@ -875,6 +878,7 @@ H5B__insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
/* Set up user data for cache callbacks */
cache_udata.f = f;
cache_udata.type = type;
+ cache_udata.parent = bt;
cache_udata.rc_shared = rc_shared;
if(0 == bt->nchildren) {
@@ -1052,15 +1056,8 @@ H5B__insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
/*
* Handle changes/additions to children
*/
- if(H5B_INS_CHANGE == my_ins) {
- /*
- * The insertion simply changed the address for the child.
- */
- HDassert(!child_bt_ud.bt);
- HDassert(bt->level == 0);
- bt->child[idx] = new_child_bt_ud.addr;
- bt_ud->cache_flags |= H5AC__DIRTIED_FLAG;
- } else if(H5B_INS_LEFT == my_ins || H5B_INS_RIGHT == my_ins) {
+ HDassert(!(bt->level == 0) != !(child_bt_ud.bt));
+ if(H5B_INS_LEFT == my_ins || H5B_INS_RIGHT == my_ins) {
unsigned *tmp_bt_flags_ptr = NULL;
H5B_t *tmp_bt;
@@ -1087,7 +1084,17 @@ H5B__insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
/* Insert the child */
if(H5B__insert_child(tmp_bt, tmp_bt_flags_ptr, idx, new_child_bt_ud.addr, my_ins, md_key) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, H5B_INS_ERROR, "can't insert child")
- } /* end else-if */
+ } else {
+ if(H5B_INS_CHANGE == my_ins) {
+ /*
+ * The insertion simply changed the address for the child.
+ */
+ HDassert(!child_bt_ud.bt);
+ HDassert(bt->level == 0);
+ bt->child[idx] = new_child_bt_ud.addr;
+ bt_ud->cache_flags |= H5AC__DIRTIED_FLAG;
+ } /* end if */
+ } /* end if */
/*
* If this node split, return the mid key (the one that is shared
@@ -1270,7 +1277,6 @@ H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *typ
FUNC_ENTER_STATIC
HDassert(f);
- HDassert(H5F_addr_defined(addr));
HDassert(type);
HDassert(type->decode);
HDassert(type->cmp3);
@@ -1431,9 +1437,9 @@ H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *typ
bt->nchildren = 0;
/* Delete the node from disk (via the metadata cache) */
- bt_flags |= H5AC__DIRTIED_FLAG;
+ bt_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
H5_CHECK_OVERFLOW(shared->sizeof_rnode, size_t, hsize_t);
- if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, bt_flags | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG) < 0) {
+ if(H5AC_unprotect(f, dxpl_id, H5AC_BT, addr, bt, bt_flags | H5AC__DELETED_FLAG) < 0) {
bt = NULL;
bt_flags = H5AC__NO_FLAGS_SET;
HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, H5B_INS_ERROR, "unable to free B-tree node")
@@ -2032,7 +2038,8 @@ done:
*-------------------------------------------------------------------------
*/
htri_t
-H5B_valid(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr)
+H5B_valid(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
+ void *parent)
{
H5B_t *bt = NULL; /* The B-tree */
H5UC_t *rc_shared; /* Ref-counted shared info */
@@ -2062,6 +2069,7 @@ H5B_valid(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr)
*/
cache_udata.f = f;
cache_udata.type = type;
+ cache_udata.parent = parent;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree node")
diff --git a/src/H5B2.c b/src/H5B2.c
index 7eb88c4..84ff8de 100644
--- a/src/H5B2.c
+++ b/src/H5B2.c
@@ -86,6 +86,8 @@ extern const H5B2_class_t H5G_BT2_CORDER[1];
extern const H5B2_class_t H5SM_INDEX[1];
extern const H5B2_class_t H5A_BT2_NAME[1];
extern const H5B2_class_t H5A_BT2_CORDER[1];
+extern const H5B2_class_t H5D_BT2[1];
+extern const H5B2_class_t H5D_BT2_FILT[1];
const H5B2_class_t *const H5B2_client_class_g[] = {
H5B2_TEST, /* 0 - H5B2_TEST_ID */
@@ -98,6 +100,8 @@ const H5B2_class_t *const H5B2_client_class_g[] = {
H5SM_INDEX, /* 7 - H5B2_SOHM_INDEX_ID */
H5A_BT2_NAME, /* 8 - H5B2_ATTR_DENSE_NAME_ID */
H5A_BT2_CORDER, /* 9 - H5B2_ATTR_DENSE_CORDER_ID */
+ H5D_BT2, /* 10 - H5B2_CDSET_ID */
+ H5D_BT2_FILT, /* 11 - H5B2_CDSET_FILT_ID */
};
@@ -130,7 +134,8 @@ H5FL_DEFINE_STATIC(H5B2_t);
*-------------------------------------------------------------------------
*/
H5B2_t *
-H5B2_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam, void *ctx_udata)
+H5B2_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam,
+ void *ctx_udata, void *parent)
{
H5B2_t *bt2 = NULL; /* Pointer to the B-tree */
H5B2_hdr_t *hdr = NULL; /* Pointer to the B-tree header */
@@ -150,7 +155,7 @@ H5B2_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam, void *ctx_udat
HDcompile_assert(H5B2_NUM_BTREE_ID == NELMTS(H5B2_client_class_g));
/* Create shared v2 B-tree header */
- if(HADDR_UNDEF == (hdr_addr = H5B2__hdr_create(f, dxpl_id, cparam, ctx_udata)))
+ if(HADDR_UNDEF == (hdr_addr = H5B2__hdr_create(f, dxpl_id, cparam, ctx_udata, parent)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, NULL, "can't create v2 B-tree header")
/* Create v2 B-tree wrapper */
@@ -159,6 +164,7 @@ H5B2_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam, void *ctx_udat
/* Look up the B-tree header */
cache_udata.f = f;
+ cache_udata.parent = parent;
cache_udata.addr = hdr_addr;
cache_udata.ctx_udata = ctx_udata;
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC__NO_FLAGS_SET)))
@@ -205,7 +211,7 @@ done:
*-------------------------------------------------------------------------
*/
H5B2_t *
-H5B2_open(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata)
+H5B2_open(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata, void *parent)
{
H5B2_t *bt2 = NULL; /* Pointer to the B-tree */
H5B2_hdr_t *hdr = NULL; /* Pointer to the B-tree header */
@@ -220,6 +226,7 @@ H5B2_open(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata)
/* Look up the B-tree header */
cache_udata.f = f;
+ cache_udata.parent = parent;
cache_udata.addr = addr;
cache_udata.ctx_udata = ctx_udata;
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
@@ -293,7 +300,7 @@ H5B2_insert(H5B2_t *bt2, hid_t dxpl_id, void *udata)
/* Check if the root node is allocated yet */
if(!H5F_addr_defined(hdr->root.addr)) {
/* Create root node as leaf node in B-tree */
- if(H5B2__create_leaf(hdr, dxpl_id, &(hdr->root)) < 0)
+ if(H5B2__create_leaf(hdr, dxpl_id, hdr, &(hdr->root)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create root node")
} /* end if */
/* Check if we need to split the root node (equiv. to a 1->2 node split) */
@@ -305,11 +312,11 @@ H5B2_insert(H5B2_t *bt2, hid_t dxpl_id, void *udata)
/* Attempt to insert record into B-tree */
if(hdr->depth > 0) {
- if(H5B2__insert_internal(hdr, dxpl_id, hdr->depth, NULL, &hdr->root, H5B2_POS_ROOT, udata) < 0)
+ if(H5B2__insert_internal(hdr, dxpl_id, hdr->depth, NULL, &hdr->root, H5B2_POS_ROOT, hdr, udata) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree internal node")
} /* end if */
else {
- if(H5B2__insert_leaf(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, udata) < 0)
+ if(H5B2__insert_leaf(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, hdr, udata) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree leaf node")
} /* end else */
@@ -391,7 +398,7 @@ H5B2_iterate(H5B2_t *bt2, hid_t dxpl_id, H5B2_operator_t op, void *op_data)
/* Iterate through records */
if(hdr->root.node_nrec > 0) {
/* Iterate through nodes */
- if((ret_value = H5B2__iterate_node(hdr, dxpl_id, hdr->depth, &hdr->root, op, op_data)) < 0)
+ if((ret_value = H5B2__iterate_node(hdr, dxpl_id, hdr->depth, &hdr->root, hdr, op, op_data)) < 0)
HERROR(H5E_BTREE, H5E_CANTLIST, "node iteration failed");
} /* end if */
@@ -428,6 +435,7 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
{
H5B2_hdr_t *hdr; /* Pointer to the B-tree header */
H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */
+ void *parent = NULL; /* Parent of current node */
uint16_t depth; /* Current depth of the tree */
int cmp; /* Comparison value of records */
unsigned idx; /* Location of record which matches key */
@@ -477,6 +485,10 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
/* Current depth of the tree */
depth = hdr->depth;
+ /* Set initial parent, if doing swmr writes */
+ if(hdr->swmr_write)
+ parent = hdr;
+
/* Walk down B-tree to find record or leaf node where record is located */
cmp = -1;
curr_pos = H5B2_POS_ROOT;
@@ -485,9 +497,16 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */
/* Lock B-tree current node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, parent, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
+ /* Unpin parent if necessary */
+ if(parent) {
+ if(parent != hdr && H5AC_unpin_entry(parent) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry")
+ parent = NULL;
+ } /* end if */
+
/* Locate node pointer for child */
cmp = H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
if(cmp > 0)
@@ -516,9 +535,13 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
} /* end if */
/* Unlock current node */
- if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, H5AC__NO_FLAGS_SET) < 0)
+ if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, (unsigned)(hdr->swmr_write ? H5AC__PIN_ENTRY_FLAG : H5AC__NO_FLAGS_SET)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ /* Keep track of parent if necessary */
+ if(hdr->swmr_write)
+ parent = internal;
+
/* Set pointer to next node to load */
curr_node_ptr = next_node_ptr;
} /* end if */
@@ -548,9 +571,16 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */
/* Lock B-tree leaf node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, parent, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ /* Unpin parent if necessary */
+ if(parent) {
+ if(parent != hdr && H5AC_unpin_entry(parent) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry")
+ parent = NULL;
+ } /* end if */
+
/* Locate record */
cmp = H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx);
@@ -600,6 +630,12 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
} /* end block */
done:
+ if(parent) {
+ HDassert(ret_value < 0);
+ if(parent != hdr && H5AC_unpin_entry(parent) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry")
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5B2_find() */
@@ -628,6 +664,7 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
{
H5B2_hdr_t *hdr; /* Pointer to the B-tree header */
H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */
+ void *parent = NULL; /* Parent of current node */
uint16_t depth; /* Current depth of the tree */
herr_t ret_value = SUCCEED; /* Return value */
@@ -657,6 +694,10 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
/* Current depth of the tree */
depth = hdr->depth;
+ /* Set initial parent, if doing swmr writes */
+ if(hdr->swmr_write)
+ parent = hdr;
+
/* Check for reverse indexing and map requested index to appropriate forward index */
if(order == H5_ITER_DEC)
idx = curr_node_ptr.all_nrec - (idx + 1);
@@ -668,9 +709,16 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
unsigned u; /* Local index variable */
/* Lock B-tree current node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, parent, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
+ /* Unpin parent if necessary */
+ if(parent) {
+ if(parent != hdr && H5AC_unpin_entry(parent) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry")
+ parent = NULL;
+ } /* end if */
+
/* Search for record with correct index */
for(u = 0; u < internal->nrec; u++) {
/* Check if record is in child node */
@@ -679,9 +727,13 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
next_node_ptr = internal->node_ptrs[u];
/* Unlock current node */
- if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, H5AC__NO_FLAGS_SET) < 0)
+ if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, (unsigned)(hdr->swmr_write ? H5AC__PIN_ENTRY_FLAG : H5AC__NO_FLAGS_SET)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ /* Keep track of parent if necessary */
+ if(hdr->swmr_write)
+ parent = internal;
+
/* Set pointer to next node to load */
curr_node_ptr = next_node_ptr;
@@ -721,9 +773,13 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
next_node_ptr = internal->node_ptrs[u];
/* Unlock current node */
- if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, H5AC__NO_FLAGS_SET) < 0)
+ if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, (unsigned)(hdr->swmr_write ? H5AC__PIN_ENTRY_FLAG : H5AC__NO_FLAGS_SET)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ /* Keep track of parent if necessary */
+ if(hdr->swmr_write)
+ parent = internal;
+
/* Set pointer to next node to load */
curr_node_ptr = next_node_ptr;
} /* end if */
@@ -740,9 +796,16 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */
/* Lock B-tree leaf node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, parent, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ /* Unpin parent if necessary */
+ if(parent) {
+ if(parent != hdr && H5AC_unpin_entry(parent) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry")
+ parent = NULL;
+ } /* end if */
+
/* Sanity check index */
HDassert(idx < leaf->nrec);
@@ -761,6 +824,12 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
} /* end block */
done:
+ if(parent) {
+ HDassert(ret_value < 0);
+ if(parent != hdr && H5AC_unpin_entry(parent) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry")
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5B2_index() */
@@ -804,8 +873,9 @@ H5B2_remove(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_remove_t op,
if(hdr->depth > 0) {
hbool_t depth_decreased = FALSE; /* Flag to indicate whether the depth of the B-tree decreased */
- if(H5B2__remove_internal(hdr, dxpl_id, &depth_decreased, NULL, hdr->depth,
- &(hdr->cache_info), NULL, H5B2_POS_ROOT, &hdr->root, udata, op, op_data) < 0)
+ if(H5B2__remove_internal(hdr, dxpl_id, &depth_decreased, NULL, NULL,
+ hdr->depth, &(hdr->cache_info), NULL, H5B2_POS_ROOT, &hdr->root,
+ udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node")
/* Check for decreasing the depth of the B-tree */
@@ -823,7 +893,7 @@ H5B2_remove(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_remove_t op,
} /* end for */
} /* end if */
else {
- if(H5B2__remove_leaf(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, udata, op, op_data) < 0)
+ if(H5B2__remove_leaf(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, hdr, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node")
} /* end else */
@@ -844,6 +914,12 @@ done:
*
* Purpose: Removes the n'th record from a B-tree.
*
+ * The 'udata' parameter is only used to pass through to the
+ * crt_flush_dep and upd_flush_dep callbacks, so it only
+ * needs to contain enough information for those (if any - it
+ * can be NULL). Specifically, it does not need to identify
+ * the specific record to search for.
+ *
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
@@ -854,7 +930,7 @@ done:
*/
herr_t
H5B2_remove_by_idx(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order,
- hsize_t idx, H5B2_remove_t op, void *op_data)
+ hsize_t idx, void *udata, H5B2_remove_t op, void *op_data)
{
H5B2_hdr_t *hdr; /* Pointer to the B-tree header */
herr_t ret_value = SUCCEED; /* Return value */
@@ -886,8 +962,9 @@ H5B2_remove_by_idx(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order,
if(hdr->depth > 0) {
hbool_t depth_decreased = FALSE; /* Flag to indicate whether the depth of the B-tree decreased */
- if(H5B2__remove_internal_by_idx(hdr, dxpl_id, &depth_decreased, NULL, hdr->depth,
- &(hdr->cache_info), NULL, &hdr->root, H5B2_POS_ROOT, idx, op, op_data) < 0)
+ if(H5B2__remove_internal_by_idx(hdr, dxpl_id, &depth_decreased, NULL,
+ NULL, hdr->depth, &(hdr->cache_info), NULL, &hdr->root,
+ H5B2_POS_ROOT, idx, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node")
/* Check for decreasing the depth of the B-tree */
@@ -905,7 +982,7 @@ H5B2_remove_by_idx(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order,
} /* end for */
} /* end if */
else {
- if(H5B2__remove_leaf_by_idx(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, (unsigned)idx, op, op_data) < 0)
+ if(H5B2__remove_leaf_by_idx(hdr, dxpl_id, &hdr->root, H5B2_POS_ROOT, hdr, (unsigned)idx, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node")
} /* end else */
@@ -1000,11 +1077,11 @@ H5B2_neighbor(H5B2_t *bt2, hid_t dxpl_id, H5B2_compare_t range, void *udata,
/* Attempt to find neighbor record in B-tree */
if(hdr->depth > 0) {
- if(H5B2__neighbor_internal(hdr, dxpl_id, hdr->depth, &hdr->root, NULL, range, udata, op, op_data) < 0)
+ if(H5B2__neighbor_internal(hdr, dxpl_id, hdr->depth, &hdr->root, NULL, range, hdr, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree internal node")
} /* end if */
else {
- if(H5B2__neighbor_leaf(hdr, dxpl_id, &hdr->root, NULL, range, udata, op, op_data) < 0)
+ if(H5B2__neighbor_leaf(hdr, dxpl_id, &hdr->root, NULL, range, hdr, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree leaf node")
} /* end else */
@@ -1039,6 +1116,7 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
{
H5B2_hdr_t *hdr; /* Pointer to the B-tree header */
H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */
+ void *parent = NULL; /* Parent of current node */
H5B2_nodepos_t curr_pos; /* Position of current node */
uint16_t depth; /* Current depth of the tree */
int cmp; /* Comparison value of records */
@@ -1067,6 +1145,10 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
/* Current depth of the tree */
depth = hdr->depth;
+ /* Set initial parent, if doing swmr writes */
+ if(hdr->swmr_write)
+ parent = hdr;
+
/* Walk down B-tree to find record or leaf node where record is located */
cmp = -1;
curr_pos = H5B2_POS_ROOT;
@@ -1076,9 +1158,16 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */
/* Lock B-tree current node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__NO_FLAGS_SET)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, parent, curr_node_ptr.node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
+ /* Unpin parent if necessary */
+ if(parent) {
+ if(parent != hdr && H5AC_unpin_entry(parent) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry")
+ parent = NULL;
+ } /* end if */
+
/* Locate node pointer for child */
cmp = H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
if(cmp > 0)
@@ -1107,9 +1196,13 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
} /* end if */
/* Unlock current node */
- if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, H5AC__NO_FLAGS_SET) < 0)
+ if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, (unsigned)(hdr->swmr_write ? H5AC__PIN_ENTRY_FLAG : H5AC__NO_FLAGS_SET)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ /* Keep track of parent if necessary */
+ if(hdr->swmr_write)
+ parent = internal;
+
/* Set pointer to next node to load */
curr_node_ptr = next_node_ptr;
} /* end if */
@@ -1148,9 +1241,16 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
hbool_t changed = FALSE;/* Whether the 'modify' callback changed the record */
/* Lock B-tree leaf node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, parent, curr_node_ptr.node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ /* Unpin parent if necessary */
+ if(parent) {
+ if(parent != hdr && H5AC_unpin_entry(parent) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry")
+ parent = NULL;
+ } /* end if */
+
/* Locate record */
cmp = H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx);
@@ -1213,6 +1313,12 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
}
done:
+ if(parent) {
+ HDassert(ret_value < 0);
+ if(parent != hdr && H5AC_unpin_entry(parent) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry")
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5B2_modify() */
@@ -1341,7 +1447,7 @@ done:
*/
herr_t
H5B2_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata,
- H5B2_remove_t op, void *op_data)
+ void *parent, H5B2_remove_t op, void *op_data)
{
H5B2_hdr_t *hdr = NULL; /* Pointer to the B-tree header */
H5B2_hdr_cache_ud_t cache_udata; /* User-data for callback */
@@ -1358,6 +1464,7 @@ H5B2_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata,
HDfprintf(stderr, "%s: addr = %a\n", FUNC, addr);
#endif /* QAK */
cache_udata.f = f;
+ cache_udata.parent = parent;
cache_udata.addr = addr;
cache_udata.ctx_udata = ctx_udata;
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC__NO_FLAGS_SET)))
@@ -1388,3 +1495,93 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5B2_delete() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5B2_depend
+ *
+ * Purpose: Make a child flush dependency between the v2 B-tree's
+ * header and another piece of metadata in the file.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5B2_depend(H5AC_info_t *parent_entry, H5B2_t *bt2)
+{
+ /* Local variables */
+ H5B2_hdr_t *hdr = bt2->hdr; /* Header for B-tree */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(SUCCEED)
+
+#ifdef QAK
+HDfprintf(stderr, "%s: Called\n", FUNC);
+#endif /* QAK */
+
+ /*
+ * Check arguments.
+ */
+ HDassert(bt2);
+ HDassert(hdr);
+
+ /* Set the shared v2 B-tree header's file context for this operation */
+ bt2->hdr->f = bt2->f;
+
+ /* Set up flush dependency between parent entry and B-tree header */
+ if(H5B2__create_flush_depend(parent_entry, (H5AC_info_t *)hdr) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on file metadata")
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2_depend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5B2_undepend
+ *
+ * Purpose: Remove a child flush dependency between the v2 B-tree's
+ * header and another piece of metadata in the file.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5B2_undepend(H5AC_info_t *parent_entry, H5B2_t *bt2)
+{
+ /* Local variables */
+ H5B2_hdr_t *hdr = bt2->hdr; /* Header for B-tree */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(SUCCEED)
+
+#ifdef QAK
+HDfprintf(stderr, "%s: Called\n", FUNC);
+#endif /* QAK */
+
+ /*
+ * Check arguments.
+ */
+ HDassert(bt2);
+ HDassert(hdr);
+
+ /* Set the shared v2 B-tree header's file context for this operation */
+ bt2->hdr->f = bt2->f;
+
+ /* Remove flush dependency between parent entry and B-tree header */
+ if(H5B2__destroy_flush_depend(parent_entry, (H5AC_info_t *)hdr) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency on file metadata")
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2_undepend() */
+
diff --git a/src/H5B2cache.c b/src/H5B2cache.c
index 57f794b..58e76af 100644
--- a/src/H5B2cache.c
+++ b/src/H5B2cache.c
@@ -65,31 +65,43 @@
/********************/
/* Metadata cache callbacks */
-static herr_t H5B2__cache_hdr_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5B2__cache_hdr_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5B2__cache_hdr_verify_chksum(const void *image_ptr, size_t len, void *udata);
static void *H5B2__cache_hdr_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5B2__cache_hdr_image_len(const void *thing, size_t *image_len,
hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static herr_t H5B2__cache_hdr_serialize(const H5F_t *f, void *image, size_t len,
void *thing);
+static herr_t H5B2__cache_hdr_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5B2__cache_hdr_free_icr(void *thing);
-static herr_t H5B2__cache_int_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5B2__cache_int_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5B2__cache_int_verify_chksum(const void *image_ptr, size_t len, void *udata);
static void *H5B2__cache_int_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5B2__cache_int_image_len(const void *thing, size_t *image_len,
hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static herr_t H5B2__cache_int_serialize(const H5F_t *f, void *image, size_t len,
void *thing);
+static herr_t H5B2__cache_int_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5B2__cache_int_free_icr(void *thing);
-static herr_t H5B2__cache_leaf_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5B2__cache_leaf_get_load_size(const void * mage_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5B2__cache_leaf_verify_chksum(const void *image_ptr, size_t len, void *udata);
static void *H5B2__cache_leaf_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5B2__cache_leaf_image_len(const void *thing, size_t *image_len,
hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static herr_t H5B2__cache_leaf_serialize(const H5F_t *f, void *image, size_t len,
void *thing);
+static herr_t H5B2__cache_leaf_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5B2__cache_leaf_free_icr(void *thing);
/*********************/
@@ -103,11 +115,12 @@ const H5AC_class_t H5AC_BT2_HDR[1] = {{
H5FD_MEM_BTREE, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5B2__cache_hdr_get_load_size, /* 'get_load_size' callback */
+ H5B2__cache_hdr_verify_chksum,
H5B2__cache_hdr_deserialize, /* 'deserialize' callback */
H5B2__cache_hdr_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
H5B2__cache_hdr_serialize, /* 'serialize' callback */
- NULL, /* 'notify' callback */
+ H5B2__cache_hdr_notify, /* 'notify' callback */
H5B2__cache_hdr_free_icr, /* 'free_icr' callback */
NULL, /* 'clear' callback */
NULL, /* 'fsf_size' callback */
@@ -120,11 +133,12 @@ const H5AC_class_t H5AC_BT2_INT[1] = {{
H5FD_MEM_BTREE, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5B2__cache_int_get_load_size, /* 'get_load_size' callback */
+ H5B2__cache_int_verify_chksum, /* 'verify_chksum' callback */
H5B2__cache_int_deserialize, /* 'deserialize' callback */
H5B2__cache_int_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
H5B2__cache_int_serialize, /* 'serialize' callback */
- NULL, /* 'notify' callback */
+ H5B2__cache_int_notify, /* 'notify' callback */
H5B2__cache_int_free_icr, /* 'free_icr' callback */
NULL, /* 'clear' callback */
NULL, /* 'fsf_size' callback */
@@ -137,11 +151,12 @@ const H5AC_class_t H5AC_BT2_LEAF[1] = {{
H5FD_MEM_BTREE, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5B2__cache_leaf_get_load_size, /* 'get_load_size' callback */
+ H5B2__cache_leaf_verify_chksum, /* 'verify_chksum' callback */
H5B2__cache_leaf_deserialize, /* 'deserialize' callback */
H5B2__cache_leaf_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
H5B2__cache_leaf_serialize, /* 'serialize' callback */
- NULL, /* 'notify' callback */
+ H5B2__cache_leaf_notify, /* 'notify' callback */
H5B2__cache_leaf_free_icr, /* 'free_icr' callback */
NULL, /* 'clear' callback */
NULL, /* 'fsf_size' callback */
@@ -173,24 +188,68 @@ const H5AC_class_t H5AC_BT2_LEAF[1] = {{
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_hdr_get_load_size(const void *_udata, size_t *image_len)
+H5B2__cache_hdr_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5B2_hdr_cache_ud_t *udata = (const H5B2_hdr_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5B2_hdr_cache_ud_t *udata = (H5B2_hdr_cache_ud_t *)_udata; /* User data for callback */
FUNC_ENTER_STATIC_NOERR
/* Check arguments */
HDassert(udata);
+ HDassert(udata->f);
HDassert(image_len);
- /* Set the image length size */
- *image_len = H5B2_HEADER_SIZE_FILE(udata->f);
+ if(image == NULL) {
+ /* Set the image length size */
+ *image_len = H5B2_HEADER_SIZE_FILE(udata->f);
+ } else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *actual_len);
+ }
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5B2__cache_hdr_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5B2__cache_hdr_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5B2__cache_hdr_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2__cache_hdr_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5B2__cache_hdr_deserialize
*
* Purpose: Loads a B-tree header from the disk.
@@ -214,7 +273,6 @@ H5B2__cache_hdr_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
H5B2_subid_t id; /* ID of B-tree class, as found in file */
uint16_t depth; /* Depth of B-tree */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
H5B2_hdr_t *ret_value = NULL; /* Return value */
@@ -260,22 +318,17 @@ H5B2__cache_hdr_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
UINT16DECODE(image, hdr->root.node_nrec);
H5F_DECODE_LENGTH(udata->f, image, hdr->root.all_nrec);
+ /* checksum verification already done in verify_chksum cb */
+
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == hdr->hdr_size);
- /* Compute checksum on entire header */
- computed_chksum = H5_checksum_metadata(_image, (hdr->hdr_size - H5B2_SIZEOF_CHKSUM), 0);
-
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "incorrect metadata checksum for v2 B-tree header")
-
/* Initialize B-tree header info */
cparam.cls = H5B2_client_class_g[id];
- if(H5B2__hdr_init(hdr, &cparam, udata->ctx_udata, depth) < 0)
+ if(H5B2__hdr_init(hdr, &cparam, udata->ctx_udata, udata->parent, depth) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, NULL, "can't initialize B-tree header info")
/* Set the B-tree header's address */
@@ -395,11 +448,93 @@ H5B2__cache_hdr_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED le
/* Sanity check */
HDassert((size_t)(image - (uint8_t *)_image) == len);
+ /* Clear shadowed node lists, as the header has been flushed and all
+ * nodes must be shadowed again (if doing SWMR writes). Note that this
+ * algorithm does one extra iteration at the end, as the last node's
+ * shadowed_next pointer points to itself. */
+ while(hdr->shadowed_internal) {
+ H5B2_internal_t *next = hdr->shadowed_internal->shadowed_next;
+
+ HDassert(!hdr->shadowed_internal->cache_info.is_dirty);
+ hdr->shadowed_internal->shadowed_next = NULL;
+ hdr->shadowed_internal->shadowed_prev = NULL;
+ hdr->shadowed_internal = next;
+ } /* end while */
+ while(hdr->shadowed_leaf) {
+ H5B2_leaf_t *next = hdr->shadowed_leaf->shadowed_next;
+
+ HDassert(!hdr->shadowed_leaf->cache_info.is_dirty);
+ hdr->shadowed_leaf->shadowed_next = NULL;
+ hdr->shadowed_leaf->shadowed_prev = NULL;
+ hdr->shadowed_leaf = next;
+ } /* end while */
+
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5B2__cache_hdr_serialize() */
/*-------------------------------------------------------------------------
+ * Function: H5B2__cache_hdr_notify
+ *
+ * Purpose: Handle cache action notifications
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * nfortne2@hdfgroup.org
+ * Apr 24 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5B2__cache_hdr_notify(H5AC_notify_action_t action, void *_thing)
+{
+ H5B2_hdr_t *hdr = (H5B2_hdr_t *)_thing;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ /*
+ * Check arguments.
+ */
+ HDassert(hdr);
+
+ /* Check if the file was opened with SWMR-write access */
+ /* Flush dependencies are only maintained for SWMR writes; hdr->swmr_write
+ * is set in H5B2__hdr_init from the file's access intent */
+ if(hdr->swmr_write) {
+ HDassert(hdr->parent);
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ /* Create flush dependency on parent */
+ if(H5B2__create_flush_depend((H5AC_info_t *)hdr->parent, (H5AC_info_t *)hdr) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ break;
+
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* Destroy flush dependency on parent */
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)hdr->parent, (H5AC_info_t *)hdr) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ break;
+
+ default:
+ /* Unknown actions are a hard error in production builds and an
+ * assertion failure in debug builds */
+#ifdef NDEBUG
+ HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, FAIL, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2__cache_hdr_notify() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5B2__cache_hdr_free_icr
*
* Purpose: Destroy/release an "in core representation" of a data
@@ -446,9 +581,11 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_int_get_load_size(const void *_udata, size_t *image_len)
+H5B2__cache_int_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5B2_internal_cache_ud_t *udata = (const H5B2_internal_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5B2_internal_cache_ud_t *udata = (H5B2_internal_cache_ud_t *)_udata; /* User data for callback */
FUNC_ENTER_STATIC_NOERR
@@ -457,12 +594,58 @@ H5B2__cache_int_get_load_size(const void *_udata, size_t *image_len)
HDassert(udata->hdr);
HDassert(image_len);
- /* Set the image length size */
- *image_len = udata->hdr->node_size;
+ if(image == NULL) {
+ /* Set the image length size */
+ *image_len = udata->hdr->node_size;
+ } else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5B2__cache_int_get_load_size() */
+/*-------------------------------------------------------------------------
+ * Function: H5B2__cache_int_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5B2__cache_int_verify_chksum(const void *_image, size_t H5_ATTR_UNUSED len, void *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5B2_internal_cache_ud_t *udata = (H5B2_internal_cache_ud_t *)_udata; /* Pointer to user data */
+ size_t chk_size; /* Exact size of the node with checksum at the end */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image);
+ HDassert(udata);
+
+ /* Internal node prefix header + records + child pointer triplets: size with checksum at the end */
+ /* 'len' is unused: get_load_size reports the fixed node_size, but only
+ * the used prefix of the node (derived from nrec & depth) is covered by
+ * the checksum, so recompute the exact extent here */
+ chk_size = H5B2_INT_PREFIX_SIZE + (udata->nrec * udata->hdr->rrec_size) + ((size_t)(udata->nrec + 1) * H5B2_INT_POINTER_SIZE(udata->hdr, udata->depth));
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, chk_size, &stored_chksum, &computed_chksum);
+
+ /* Report FALSE (not an error) on mismatch; caller decides how to react */
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2__cache_int_verify_chksum() */
+
/*-------------------------------------------------------------------------
* Function: H5B2__cache_int_deserialize
@@ -488,7 +671,6 @@ H5B2__cache_int_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
uint8_t *native; /* Pointer to native record info */
H5B2_node_ptr_t *int_node_ptr; /* Pointer to node pointer info */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
unsigned u; /* Local index variable */
H5B2_internal_t *ret_value = NULL; /* Return value */
@@ -509,6 +691,9 @@ H5B2__cache_int_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
/* Share B-tree information */
internal->hdr = udata->hdr;
+ internal->parent = udata->parent;
+ internal->shadowed_next = NULL;
+ internal->shadowed_prev = NULL;
/* Magic number */
if(HDmemcmp(image, H5B2_INT_MAGIC, (size_t)H5_SIZEOF_MAGIC))
@@ -562,8 +747,7 @@ H5B2__cache_int_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
int_node_ptr++;
} /* end for */
- /* Compute checksum on internal node */
- computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
@@ -571,10 +755,6 @@ H5B2__cache_int_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
/* Sanity check parsing */
HDassert((size_t)(image - (const uint8_t *)_image) <= len);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "incorrect metadata checksum for v2 internal node")
-
/* Set return value */
ret_value = internal;
@@ -709,6 +889,68 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5B2__cache_int_notify
+ *
+ * Purpose: Handle cache action notifications
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * nfortne2@hdfgroup.org
+ * Apr 25 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5B2__cache_int_notify(H5AC_notify_action_t action, void *_thing)
+{
+ H5B2_internal_t *internal = (H5B2_internal_t *)_thing;
+ herr_t ret_value = SUCCEED;
+
+ /* Static package-scope function: use FUNC_ENTER_STATIC, consistent with
+ * H5B2__cache_hdr_notify() and the other cache callbacks in this file */
+ FUNC_ENTER_STATIC
+
+ /*
+ * Check arguments.
+ */
+ HDassert(internal);
+
+ /* Check if the file was opened with SWMR-write access */
+ if(internal->hdr->swmr_write) {
+ HDassert(internal->parent);
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ /* Create flush dependency on parent */
+ if(H5B2__create_flush_depend((H5AC_info_t *)internal->parent, (H5AC_info_t *)internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ break;
+
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* Destroy flush dependency on parent */
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)internal->parent, (H5AC_info_t *)internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+
+ break;
+
+ default:
+#ifdef NDEBUG
+ HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, FAIL, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2__cache_int_notify() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5B2__cache_int_free_icr
*
* Purpose: Destroy/release an "in core representation" of a data
@@ -723,17 +965,43 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_int_free_icr(void *thing)
+H5B2__cache_int_free_icr(void *_thing)
{
+ H5B2_internal_t *internal = (H5B2_internal_t *)_thing;
 herr_t ret_value = SUCCEED; /* Return value */
 FUNC_ENTER_STATIC
 /* Check arguments */
- HDassert(thing);
+ HDassert(internal);
+ HDassert(internal->hdr);
+
+ /* Unlink from shadowed list.  NOTE: the tail node's shadowed_next points
+ * to itself (see H5B2__cache_hdr_serialize), so a non-NULL shadowed_next
+ * means this node is on the list. */
+ if(internal->shadowed_next) {
+ if(internal->shadowed_next != internal) {
+ internal->shadowed_next->shadowed_prev = internal->shadowed_prev;
+
+ if(internal->shadowed_prev)
+ internal->shadowed_prev->shadowed_next = internal->shadowed_next;
+ else {
+ /* Was an assignment (=) inside the assert; compare (==) instead */
+ HDassert(internal->hdr->shadowed_internal == internal);
+
+ internal->hdr->shadowed_internal = internal->shadowed_next;
+ } /* end else */
+ } /* end if */
+ else {
+ /* This node is the tail; the new tail's next must point to itself */
+ if(internal->shadowed_prev)
+ internal->shadowed_prev->shadowed_next = internal->shadowed_prev;
+ else {
+ /* Was an assignment (=) inside the assert; compare (==) instead */
+ HDassert(internal->hdr->shadowed_internal == internal);
+
+ internal->hdr->shadowed_internal = NULL;
+ } /* end else */
+ } /* end else */
+ } /* end if */
 /* Release v2 B-tree internal node */
- if(H5B2__internal_free((H5B2_internal_t *)thing) < 0)
+ if(H5B2__internal_free(internal) < 0)
 HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to release v2 B-tree internal node")
done:
@@ -755,9 +1023,11 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_leaf_get_load_size(const void *_udata, size_t *image_len)
+H5B2__cache_leaf_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5B2_leaf_cache_ud_t *udata = (const H5B2_leaf_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5B2_leaf_cache_ud_t *udata = (H5B2_leaf_cache_ud_t *)_udata; /* User data for callback */
FUNC_ENTER_STATIC_NOERR
@@ -766,12 +1036,58 @@ H5B2__cache_leaf_get_load_size(const void *_udata, size_t *image_len)
HDassert(udata->hdr);
HDassert(image_len);
- /* Set the image length size */
- *image_len = udata->hdr->node_size;
+ if(image == NULL) {
+ /* Set the image length size */
+ *image_len = udata->hdr->node_size;
+ } else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5B2__cache_leaf_get_load_size() */
+/*-------------------------------------------------------------------------
+ * Function: H5B2__cache_leaf_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5B2__cache_leaf_verify_chksum(const void *_image, size_t H5_ATTR_UNUSED len, void *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5B2_leaf_cache_ud_t *udata = (H5B2_leaf_cache_ud_t *)_udata; /* Pointer to user data */
+ size_t chk_size; /* Exact size of the node with checksum at the end */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image);
+ HDassert(udata);
+
+ /* Leaf node prefix header + records: size with checksum at the end */
+ /* 'len' is unused: only the used prefix of the fixed-size node (derived
+ * from nrec) is covered by the checksum */
+ chk_size = H5B2_LEAF_PREFIX_SIZE + (udata->nrec * udata->hdr->rrec_size);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, chk_size, &stored_chksum, &computed_chksum);
+
+ /* Report FALSE (not an error) on mismatch; caller decides how to react */
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2__cache_leaf_verify_chksum() */
+
/*-------------------------------------------------------------------------
* Function: H5B2__cache_leaf_deserialize
@@ -796,7 +1112,6 @@ H5B2__cache_leaf_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint8_t *native; /* Pointer to native keys */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
unsigned u; /* Local index variable */
H5B2_leaf_t *ret_value = NULL; /* Return value */
@@ -817,6 +1132,9 @@ H5B2__cache_leaf_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
/* Share B-tree header information */
leaf->hdr = udata->hdr;
+ leaf->parent = udata->parent;
+ leaf->shadowed_next = NULL;
+ leaf->shadowed_prev = NULL;
/* Magic number */
if(HDmemcmp(image, H5B2_LEAF_MAGIC, (size_t)H5_SIZEOF_MAGIC))
@@ -850,8 +1168,7 @@ H5B2__cache_leaf_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
native += udata->hdr->cls->nrec_size;
} /* end for */
- /* Compute checksum on leaf node */
- computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
@@ -859,10 +1176,6 @@ H5B2__cache_leaf_deserialize(const void *_image, size_t H5_ATTR_UNUSED len,
/* Sanity check parsing */
HDassert((size_t)(image - (const uint8_t *)_image) <= udata->hdr->node_size);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "incorrect metadata checksum for v2 leaf node")
-
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) <= len);
@@ -986,6 +1299,67 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5B2__cache_leaf_notify
+ *
+ * Purpose: Handle cache action notifications
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * nfortne2@hdfgroup.org
+ * Apr 25 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5B2__cache_leaf_notify(H5AC_notify_action_t action, void *_thing)
+{
+ H5B2_leaf_t *leaf = (H5B2_leaf_t *)_thing;
+ herr_t ret_value = SUCCEED;
+
+ /* Static package-scope function: use FUNC_ENTER_STATIC, consistent with
+ * H5B2__cache_hdr_notify() and the other cache callbacks in this file */
+ FUNC_ENTER_STATIC
+
+ /*
+ * Check arguments.
+ */
+ HDassert(leaf);
+
+ /* Check if the file was opened with SWMR-write access */
+ if(leaf->hdr->swmr_write) {
+ HDassert(leaf->parent);
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ /* Create flush dependency on parent */
+ if(H5B2__create_flush_depend((H5AC_info_t *)leaf->parent, (H5AC_info_t *)leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ break;
+
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* Destroy flush dependency on parent */
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)leaf->parent, (H5AC_info_t *)leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ break;
+
+ default:
+#ifdef NDEBUG
+ HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, FAIL, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2__cache_leaf_notify() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5B2__cache_leaf_free_icr
*
* Purpose: Destroy/release an "in core representation" of a data
@@ -1000,17 +1374,43 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__cache_leaf_free_icr(void *thing)
+H5B2__cache_leaf_free_icr(void *_thing)
{
+ H5B2_leaf_t *leaf = (H5B2_leaf_t *)_thing;
 herr_t ret_value = SUCCEED; /* Return value */
 FUNC_ENTER_STATIC
 /* Check arguments */
- HDassert(thing);
+ HDassert(leaf);
+ HDassert(leaf->hdr);
+
+ /* Unlink from shadowed list.  NOTE: the tail node's shadowed_next points
+ * to itself (see H5B2__cache_hdr_serialize), so a non-NULL shadowed_next
+ * means this node is on the list. */
+ if(leaf->shadowed_next) {
+ if(leaf->shadowed_next != leaf) {
+ leaf->shadowed_next->shadowed_prev = leaf->shadowed_prev;
+
+ if(leaf->shadowed_prev)
+ leaf->shadowed_prev->shadowed_next = leaf->shadowed_next;
+ else {
+ /* Was an assignment (=) inside the assert; compare (==) instead */
+ HDassert(leaf->hdr->shadowed_leaf == leaf);
+
+ leaf->hdr->shadowed_leaf = leaf->shadowed_next;
+ } /* end else */
+ } /* end if */
+ else {
+ /* This node is the tail; the new tail's next must point to itself */
+ if(leaf->shadowed_prev)
+ leaf->shadowed_prev->shadowed_next = leaf->shadowed_prev;
+ else {
+ /* Was an assignment (=) inside the assert; compare (==) instead */
+ HDassert(leaf->hdr->shadowed_leaf == leaf);
+
+ leaf->hdr->shadowed_leaf = NULL;
+ } /* end else */
+ } /* end else */
+ } /* end if */
 /* Destroy v2 B-tree leaf node */
- if(H5B2__leaf_free((H5B2_leaf_t *)thing) < 0)
+ if(H5B2__leaf_free(leaf) < 0)
 HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree leaf node")
done:
diff --git a/src/H5B2dbg.c b/src/H5B2dbg.c
index ad9f970..c7dc165 100644
--- a/src/H5B2dbg.c
+++ b/src/H5B2dbg.c
@@ -126,6 +126,7 @@ H5B2__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
cache_udata.f = f;
cache_udata.addr = addr;
cache_udata.ctx_udata = dbg_ctx;
+ cache_udata.parent = NULL;
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree header")
@@ -245,6 +246,7 @@ H5B2__int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
cache_udata.f = f;
cache_udata.addr = hdr_addr;
cache_udata.ctx_udata = dbg_ctx;
+ cache_udata.parent = NULL;
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree header")
@@ -256,7 +258,7 @@ H5B2__int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
*/
H5_CHECK_OVERFLOW(nrec, unsigned, uint16_t);
H5_CHECK_OVERFLOW(depth, unsigned, uint16_t);
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, addr, (uint16_t)nrec, (uint16_t)depth, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, addr, NULL, (uint16_t)nrec, (uint16_t)depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree internal node")
/* Print opening message */
@@ -376,6 +378,7 @@ H5B2__leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent
cache_udata.f = f;
cache_udata.addr = hdr_addr;
cache_udata.ctx_udata = dbg_ctx;
+ cache_udata.parent = NULL;
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree header")
@@ -386,7 +389,7 @@ H5B2__leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent
* Load the B-tree leaf node
*/
H5_CHECK_OVERFLOW(nrec, unsigned, uint16_t);
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, addr, (uint16_t)nrec, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, addr, NULL, (uint16_t)nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Print opening message */
diff --git a/src/H5B2hdr.c b/src/H5B2hdr.c
index 16d8467..f3945bc 100644
--- a/src/H5B2hdr.c
+++ b/src/H5B2hdr.c
@@ -109,7 +109,7 @@ H5FL_SEQ_DEFINE(H5B2_node_info_t);
*/
herr_t
H5B2__hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam, void *ctx_udata,
- uint16_t depth)
+ void *parent, uint16_t depth)
{
size_t sz_max_nrec; /* Temporary variable for range checking */
unsigned u_max_nrec_size; /* Temporary variable for range checking */
@@ -133,6 +133,7 @@ H5B2__hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam, void *ctx_udata,
HDassert(cparam->merge_percent < (cparam->split_percent / 2));
/* Initialize basic information */
+ hdr->parent = parent;
hdr->rc = 0;
hdr->pending_delete = FALSE;
@@ -208,6 +209,16 @@ HDmemset(hdr->page, 0, hdr->node_size);
} /* end for */
} /* end if */
+ /* Determine if we are doing SWMR writes. Only enable for data chunks for
+ * now. */
+ hdr->swmr_write = (H5F_INTENT(hdr->f) & H5F_ACC_SWMR_WRITE) > 0
+ && (hdr->cls->id == H5B2_CDSET_ID
+ || hdr->cls->id == H5B2_CDSET_FILT_ID);
+
+ /* Clear the shadowed list pointers */
+ hdr->shadowed_leaf = NULL;
+ hdr->shadowed_internal = NULL;
+
/* Create the callback context, if the callback exists */
if(hdr->cls->crt_context) {
if(NULL == (hdr->cb_ctx = (*hdr->cls->crt_context)(ctx_udata)))
@@ -284,7 +295,7 @@ done:
*/
haddr_t
H5B2__hdr_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam,
- void *ctx_udata)
+ void *ctx_udata, void *parent)
{
H5B2_hdr_t *hdr = NULL; /* The new v2 B-tree header information */
haddr_t ret_value = HADDR_UNDEF; /* Return value */
@@ -302,7 +313,7 @@ H5B2__hdr_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam,
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, HADDR_UNDEF, "allocation failed for B-tree header")
/* Initialize shared B-tree info */
- if(H5B2__hdr_init(hdr, cparam, ctx_udata, (uint16_t)0) < 0)
+ if(H5B2__hdr_init(hdr, cparam, ctx_udata, parent, (uint16_t)0) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, HADDR_UNDEF, "can't create shared B-tree info")
/* Allocate space for the header on disk */
@@ -601,7 +612,7 @@ H5B2__hdr_delete(H5B2_hdr_t *hdr, hid_t dxpl_id)
/* Delete all nodes in B-tree */
if(H5F_addr_defined(hdr->root.addr))
- if(H5B2__delete_node(hdr, dxpl_id, hdr->depth, &hdr->root, hdr->remove_op, hdr->remove_op_data) < 0)
+ if(H5B2__delete_node(hdr, dxpl_id, hdr->depth, &hdr->root, hdr, hdr->remove_op, hdr->remove_op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to delete B-tree nodes")
/* Indicate that the heap header should be deleted & file space freed */
diff --git a/src/H5B2int.c b/src/H5B2int.c
index b8c9634..f7927db 100644
--- a/src/H5B2int.c
+++ b/src/H5B2int.c
@@ -78,7 +78,11 @@ static herr_t H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
static herr_t H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_internal_t *internal, unsigned *internal_flags_ptr, unsigned idx,
void *swap_loc);
-static herr_t H5B2__create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
+static herr_t H5B2__shadow_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
+ uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, H5B2_internal_t **internal);
+static herr_t H5B2__shadow_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
+ H5B2_node_ptr_t *curr_node_ptr, H5B2_leaf_t **leaf);
+static herr_t H5B2__create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, void *parent,
H5B2_node_ptr_t *node_ptr, uint16_t depth);
#ifdef H5B2_DEBUG
static herr_t H5B2__assert_leaf(const H5B2_hdr_t *hdr, const H5B2_leaf_t *leaf);
@@ -180,12 +184,16 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
const H5AC_class_t *child_class; /* Pointer to child node's class info */
haddr_t left_addr, right_addr; /* Addresses of left & right child nodes */
void *left_child = NULL, *right_child = NULL; /* Pointers to child nodes */
+ const H5AC_class_t *grandchild_class; /* Pointer to grandchild node's class info */
+ haddr_t grandchild_addr; /* Grandchild address */
+ void *grandchild = NULL; /* Pointer to grandchild node */
uint16_t *left_nrec, *right_nrec; /* Pointers to child # of records */
uint8_t *left_native, *right_native;/* Pointers to childs' native records */
H5B2_node_ptr_t *left_node_ptrs = NULL, *right_node_ptrs = NULL;/* Pointers to childs' node pointer info */
uint16_t mid_record; /* Index of "middle" record in current node */
uint16_t old_node_nrec; /* Number of records in internal node split */
unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */
+ unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -207,7 +215,7 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Create new internal node */
internal->node_ptrs[idx + 1].all_nrec = internal->node_ptrs[idx + 1].node_nrec = 0;
- if(H5B2__create_internal(hdr, dxpl_id, &(internal->node_ptrs[idx + 1]), (uint16_t)(depth - 1)) < 0)
+ if(H5B2__create_internal(hdr, dxpl_id, internal, &(internal->node_ptrs[idx + 1]), (uint16_t)(depth - 1)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create new internal node")
/* Setup information for unlocking child nodes */
@@ -216,11 +224,18 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Protect both leaves */
- if(NULL == (left_int = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (left_int = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_int = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (right_int = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ /* Shadow the left node if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[idx]), &left_int) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+ left_addr = internal->node_ptrs[idx].addr;
+ } /* end if */
+
/* More setup for child nodes */
left_child = left_int;
right_child = right_int;
@@ -236,7 +251,7 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Create new leaf node */
internal->node_ptrs[idx + 1].all_nrec = internal->node_ptrs[idx + 1].node_nrec = 0;
- if(H5B2__create_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx + 1])) < 0)
+ if(H5B2__create_leaf(hdr, dxpl_id, internal, &(internal->node_ptrs[idx + 1])) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create new leaf node")
/* Setup information for unlocking child nodes */
@@ -245,11 +260,18 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Protect both leaves */
- if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ /* Shadow the left node if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx]), &left_leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+ left_addr = internal->node_ptrs[idx].addr;
+ } /* end if */
+
/* More setup for child nodes */
left_child = left_leaf;
right_child = right_leaf;
@@ -288,7 +310,6 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Determine total number of records in new child nodes */
if(depth > 1) {
- unsigned u; /* Local index variable */
hsize_t new_left_all_nrec; /* New total number of records in left child */
hsize_t new_right_all_nrec; /* New total number of records in right child */
@@ -322,6 +343,65 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
if(parent_cache_info_flags_ptr)
*parent_cache_info_flags_ptr |= H5AC__DIRTIED_FLAG;
+ /* Update flush dependencies */
+ if(hdr->swmr_write) {
+
+ /* Update node pointers */
+ if(depth > 1) {
+ /* Loop over grandchildren */
+ for(u = 0; u < (*right_nrec + (unsigned)1); u++) {
+ hbool_t update_deps = FALSE; /* Whether to update flush dependencies */
+
+ grandchild_addr = right_node_ptrs[u].addr;
+ if(depth > 2) {
+ H5B2_internal_t *grandchild_int = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_int = H5B2__protect_internal(hdr, dxpl_id, grandchild_addr, right_child, right_node_ptrs[u].node_nrec, (uint16_t)(depth - 2), H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ grandchild_class = H5AC_BT2_INT;
+ grandchild = grandchild_int;
+
+ if(grandchild_int->parent == left_child) {
+ grandchild_int->parent = right_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_int->parent == right_child);
+ } /* end if */
+ else {
+ H5B2_leaf_t *grandchild_leaf = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_leaf = H5B2__protect_leaf(hdr, dxpl_id, grandchild_addr, right_child, right_node_ptrs[u].node_nrec, H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ grandchild_class = H5AC_BT2_LEAF;
+ grandchild = grandchild_leaf;
+
+ if(grandchild_leaf->parent == left_child) {
+ grandchild_leaf->parent = right_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_leaf->parent == right_child);
+ } /* end else */
+
+ /* Update flush dependencies if necessary */
+ if(update_deps) {
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)left_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5B2__create_flush_depend((H5AC_info_t *)right_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+
+ /* Unprotect the grandchild */
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, right_node_ptrs[u].addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ grandchild = NULL;
+ } /* end for */
+ } /* end if */
+ } /* end if */
+
#ifdef H5B2_DEBUG
H5B2__assert_internal((hsize_t)0, hdr, internal);
if(depth > 1) {
@@ -341,6 +421,17 @@ done:
if(right_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, right_child_flags) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree leaf node")
+ /* Release grandchild node if protected (only on error) */
+ if(grandchild && H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, left_node_ptrs[u].addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+
+ /* Unprotect the grandchild on error */
+ if(grandchild) {
+ HDassert(ret_value < 0);
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, grandchild_addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B2__split1() */
@@ -400,11 +491,11 @@ H5B2__split_root(H5B2_hdr_t *hdr, hid_t dxpl_id)
/* Create new internal node to use as root */
hdr->root.node_nrec = 0;
- if(H5B2__create_internal(hdr, dxpl_id, &(hdr->root), hdr->depth) < 0)
+ if(H5B2__create_internal(hdr, dxpl_id, hdr, &(hdr->root), hdr->depth) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create new internal node")
/* Protect new root node */
- if(NULL == (new_root = H5B2__protect_internal(hdr, dxpl_id, hdr->root.addr, hdr->root.node_nrec, hdr->depth, H5AC__NO_FLAGS_SET)))
+ if(NULL == (new_root = H5B2__protect_internal(hdr, dxpl_id, hdr->root.addr, hdr, hdr->root.node_nrec, hdr->depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Set first node pointer in root node to old root node pointer info */
@@ -444,11 +535,15 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
const H5AC_class_t *child_class; /* Pointer to child node's class info */
haddr_t left_addr, right_addr; /* Addresses of left & right child nodes */
void *left_child = NULL, *right_child = NULL; /* Pointers to child nodes */
+ const H5AC_class_t *grandchild_class; /* Pointer to grandchild node's class info */
+ haddr_t grandchild_addr; /* Grandchild address */
+ void *grandchild = NULL; /* Pointer to grandchild node */
uint16_t *left_nrec, *right_nrec; /* Pointers to child # of records */
uint8_t *left_native, *right_native; /* Pointers to childs' native records */
H5B2_node_ptr_t *left_node_ptrs = NULL, *right_node_ptrs = NULL;/* Pointers to childs' node pointer info */
hssize_t left_moved_nrec = 0, right_moved_nrec = 0; /* Number of records moved, for internal redistrib */
unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */
+ unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -468,11 +563,21 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
- if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ /* Shadow both nodes if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[idx]), &left_internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+ if(H5B2__shadow_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[idx + 1]), &right_internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+ left_addr = internal->node_ptrs[idx].addr;
+ right_addr = internal->node_ptrs[idx + 1].addr;
+ } /* end if */
+
/* More setup for child nodes */
left_child = left_internal;
right_child = right_internal;
@@ -493,11 +598,21 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
- if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ /* Shadow both nodes if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx]), &left_leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node")
+ if(H5B2__shadow_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx + 1]), &right_leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node")
+ left_addr = internal->node_ptrs[idx].addr;
+ right_addr = internal->node_ptrs[idx + 1].addr;
+ } /* end if */
+
/* More setup for child nodes */
left_child = left_leaf;
right_child = right_leaf;
@@ -542,7 +657,6 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Handle node pointers, if we have an internal node */
if(depth > 1) {
hsize_t moved_nrec = move_nrec; /* Total number of records moved, for internal redistrib */
- unsigned u; /* Local index variable */
/* Count the number of records being moved */
for(u = 0; u < move_nrec; u++)
@@ -557,6 +671,65 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDmemmove(&(right_node_ptrs[0]), &(right_node_ptrs[move_nrec]), sizeof(H5B2_node_ptr_t) * (new_right_nrec + (unsigned)1));
} /* end if */
+ /* Update flush dependencies */
+ if(hdr->swmr_write) {
+
+ /* Update node pointers */
+ if(depth > 1) {
+ /* Loop over grandchildren */
+ for(u = (*left_nrec + (unsigned)1); u < (*left_nrec + (unsigned)move_nrec + (unsigned)1); u++) {
+ hbool_t update_deps = FALSE; /* Whether to update flush dependencies */
+
+ grandchild_addr = left_node_ptrs[u].addr;
+ if(depth > 2) {
+ H5B2_internal_t *grandchild_int = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_int = H5B2__protect_internal(hdr, dxpl_id, grandchild_addr, left_child, left_node_ptrs[u].node_nrec, (uint16_t)(depth - 2), H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ grandchild_class = H5AC_BT2_INT;
+ grandchild = grandchild_int;
+
+ if(grandchild_int->parent == right_child) {
+ grandchild_int->parent = left_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_int->parent == left_child);
+ } /* end if */
+ else {
+ H5B2_leaf_t *grandchild_leaf = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_leaf = H5B2__protect_leaf(hdr, dxpl_id, grandchild_addr, left_child, left_node_ptrs[u].node_nrec, H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ grandchild_class = H5AC_BT2_LEAF;
+ grandchild = grandchild_leaf;
+
+ if(grandchild_leaf->parent == right_child) {
+ grandchild_leaf->parent = left_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_leaf->parent == left_child);
+ } /* end else */
+
+ /* Update flush dependencies if necessary */
+ if(update_deps) {
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)right_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5B2__create_flush_depend((H5AC_info_t *)left_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+
+ /* Unprotect the grandchild */
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, left_node_ptrs[u].addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ grandchild = NULL;
+ } /* end for */
+ } /* end if */
+ } /* end if */
+
/* Update number of records in child nodes */
*left_nrec = (uint16_t)(*left_nrec + move_nrec);
*right_nrec = new_right_nrec;
@@ -592,7 +765,6 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Handle node pointers, if we have an internal node */
if(depth > 1) {
hsize_t moved_nrec = move_nrec; /* Total number of records moved, for internal redistrib */
- unsigned u; /* Local index variable */
/* Slide node pointers in right node up */
HDmemmove(&(right_node_ptrs[move_nrec]), &(right_node_ptrs[0]), sizeof(H5B2_node_ptr_t) * (size_t)(*right_nrec + 1));
@@ -607,6 +779,65 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5_CHECKED_ASSIGN(right_moved_nrec, hssize_t, moved_nrec, hsize_t)
} /* end if */
+ /* Update flush dependencies */
+ if(hdr->swmr_write) {
+
+ /* Update node pointers */
+ if(depth > 1) {
+ /* Loop over grandchildren */
+ for(u = 0; u < move_nrec; u++) {
+ hbool_t update_deps = FALSE; /* Whether to update flush dependencies */
+
+ grandchild_addr = right_node_ptrs[u].addr;
+ if(depth > 2) {
+ H5B2_internal_t *grandchild_int = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_int = H5B2__protect_internal(hdr, dxpl_id, grandchild_addr, right_child, right_node_ptrs[u].node_nrec, (uint16_t)(depth - 2), H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ grandchild_class = H5AC_BT2_INT;
+ grandchild = grandchild_int;
+
+ if(grandchild_int->parent == left_child) {
+ grandchild_int->parent = right_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_int->parent == right_child);
+ } /* end if */
+ else {
+ H5B2_leaf_t *grandchild_leaf = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_leaf = H5B2__protect_leaf(hdr, dxpl_id, grandchild_addr, right_child, right_node_ptrs[u].node_nrec, H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ grandchild_class = H5AC_BT2_LEAF;
+ grandchild = grandchild_leaf;
+
+ if(grandchild_leaf->parent == left_child) {
+ grandchild_leaf->parent = right_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_leaf->parent == right_child);
+ } /* end else */
+
+ /* Update flush dependencies if necessary */
+ if(update_deps) {
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)left_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5B2__create_flush_depend((H5AC_info_t *)right_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+
+ /* Unprotect the grandchild */
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, right_node_ptrs[u].addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ grandchild = NULL;
+ } /* end for */
+ } /* end if */
+ } /* end if */
+
/* Update number of records in child nodes */
*left_nrec = new_left_nrec;
*right_nrec = (uint16_t)(*right_nrec + move_nrec);
@@ -649,6 +880,17 @@ done:
if(right_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, right_child_flags) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node")
+ /* Release grandchild node if protected (only on error) */
+ if(grandchild && H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, left_node_ptrs[u].addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+
+ /* Unprotect the grandchild on error */
+ if(grandchild) {
+ HDassert(ret_value < 0);
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, grandchild_addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B2__redistribute2() */
@@ -678,6 +920,9 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
haddr_t middle_addr; /* Address of middle child node */
void *left_child = NULL, *right_child = NULL; /* Pointers to child nodes */
void *middle_child = NULL; /* Pointers to middle child node */
+ const H5AC_class_t *grandchild_class; /* Pointer to grandchild node's class info */
+ haddr_t grandchild_addr; /* Grandchild address */
+ void *grandchild = NULL; /* Pointer to grandchild node */
uint16_t *left_nrec, *right_nrec; /* Pointers to child # of records */
uint16_t *middle_nrec; /* Pointers to middle child # of records */
uint8_t *left_native, *right_native; /* Pointers to childs' native records */
@@ -686,6 +931,7 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
hssize_t middle_moved_nrec = 0; /* Number of records moved, for internal split */
unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */
unsigned middle_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */
+ unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -708,13 +954,26 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
- if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ /* Shadow all nodes if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[idx - 1]), &left_internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+ if(H5B2__shadow_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[idx]), &middle_internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+ if(H5B2__shadow_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[idx + 1]), &right_internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+ left_addr = internal->node_ptrs[idx - 1].addr;
+ middle_addr = internal->node_ptrs[idx].addr;
+ right_addr = internal->node_ptrs[idx + 1].addr;
+ } /* end if */
+
/* More setup for child nodes */
left_child = left_internal;
middle_child = middle_internal;
@@ -741,13 +1000,26 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
- if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal, internal->node_ptrs[idx - 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ /* Shadow all nodes if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx - 1]), &left_leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node")
+ if(H5B2__shadow_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx]), &middle_leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node")
+ if(H5B2__shadow_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx + 1]), &right_leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node")
+ left_addr = internal->node_ptrs[idx - 1].addr;
+ middle_addr = internal->node_ptrs[idx].addr;
+ right_addr = internal->node_ptrs[idx + 1].addr;
+ } /* end if */
+
/* More setup for child nodes */
left_child = left_leaf;
middle_child = middle_leaf;
@@ -797,7 +1069,6 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
if(depth > 1) {
hsize_t moved_nrec; /* Total number of records moved, for internal redistrib */
unsigned move_nptrs; /* Number of node pointers to move */
- unsigned u; /* Local index variable */
/* Move middle node pointers into left node */
move_nptrs = (unsigned)(new_left_nrec - *left_nrec);
@@ -813,6 +1084,65 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDmemmove(&(middle_node_ptrs[0]), &(middle_node_ptrs[move_nptrs]), sizeof(H5B2_node_ptr_t) * ((*middle_nrec - move_nptrs) + 1));
} /* end if */
+ /* Update flush dependencies */
+ if(hdr->swmr_write) {
+
+ /* Update node pointers */
+ if(depth > 1) {
+ /* Loop over grandchildren */
+ for(u = (*left_nrec + (unsigned)1); u < (*left_nrec + (unsigned)moved_middle_nrec + 1); u++) {
+ hbool_t update_deps = FALSE; /* Whether to update flush dependencies */
+
+ grandchild_addr = left_node_ptrs[u].addr;
+ if(depth > 2) {
+ H5B2_internal_t *grandchild_int = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_int = H5B2__protect_internal(hdr, dxpl_id, grandchild_addr, left_child, left_node_ptrs[u].node_nrec, (uint16_t)(depth - 2), H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ grandchild_class = H5AC_BT2_INT;
+ grandchild = grandchild_int;
+
+ if(grandchild_int->parent == middle_child) {
+ grandchild_int->parent = left_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_int->parent == left_child);
+ } /* end if */
+ else {
+ H5B2_leaf_t *grandchild_leaf = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_leaf = H5B2__protect_leaf(hdr, dxpl_id, grandchild_addr, left_child, left_node_ptrs[u].node_nrec, H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ grandchild_class = H5AC_BT2_LEAF;
+ grandchild = grandchild_leaf;
+
+ if(grandchild_leaf->parent == middle_child) {
+ grandchild_leaf->parent = left_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_leaf->parent == left_child);
+ } /* end else */
+
+ /* Update flush dependencies if necessary */
+ if(update_deps) {
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)middle_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5B2__create_flush_depend((H5AC_info_t *)left_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+
+ /* Unprotect the grandchild */
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, left_node_ptrs[u].addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ grandchild = NULL;
+ } /* end for */
+ } /* end if */
+ } /* end if */
+
/* Update the current number of records in middle node */
curr_middle_nrec = (uint16_t)(curr_middle_nrec - moved_middle_nrec);
@@ -841,7 +1171,6 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Move node pointers also if this is an internal node */
if(depth > 1) {
hsize_t moved_nrec; /* Total number of records moved, for internal redistrib */
- unsigned u; /* Local index variable */
/* Slide the node pointers in right node up */
HDmemmove(&(right_node_ptrs[right_nrec_move]), &(right_node_ptrs[0]), sizeof(H5B2_node_ptr_t) * (size_t)(*right_nrec + 1));
@@ -856,6 +1185,65 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
middle_moved_nrec -= (hssize_t)(moved_nrec + right_nrec_move);
} /* end if */
+ /* Update flush dependencies */
+ if(hdr->swmr_write) {
+
+ /* Update node pointers */
+ if(depth > 1) {
+ /* Loop over grandchildren */
+ for(u = 0; u < right_nrec_move; u++) {
+ hbool_t update_deps = FALSE; /* Whether to update flush dependencies */
+
+ grandchild_addr = right_node_ptrs[u].addr;
+ if(depth > 2) {
+ H5B2_internal_t *grandchild_int = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_int = H5B2__protect_internal(hdr, dxpl_id, grandchild_addr, right_child, right_node_ptrs[u].node_nrec, (uint16_t)(depth - 2), H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ grandchild_class = H5AC_BT2_INT;
+ grandchild = grandchild_int;
+
+ if(grandchild_int->parent == middle_child) {
+ grandchild_int->parent = right_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_int->parent == right_child);
+ } /* end if */
+ else {
+ H5B2_leaf_t *grandchild_leaf = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_leaf = H5B2__protect_leaf(hdr, dxpl_id, grandchild_addr, right_child, right_node_ptrs[u].node_nrec, H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ grandchild_class = H5AC_BT2_LEAF;
+ grandchild = grandchild_leaf;
+
+ if(grandchild_leaf->parent == middle_child) {
+ grandchild_leaf->parent = right_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_leaf->parent == right_child);
+ } /* end else */
+
+ /* Update flush dependencies if necessary */
+ if(update_deps) {
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)middle_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5B2__create_flush_depend((H5AC_info_t *)right_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+
+ /* Unprotect the grandchild */
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, right_node_ptrs[u].addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ grandchild = NULL;
+ } /* end for */
+ } /* end if */
+ } /* end if */
+
/* Update the current number of records in middle node */
curr_middle_nrec = (uint16_t)(curr_middle_nrec - right_nrec_move);
@@ -884,7 +1272,6 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Move node pointers also if this is an internal node */
if(depth > 1) {
hsize_t moved_nrec; /* Total number of records moved, for internal redistrib */
- unsigned u; /* Local index variable */
/* Slide the node pointers in middle node up */
HDmemmove(&(middle_node_ptrs[left_nrec_move]), &(middle_node_ptrs[0]), sizeof(H5B2_node_ptr_t) * (size_t)(curr_middle_nrec + 1));
@@ -899,6 +1286,65 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
middle_moved_nrec += (hssize_t)(moved_nrec + left_nrec_move);
} /* end if */
+ /* Update flush dependencies */
+ if(hdr->swmr_write) {
+
+ /* Update node pointers */
+ if(depth > 1) {
+ /* Loop over grandchildren */
+ for(u = 0; u < left_nrec_move; u++) {
+ hbool_t update_deps = FALSE; /* Whether to update flush dependencies */
+
+ grandchild_addr = middle_node_ptrs[u].addr;
+ if(depth > 2) {
+ H5B2_internal_t *grandchild_int = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_int = H5B2__protect_internal(hdr, dxpl_id, grandchild_addr, middle_child, middle_node_ptrs[u].node_nrec, (uint16_t)(depth - 2), H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ grandchild_class = H5AC_BT2_INT;
+ grandchild = grandchild_int;
+
+ if(grandchild_int->parent == left_child) {
+ grandchild_int->parent = middle_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_int->parent == middle_child);
+ } /* end if */
+ else {
+ H5B2_leaf_t *grandchild_leaf = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_leaf = H5B2__protect_leaf(hdr, dxpl_id, grandchild_addr, middle_child, middle_node_ptrs[u].node_nrec, H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ grandchild_class = H5AC_BT2_LEAF;
+ grandchild = grandchild_leaf;
+
+ if(grandchild_leaf->parent == left_child) {
+ grandchild_leaf->parent = middle_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_leaf->parent == middle_child);
+ } /* end else */
+
+ /* Update flush dependencies if necessary */
+ if(update_deps) {
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)left_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5B2__create_flush_depend((H5AC_info_t *)middle_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+
+ /* Unprotect the grandchild */
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, middle_node_ptrs[u].addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ grandchild = NULL;
+ } /* end for */
+ } /* end if */
+ } /* end if */
+
/* Update the current number of records in middle node */
curr_middle_nrec = (uint16_t)(curr_middle_nrec + left_nrec_move);
@@ -926,7 +1372,6 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Move node pointers also if this is an internal node */
if(depth > 1) {
hsize_t moved_nrec; /* Total number of records moved, for internal redistrib */
- unsigned u; /* Local index variable */
/* Move right node pointers into middle node */
HDmemcpy(&(middle_node_ptrs[curr_middle_nrec + 1]), &(right_node_ptrs[0]), sizeof(H5B2_node_ptr_t) * right_nrec_move);
@@ -941,6 +1386,65 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDmemmove(&(right_node_ptrs[0]), &(right_node_ptrs[right_nrec_move]), sizeof(H5B2_node_ptr_t) * (size_t)(new_right_nrec + 1));
} /* end if */
+ /* Update flush dependencies */
+ if(hdr->swmr_write) {
+
+ /* Update node pointers */
+ if(depth > 1) {
+ /* Loop over grandchildren */
+ for(u = (curr_middle_nrec + (unsigned)1); u < (curr_middle_nrec + right_nrec_move + 1); u++) {
+ hbool_t update_deps = FALSE; /* Whether to update flush dependencies */
+
+ grandchild_addr = middle_node_ptrs[u].addr;
+ if(depth > 2) {
+ H5B2_internal_t *grandchild_int = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_int = H5B2__protect_internal(hdr, dxpl_id, grandchild_addr, middle_child, middle_node_ptrs[u].node_nrec, (uint16_t)(depth - 2), H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ grandchild_class = H5AC_BT2_INT;
+ grandchild = grandchild_int;
+
+ if(grandchild_int->parent == right_child) {
+ grandchild_int->parent = middle_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_int->parent == middle_child);
+ } /* end if */
+ else {
+ H5B2_leaf_t *grandchild_leaf = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_leaf = H5B2__protect_leaf(hdr, dxpl_id, grandchild_addr, middle_child, middle_node_ptrs[u].node_nrec, H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ grandchild_class = H5AC_BT2_LEAF;
+ grandchild = grandchild_leaf;
+
+ if(grandchild_leaf->parent == right_child) {
+ grandchild_leaf->parent = middle_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_leaf->parent == middle_child);
+ } /* end else */
+
+ /* Update flush dependencies if necessary */
+ if(update_deps) {
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)right_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5B2__create_flush_depend((H5AC_info_t *)middle_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+
+ /* Unprotect the grandchild */
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, middle_node_ptrs[u].addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ grandchild = NULL;
+ } /* end for */
+ } /* end if */
+ } /* end if */
+
/* Mark nodes as dirty */
middle_child_flags |= H5AC__DIRTIED_FLAG;
right_child_flags |= H5AC__DIRTIED_FLAG;
@@ -1036,6 +1540,13 @@ done:
if(right_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, right_child_flags) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node")
+ /* Unprotect the grandchild on error */
+ if(grandchild) {
+ HDassert(ret_value < 0);
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, grandchild_addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B2__redistribute3() */
@@ -1063,10 +1574,14 @@ H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
const H5AC_class_t *child_class; /* Pointer to child node's class info */
haddr_t left_addr, right_addr; /* Addresses of left & right child nodes */
void *left_child = NULL, *right_child = NULL; /* Pointers to left & right child nodes */
+ const H5AC_class_t *grandchild_class; /* Pointer to grandchild node's class info */
+ haddr_t grandchild_addr; /* Grandchild address */
+ void *grandchild = NULL; /* Pointer to grandchild node */
uint16_t *left_nrec, *right_nrec; /* Pointers to left & right child # of records */
uint8_t *left_native, *right_native; /* Pointers to left & right children's native records */
H5B2_node_ptr_t *left_node_ptrs = NULL, *right_node_ptrs = NULL;/* Pointers to childs' node pointer info */
unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */
+ unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1088,11 +1603,18 @@ H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
- if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ /* Shadow the left node if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[idx]), &left_internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+ left_addr = internal->node_ptrs[idx].addr;
+ } /* end if */
+
/* More setup for accessing child node information */
left_child = left_internal;
right_child = right_internal;
@@ -1113,11 +1635,18 @@ H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
- if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ /* Shadow the left node if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx]), &left_leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node")
+ left_addr = internal->node_ptrs[idx].addr;
+ } /* end if */
+
/* More setup for accessing child node information */
left_child = left_leaf;
right_child = right_leaf;
@@ -1139,12 +1668,73 @@ H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
if(depth > 1)
HDmemcpy(&(left_node_ptrs[*left_nrec + 1]), &(right_node_ptrs[0]), sizeof(H5B2_node_ptr_t) * (size_t)(*right_nrec + 1));
+ /* Update flush dependencies */
+ if(hdr->swmr_write) {
+
+ /* Update node pointers */
+ if(depth > 1) {
+ /* Loop over grandchildren */
+ for(u = (*left_nrec + (unsigned)1); u < (*left_nrec + (unsigned)*right_nrec + (unsigned)2); u++) {
+ hbool_t update_deps = FALSE; /* Whether to update flush dependencies */
+
+ grandchild_addr = left_node_ptrs[u].addr;
+ if(depth > 2) {
+ H5B2_internal_t *grandchild_int = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_int = H5B2__protect_internal(hdr, dxpl_id, grandchild_addr, left_child, left_node_ptrs[u].node_nrec, (uint16_t)(depth - 2), H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ grandchild_class = H5AC_BT2_INT;
+ grandchild = grandchild_int;
+
+ if(grandchild_int->parent == right_child) {
+ grandchild_int->parent = left_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_int->parent == left_child);
+ } /* end if */
+ else {
+ H5B2_leaf_t *grandchild_leaf = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_leaf = H5B2__protect_leaf(hdr, dxpl_id, grandchild_addr, left_child, left_node_ptrs[u].node_nrec, H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ grandchild_class = H5AC_BT2_LEAF;
+ grandchild = grandchild_leaf;
+
+ if(grandchild_leaf->parent == right_child) {
+ grandchild_leaf->parent = left_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_leaf->parent == left_child);
+ } /* end else */
+
+ /* Update flush dependencies if necessary */
+ if(update_deps) {
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)right_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5B2__create_flush_depend((H5AC_info_t *)left_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+
+ /* Unprotect the grandchild */
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, left_node_ptrs[u].addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ grandchild = NULL;
+ } /* end for */
+ } /* end if */
+ } /* end if */
+
/* Update # of records in left node */
*left_nrec = (uint16_t)(*left_nrec + *right_nrec + 1);
/* Mark nodes as dirty */
left_child_flags |= H5AC__DIRTIED_FLAG;
- right_child_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
+ right_child_flags |= H5AC__DELETED_FLAG;
+ if(!(hdr->swmr_write))
+ right_child_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
} /* end block */
/* Update # of records in child nodes */
@@ -1189,6 +1779,13 @@ done:
if(right_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, right_child_flags) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node")
+ /* Unprotect the grandchild on error */
+ if(grandchild) {
+ HDassert(ret_value < 0);
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, grandchild_addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B2__merge2() */
@@ -1218,6 +1815,9 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
haddr_t middle_addr; /* Address of middle child node */
void *left_child = NULL, *right_child = NULL; /* Pointers to left & right child nodes */
void *middle_child = NULL; /* Pointer to middle child node */
+ const H5AC_class_t *grandchild_class; /* Pointer to grandchild node's class info */
+ haddr_t grandchild_addr; /* Grandchild address */
+ void *grandchild = NULL; /* Pointer to grandchild node */
uint16_t *left_nrec, *right_nrec; /* Pointers to left & right child # of records */
uint16_t *middle_nrec; /* Pointer to middle child # of records */
uint8_t *left_native, *right_native; /* Pointers to left & right children's native records */
@@ -1227,6 +1827,7 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
hsize_t middle_moved_nrec; /* Number of records moved, for internal split */
unsigned left_child_flags = H5AC__NO_FLAGS_SET, right_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */
unsigned middle_child_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting child nodes */
+ unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1250,13 +1851,23 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
- if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
- if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ /* Shadow left and middle nodes if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[idx - 1]), &left_internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+ if(H5B2__shadow_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[idx]), &middle_internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+ left_addr = internal->node_ptrs[idx - 1].addr;
+ middle_addr = internal->node_ptrs[idx].addr;
+ } /* end if */
+
/* More setup for accessing child node information */
left_child = left_internal;
middle_child = middle_internal;
@@ -1283,13 +1894,23 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
- if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal, internal->node_ptrs[idx - 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
- if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ /* Shadow left and middle nodes if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx - 1]), &left_leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node")
+ if(H5B2__shadow_leaf(hdr, dxpl_id, &(internal->node_ptrs[idx]), &middle_leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node")
+ left_addr = internal->node_ptrs[idx - 1].addr;
+ middle_addr = internal->node_ptrs[idx].addr;
+ } /* end if */
+
/* More setup for accessing child node information */
left_child = left_leaf;
middle_child = middle_leaf;
@@ -1324,7 +1945,6 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Move node pointers also if this is an internal node */
if(depth > 1) {
- unsigned u; /* Local index variable */
/* Copy node pointers from middle node into left node */
HDmemcpy(&(left_node_ptrs[*left_nrec + 1]), &(middle_node_ptrs[0]), sizeof(H5B2_node_ptr_t) * middle_nrec_move);
@@ -1337,6 +1957,65 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDmemmove(&(middle_node_ptrs[0]), &(middle_node_ptrs[middle_nrec_move]), sizeof(H5B2_node_ptr_t) * (size_t)((unsigned)(*middle_nrec + 1) - middle_nrec_move));
} /* end if */
+ /* Update flush dependencies */
+ if(hdr->swmr_write) {
+
+ /* Update node pointers */
+ if(depth > 1) {
+ /* Loop over grandchildren */
+ for(u = (*left_nrec + (unsigned)1); u < (*left_nrec + (unsigned)middle_nrec_move + (unsigned)1); u++) {
+ hbool_t update_deps = FALSE; /* Whether to update flush dependencies */
+
+ grandchild_addr = left_node_ptrs[u].addr;
+ if(depth > 2) {
+ H5B2_internal_t *grandchild_int = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_int = H5B2__protect_internal(hdr, dxpl_id, grandchild_addr, left_child, left_node_ptrs[u].node_nrec, (uint16_t)(depth - 2), H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ grandchild_class = H5AC_BT2_INT;
+ grandchild = grandchild_int;
+
+ if(grandchild_int->parent == middle_child) {
+ grandchild_int->parent = left_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_int->parent == left_child);
+ } /* end if */
+ else {
+ H5B2_leaf_t *grandchild_leaf = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_leaf = H5B2__protect_leaf(hdr, dxpl_id, grandchild_addr, left_child, left_node_ptrs[u].node_nrec, H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ grandchild_class = H5AC_BT2_LEAF;
+ grandchild = grandchild_leaf;
+
+ if(grandchild_leaf->parent == middle_child) {
+ grandchild_leaf->parent = left_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_leaf->parent == left_child);
+ } /* end else */
+
+ /* Update flush dependencies if necessary */
+ if(update_deps) {
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)middle_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5B2__create_flush_depend((H5AC_info_t *)left_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+
+ /* Unprotect the grandchild */
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, left_node_ptrs[u].addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ grandchild = NULL;
+ } /* end for */
+ } /* end if */
+ } /* end if */
+
/* Update # of records in left & middle nodes */
*left_nrec = (uint16_t)(*left_nrec + middle_nrec_move);
*middle_nrec = (uint16_t)(*middle_nrec - middle_nrec_move);
@@ -1359,12 +2038,73 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Copy node pointers from right node into middle node */
HDmemcpy(&(middle_node_ptrs[*middle_nrec + 1]), &(right_node_ptrs[0]), sizeof(H5B2_node_ptr_t) * (size_t)(*right_nrec + 1));
+ /* Update flush dependencies */
+ if(hdr->swmr_write) {
+
+ /* Update node pointers */
+ if(depth > 1) {
+ /* Loop over grandchildren */
+ for(u = (*middle_nrec + (unsigned)1); u < (*middle_nrec + (unsigned)*right_nrec + (unsigned)2); u++) {
+ hbool_t update_deps = FALSE; /* Whether to update flush dependencies */
+
+ grandchild_addr = middle_node_ptrs[u].addr;
+ if(depth > 2) {
+ H5B2_internal_t *grandchild_int = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_int = H5B2__protect_internal(hdr, dxpl_id, grandchild_addr, middle_child, middle_node_ptrs[u].node_nrec, (uint16_t)(depth - 2), H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ grandchild_class = H5AC_BT2_INT;
+ grandchild = grandchild_int;
+
+ if(grandchild_int->parent == right_child) {
+ grandchild_int->parent = middle_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_int->parent == middle_child);
+ } /* end if */
+ else {
+ H5B2_leaf_t *grandchild_leaf = NULL;
+
+ /* Protect grandchild */
+ if(NULL == (grandchild_leaf = H5B2__protect_leaf(hdr, dxpl_id, grandchild_addr, middle_child, middle_node_ptrs[u].node_nrec, H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ grandchild_class = H5AC_BT2_LEAF;
+ grandchild = grandchild_leaf;
+
+ if(grandchild_leaf->parent == right_child) {
+ grandchild_leaf->parent = middle_child;
+ update_deps = TRUE;
+ } /* end if */
+ else
+ HDassert(grandchild_leaf->parent == middle_child);
+ } /* end else */
+
+ /* Update flush dependencies if necessary */
+ if(update_deps) {
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)right_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5B2__create_flush_depend((H5AC_info_t *)middle_child, (H5AC_info_t *)grandchild) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+
+ /* Unprotect the grandchild */
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, middle_node_ptrs[u].addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ grandchild = NULL;
+ } /* end for */
+ } /* end if */
+ } /* end if */
+
/* Update # of records in middle node */
*middle_nrec = (uint16_t)(*middle_nrec + (*right_nrec + 1));
/* Mark nodes as dirty */
middle_child_flags |= H5AC__DIRTIED_FLAG;
- right_child_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
+ right_child_flags |= H5AC__DELETED_FLAG;
+ if(!(hdr->swmr_write))
+ right_child_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
} /* end block */
/* Update # of records in child nodes */
@@ -1417,6 +2157,13 @@ done:
if(right_child && H5AC_unprotect(hdr->f, dxpl_id, child_class, right_addr, right_child, right_child_flags) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree child node")
+ /* Unprotect the grandchild on error */
+ if(grandchild) {
+ HDassert(ret_value < 0);
+ if(H5AC_unprotect(hdr->f, dxpl_id, grandchild_class, grandchild_addr, grandchild, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B2__merge3() */
@@ -1464,7 +2211,7 @@ H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
child_addr = internal->node_ptrs[idx].addr;
/* Lock B-tree child nodes */
- if(NULL == (child_internal = H5B2__protect_internal(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ if(NULL == (child_internal = H5B2__protect_internal(hdr, dxpl_id, child_addr, internal, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for accessing child node information */
@@ -1479,7 +2226,7 @@ H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
child_addr = internal->node_ptrs[idx].addr;
/* Lock B-tree child node */
- if(NULL == (child_leaf = H5B2__protect_leaf(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (child_leaf = H5B2__protect_leaf(hdr, dxpl_id, child_addr, internal, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for accessing child node information */
@@ -1527,7 +2274,7 @@ done:
*/
herr_t
H5B2__insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
- H5B2_nodepos_t curr_pos, void *udata)
+ H5B2_nodepos_t curr_pos, void *parent, void *udata)
{
H5B2_leaf_t *leaf; /* Pointer to leaf node */
int cmp; /* Comparison value of records */
@@ -1542,9 +2289,14 @@ H5B2__insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr
HDassert(H5F_addr_defined(curr_node_ptr->addr));
/* Lock current B-tree node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, parent, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ /* Shadow the node if doing SWMR writes */
+ if(hdr->swmr_write)
+ if(H5B2__shadow_leaf(hdr, dxpl_id, curr_node_ptr, &leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node")
+
/* Must have a leaf node with enough space to insert a record now */
HDassert(curr_node_ptr->node_nrec < hdr->node_info[0].max_nrec);
@@ -1624,7 +2376,7 @@ done:
herr_t
H5B2__insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
unsigned *parent_cache_info_flags_ptr, H5B2_node_ptr_t *curr_node_ptr,
- H5B2_nodepos_t curr_pos, void *udata)
+ H5B2_nodepos_t curr_pos, void *parent, void *udata)
{
H5B2_internal_t *internal = NULL; /* Pointer to internal node */
unsigned internal_flags = H5AC__NO_FLAGS_SET;
@@ -1641,9 +2393,14 @@ H5B2__insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDassert(H5F_addr_defined(curr_node_ptr->addr));
/* Lock current B-tree node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, parent, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ /* Shadow the node if doing SWMR writes */
+ if(hdr->swmr_write)
+ if(H5B2__shadow_internal(hdr, dxpl_id, depth, curr_node_ptr, &internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+
/* Split or redistribute child node pointers, if necessary */
{
int cmp; /* Comparison value of records */
@@ -1731,11 +2488,11 @@ H5B2__insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Attempt to insert node */
if(depth > 1) {
- if(H5B2__insert_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &internal_flags, &internal->node_ptrs[idx], next_pos, udata) < 0)
+ if(H5B2__insert_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &internal_flags, &internal->node_ptrs[idx], next_pos, internal, udata) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree internal node")
} /* end if */
else {
- if(H5B2__insert_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], next_pos, udata) < 0)
+ if(H5B2__insert_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], next_pos, internal, udata) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINSERT, FAIL, "unable to insert record into B-tree leaf node")
} /* end else */
@@ -1769,7 +2526,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5B2__create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *node_ptr)
+H5B2__create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, void *parent, H5B2_node_ptr_t *node_ptr)
{
H5B2_leaf_t *leaf = NULL; /* Pointer to new leaf node created */
herr_t ret_value = SUCCEED; /* Return value */
@@ -1804,6 +2561,13 @@ HDmemset(leaf->leaf_native, 0, hdr->cls->nrec_size * hdr->node_info[0].max_nrec)
/* Set number of records */
leaf->nrec = 0;
+ /* Set parent */
+ leaf->parent = parent;
+
+ /* Set shadowed list next and prev pointers */
+ leaf->shadowed_next = NULL;
+ leaf->shadowed_prev = NULL;
+
/* Allocate space on disk for the leaf */
if(HADDR_UNDEF == (node_ptr->addr = H5MF_alloc(hdr->f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->node_size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "file allocation failed for B-tree leaf node")
@@ -1837,8 +2601,8 @@ done:
*-------------------------------------------------------------------------
*/
H5B2_leaf_t *
-H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, uint16_t nrec,
- unsigned flags)
+H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, void *parent,
+ uint16_t nrec, unsigned flags)
{
H5B2_leaf_cache_ud_t udata; /* User-data for callback */
H5B2_leaf_t *ret_value = NULL; /* Return value */
@@ -1855,6 +2619,7 @@ H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, uint16_t nrec,
/* Set up user data for callback */
udata.f = hdr->f;
udata.hdr = hdr;
+ udata.parent = parent;
H5_CHECKED_ASSIGN(udata.nrec, uint16_t, nrec, unsigned)
/* Protect the leaf node */
@@ -1881,8 +2646,8 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *node_ptr,
- uint16_t depth)
+H5B2__create_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, void *parent,
+ H5B2_node_ptr_t *node_ptr, uint16_t depth)
{
H5B2_internal_t *internal = NULL; /* Pointer to new internal node created */
herr_t ret_value = SUCCEED; /* Return value */
@@ -1926,6 +2691,13 @@ HDmemset(internal->node_ptrs, 0, sizeof(H5B2_node_ptr_t) * (hdr->node_info[depth
internal->nrec = 0;
internal->depth = depth;
+ /* Set parent */
+ internal->parent = parent;
+
+ /* Set shadowed list next and prev pointers */
+ internal->shadowed_next = NULL;
+ internal->shadowed_prev = NULL;
+
/* Allocate space on disk for the internal node */
if(HADDR_UNDEF == (node_ptr->addr = H5MF_alloc(hdr->f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->node_size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "file allocation failed for B-tree internal node")
@@ -1960,7 +2732,7 @@ done:
*/
H5B2_internal_t *
H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
- uint16_t nrec, uint16_t depth, unsigned flags)
+ void *parent, uint16_t nrec, uint16_t depth, unsigned flags)
{
H5B2_internal_cache_ud_t udata; /* User data to pass through to cache 'deserialize' callback */
H5B2_internal_t *ret_value = NULL; /* Return value */
@@ -1978,6 +2750,7 @@ H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
/* Set up user data for callback */
udata.f = hdr->f;
udata.hdr = hdr;
+ udata.parent = parent;
udata.nrec = nrec;
udata.depth = depth;
@@ -2009,13 +2782,15 @@ done:
*/
herr_t
H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
- const H5B2_node_ptr_t *curr_node, H5B2_operator_t op, void *op_data)
+ const H5B2_node_ptr_t *curr_node, void *parent, H5B2_operator_t op,
+ void *op_data)
{
const H5AC_class_t *curr_node_class = NULL; /* Pointer to current node's class info */
void *node = NULL; /* Pointers to current node */
uint8_t *node_native; /* Pointers to node's native records */
uint8_t *native = NULL; /* Pointers to copy of node's native records */
H5B2_node_ptr_t *node_ptrs = NULL; /* Pointers to node's node pointers */
+ hbool_t node_pinned = FALSE; /* Whether node is pinned */
unsigned u; /* Local index */
herr_t ret_value = H5_ITER_CONT; /* Iterator return value */
@@ -2031,7 +2806,7 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_internal_t *internal; /* Pointer to internal node */
/* Lock the current B-tree node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, parent, curr_node->node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Set up information about current node */
@@ -2050,7 +2825,7 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_leaf_t *leaf; /* Pointer to leaf node */
/* Lock the current B-tree node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, parent, curr_node->node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Set up information about current node */
@@ -2067,15 +2842,18 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDmemcpy(native, node_native, (hdr->cls->nrec_size * curr_node->node_nrec));
/* Unlock the node */
- if(H5AC_unprotect(hdr->f, dxpl_id, curr_node_class, curr_node->addr, node, H5AC__NO_FLAGS_SET) < 0)
+ if(H5AC_unprotect(hdr->f, dxpl_id, curr_node_class, curr_node->addr, node, (unsigned)(hdr->swmr_write ? H5AC__PIN_ENTRY_FLAG : H5AC__NO_FLAGS_SET)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
- node = NULL;
+ if(hdr->swmr_write)
+ node_pinned = TRUE;
+ else
+ node = NULL;
/* Iterate through records, in order */
for(u = 0; u < curr_node->node_nrec && !ret_value; u++) {
/* Descend into child node, if current node is an internal node */
if(depth > 0) {
- if((ret_value = H5B2__iterate_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(node_ptrs[u]), op, op_data)) < 0)
+ if((ret_value = H5B2__iterate_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(node_ptrs[u]), node, op, op_data)) < 0)
HERROR(H5E_BTREE, H5E_CANTLIST, "node iteration failed");
} /* end if */
@@ -2088,11 +2866,15 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Descend into last child node, if current node is an internal node */
if(!ret_value && depth > 0) {
- if((ret_value = H5B2__iterate_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(node_ptrs[u]), op, op_data)) < 0)
+ if((ret_value = H5B2__iterate_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(node_ptrs[u]), node, op, op_data)) < 0)
HERROR(H5E_BTREE, H5E_CANTLIST, "node iteration failed");
} /* end if */
done:
+ /* Unpin the node if it was pinned */
+ if(node_pinned && H5AC_unpin_entry(node) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "can't unpin node")
+
/* Release the node pointers & native records, if they were copied */
if(node_ptrs)
node_ptrs = (H5B2_node_ptr_t *)H5FL_FAC_FREE(hdr->node_info[depth].node_ptr_fac, node_ptrs);
@@ -2118,7 +2900,7 @@ done:
*/
herr_t
H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
- H5B2_nodepos_t curr_pos, void *udata, H5B2_remove_t op, void *op_data)
+ H5B2_nodepos_t curr_pos, void *parent, void *udata, H5B2_remove_t op, void *op_data)
{
H5B2_leaf_t *leaf; /* Pointer to leaf node */
haddr_t leaf_addr = HADDR_UNDEF; /* Leaf address on disk */
@@ -2135,7 +2917,7 @@ H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr
/* Lock current B-tree node */
leaf_addr = curr_node_ptr->addr;
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, parent, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Sanity check number of records */
@@ -2175,17 +2957,26 @@ H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr
/* Update number of records in node */
leaf->nrec--;
- /* Mark leaf node as dirty also */
- leaf_flags |= H5AC__DIRTIED_FLAG;
-
if(leaf->nrec > 0) {
+ /* Shadow the node if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_leaf(hdr, dxpl_id, curr_node_ptr, &leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node")
+ leaf_addr = curr_node_ptr->addr;
+ } /* end if */
+
/* Pack record out of leaf */
if(idx < leaf->nrec)
HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx), H5B2_LEAF_NREC(leaf, hdr, (idx + 1)), hdr->cls->nrec_size * (leaf->nrec - idx));
+
+ /* Mark leaf node as dirty also */
+ leaf_flags |= H5AC__DIRTIED_FLAG;
} /* end if */
else {
/* Let the cache know that the object is deleted */
- leaf_flags |= H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
+ leaf_flags |= H5AC__DELETED_FLAG;
+ if(!hdr->swmr_write)
+ leaf_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
/* Reset address of parent node pointer */
curr_node_ptr->addr = HADDR_UNDEF;
@@ -2218,7 +3009,7 @@ done:
*/
herr_t
H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
- void *swap_loc, uint16_t depth, H5AC_info_t *parent_cache_info,
+ void *swap_loc, void *swap_parent, uint16_t depth, H5AC_info_t *parent_cache_info,
unsigned *parent_cache_info_flags_ptr, H5B2_nodepos_t curr_pos,
H5B2_node_ptr_t *curr_node_ptr, void *udata, H5B2_remove_t op, void *op_data)
{
@@ -2226,6 +3017,8 @@ H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
unsigned *new_cache_info_flags_ptr = NULL;
H5B2_node_ptr_t *new_node_ptr; /* Pointer to new node pointer */
H5B2_internal_t *internal; /* Pointer to internal node */
+ const H5AC_class_t *new_root_class; /* Pointer to new root node's class info */
+ void *new_root = NULL; /* Pointer to new root node (if old root collapsed) */
H5B2_nodepos_t next_pos = H5B2_POS_MIDDLE; /* Position of next node */
unsigned internal_flags = H5AC__NO_FLAGS_SET;
haddr_t internal_addr; /* Address of internal node */
@@ -2244,7 +3037,7 @@ H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
/* Lock current B-tree node */
internal_addr = curr_node_ptr->addr;
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, parent_cache_info, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Determine the correct number of records to merge at */
@@ -2261,12 +3054,66 @@ H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node")
/* Let the cache know that the object is deleted */
- internal_flags |= H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
+ internal_flags |= H5AC__DELETED_FLAG;
+ if(!hdr->swmr_write)
+ internal_flags |= H5AC__FREE_FILE_SPACE_FLAG;
/* Reset information in header's root node pointer */
curr_node_ptr->addr = internal->node_ptrs[0].addr;
curr_node_ptr->node_nrec = internal->node_ptrs[0].node_nrec;
+ /* Update flush dependencies */
+ if(hdr->swmr_write) {
+ hbool_t update_dep = FALSE; /* Whether to update root flush dependency */
+
+ /* Update hdr to root dependency */
+ if(depth > 1) {
+ H5B2_internal_t *new_root_int = NULL;
+
+ /* Protect new root */
+ if(NULL == (new_root_int = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, hdr, curr_node_ptr->node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ new_root_class = H5AC_BT2_INT;
+ new_root = new_root_int;
+
+ if(new_root_int->parent == internal) {
+ new_root_int->parent = hdr;
+ update_dep = TRUE;
+ } /* end if */
+ else
+ HDassert(new_root_int->parent == hdr);
+ } /* end if */
+ else {
+ H5B2_leaf_t *new_root_leaf = NULL;
+
+ /* Protect new root */
+ if(NULL == (new_root_leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, hdr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ new_root_class = H5AC_BT2_LEAF;
+ new_root = new_root_leaf;
+
+ if(new_root_leaf->parent == internal) {
+ new_root_leaf->parent = hdr;
+ update_dep = TRUE;
+ } /* end if */
+ else
+ HDassert(new_root_leaf->parent == hdr);
+ } /* end else */
+
+ /* Update flush dependency if necessary */
+ if(update_dep) {
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)internal, (H5AC_info_t *)new_root) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5B2__create_flush_depend((H5AC_info_t *)hdr, (H5AC_info_t *)new_root) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+
+ /* Unprotect the new root */
+ if(H5AC_unprotect(hdr->f, dxpl_id, new_root_class, curr_node_ptr->addr, new_root, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ new_root = NULL;
+ } /* end if */
+
/* Indicate that the level of the B-tree decreased */
*depth_decreased = TRUE;
@@ -2287,6 +3134,13 @@ H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
int cmp = 0; /* Comparison value of records */
unsigned retries; /* Number of times to attempt redistribution */
+ /* Shadow the node if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_internal(hdr, dxpl_id, depth, curr_node_ptr, &internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+ internal_addr = curr_node_ptr->addr;
+ } /* end if */
+
/* Locate node pointer for child */
if(swap_loc)
idx = 0;
@@ -2363,8 +3217,10 @@ H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
} /* end while */
/* Handle deleting a record from an internal node */
- if(!swap_loc && cmp == 0)
+ if(!swap_loc && cmp == 0) {
swap_loc = H5B2_INT_NREC(internal, hdr, idx - 1);
+ swap_parent = internal;
+ } /* end if */
/* Swap record to delete with record from leaf, if we are the last internal node */
if(swap_loc && depth == 1)
@@ -2391,12 +3247,12 @@ H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
/* Attempt to remove record from child node */
if(depth > 1) {
- if(H5B2__remove_internal(hdr, dxpl_id, depth_decreased, swap_loc, (uint16_t)(depth - 1),
+ if(H5B2__remove_internal(hdr, dxpl_id, depth_decreased, swap_loc, swap_parent, (uint16_t)(depth - 1),
new_cache_info, new_cache_info_flags_ptr, next_pos, new_node_ptr, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node")
} /* end if */
else {
- if(H5B2__remove_leaf(hdr, dxpl_id, new_node_ptr, next_pos, udata, op, op_data) < 0)
+ if(H5B2__remove_leaf(hdr, dxpl_id, new_node_ptr, next_pos, new_cache_info, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node")
} /* end else */
@@ -2405,7 +3261,8 @@ H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
new_node_ptr->all_nrec--;
/* Mark node as dirty */
- internal_flags |= H5AC__DIRTIED_FLAG;
+ if(!(hdr->swmr_write && collapsed_root))
+ internal_flags |= H5AC__DIRTIED_FLAG;
#ifdef H5B2_DEBUG
H5B2__assert_internal((!collapsed_root ? (curr_node_ptr->all_nrec - 1) : new_node_ptr->all_nrec), hdr, internal);
@@ -2416,6 +3273,13 @@ done:
if(internal && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, internal_addr, internal, internal_flags) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node")
+ /* Release the new root's child on error */
+ if(new_root) {
+ HDassert(ret_value < 0);
+ if(H5AC_unprotect(hdr->f, dxpl_id, new_root_class, curr_node_ptr->addr, new_root, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5B2__remove_internal() */
@@ -2436,7 +3300,7 @@ done:
*/
herr_t
H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
- H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos,
+ H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *parent,
unsigned idx, H5B2_remove_t op, void *op_data)
{
H5B2_leaf_t *leaf; /* Pointer to leaf node */
@@ -2453,7 +3317,7 @@ H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Lock B-tree leaf node */
leaf_addr = curr_node_ptr->addr;
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, parent, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Sanity check number of records */
@@ -2490,17 +3354,26 @@ H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Update number of records in node */
leaf->nrec--;
- /* Mark leaf node as dirty also */
- leaf_flags |= H5AC__DIRTIED_FLAG;
-
if(leaf->nrec > 0) {
+ /* Shadow the node if doing SWMR writes */
+ if(hdr->swmr_write) {
+ if(H5B2__shadow_leaf(hdr, dxpl_id, curr_node_ptr, &leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow leaf node")
+ leaf_addr = curr_node_ptr->addr;
+ } /* end if */
+
/* Pack record out of leaf */
if(idx < leaf->nrec)
HDmemmove(H5B2_LEAF_NREC(leaf, hdr, idx), H5B2_LEAF_NREC(leaf, hdr, (idx + 1)), hdr->cls->nrec_size * (leaf->nrec - idx));
+
+ /* Mark leaf node as dirty also */
+ leaf_flags |= H5AC__DIRTIED_FLAG;
} /* end if */
else {
/* Let the cache know that the object is deleted */
- leaf_flags |= H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
+ leaf_flags |= H5AC__DELETED_FLAG;
+ if(!hdr->swmr_write)
+ leaf_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
/* Reset address of parent node pointer */
curr_node_ptr->addr = HADDR_UNDEF;
@@ -2534,14 +3407,16 @@ done:
*/
herr_t
H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
- hbool_t *depth_decreased, void *swap_loc, uint16_t depth,
+ hbool_t *depth_decreased, void *swap_loc, void *swap_parent, uint16_t depth,
H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t n,
- H5B2_remove_t op, void *op_data)
+ void *udata, H5B2_remove_t op, void *op_data)
{
H5AC_info_t *new_cache_info; /* Pointer to new cache info */
unsigned *new_cache_info_flags_ptr = NULL;
H5B2_node_ptr_t *new_node_ptr; /* Pointer to new node pointer */
+ const H5AC_class_t *new_root_class; /* Pointer to new root node's class info */
+ void *new_root = NULL; /* Pointer to new root node (if old root collapsed) */
H5B2_internal_t *internal; /* Pointer to internal node */
H5B2_nodepos_t next_pos = H5B2_POS_MIDDLE; /* Position of next node */
unsigned internal_flags = H5AC__NO_FLAGS_SET;
@@ -2561,7 +3436,7 @@ H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Lock current B-tree node */
internal_addr = curr_node_ptr->addr;
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, parent_cache_info, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
HDassert(internal->nrec == curr_node_ptr->node_nrec);
HDassert(depth == hdr->depth || internal->nrec > 1);
@@ -2581,12 +3456,66 @@ H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to merge child node")
/* Let the cache know that the object is deleted */
- internal_flags |= H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
+ internal_flags |= H5AC__DELETED_FLAG;
+ if(!hdr->swmr_write)
+ internal_flags |= H5AC__FREE_FILE_SPACE_FLAG;
/* Reset information in header's root node pointer */
curr_node_ptr->addr = internal->node_ptrs[0].addr;
curr_node_ptr->node_nrec = internal->node_ptrs[0].node_nrec;
+ /* Update flush dependencies */
+ if(hdr->swmr_write) {
+ hbool_t update_dep = FALSE; /* Whether to update root flush dependency */
+
+ /* Update hdr to root dependency */
+ if(depth > 1) {
+ H5B2_internal_t *new_root_int = NULL;
+
+ /* Protect new root */
+ if(NULL == (new_root_int = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, hdr, curr_node_ptr->node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ new_root_class = H5AC_BT2_INT;
+ new_root = new_root_int;
+
+ if(new_root_int->parent == internal) {
+ new_root_int->parent = hdr;
+ update_dep = TRUE;
+ } /* end if */
+ else
+ HDassert(new_root_int->parent == hdr);
+ } /* end if */
+ else {
+ H5B2_leaf_t *new_root_leaf = NULL;
+
+ /* Protect new root */
+ if(NULL == (new_root_leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, hdr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ new_root_class = H5AC_BT2_LEAF;
+ new_root = new_root_leaf;
+
+ if(new_root_leaf->parent == internal) {
+ new_root_leaf->parent = hdr;
+ update_dep = TRUE;
+ } /* end if */
+ else
+ HDassert(new_root_leaf->parent == hdr);
+ } /* end else */
+
+ /* Update flush dependency if necessary */
+ if(update_dep) {
+ if(H5B2__destroy_flush_depend((H5AC_info_t *)internal, (H5AC_info_t *)new_root) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ if(H5B2__create_flush_depend((H5AC_info_t *)hdr, (H5AC_info_t *)new_root) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ } /* end if */
+
+ /* Unprotect the new root */
+ if(H5AC_unprotect(hdr->f, dxpl_id, new_root_class, curr_node_ptr->addr, new_root, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ new_root = NULL;
+ } /* end if */
+
/* Indicate that the level of the B-tree decreased */
*depth_decreased = TRUE;
@@ -2608,6 +3537,13 @@ H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
hbool_t found = FALSE; /* Comparison value of records */
unsigned retries; /* Number of times to attempt redistribution */
+ /* Shadow the node if doing SWMR writes */
+ if(hdr->swmr_write && !collapsed_root) {
+ if(H5B2__shadow_internal(hdr, dxpl_id, depth, curr_node_ptr, &internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTCOPY, FAIL, "unable to shadow internal node")
+ internal_addr = curr_node_ptr->addr;
+ } /* end if */
+
/* Locate node pointer for child */
if(swap_loc)
idx = 0;
@@ -2735,8 +3671,10 @@ H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
} /* end while */
/* Handle deleting a record from an internal node */
- if(!swap_loc && found)
+ if(!swap_loc && found) {
swap_loc = H5B2_INT_NREC(internal, hdr, idx - 1);
+ swap_parent = internal;
+ } /* end if */
/* Swap record to delete with record from leaf, if we are the last internal node */
if(swap_loc && depth == 1)
@@ -2763,12 +3701,12 @@ H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Attempt to remove record from child node */
if(depth > 1) {
- if(H5B2__remove_internal_by_idx(hdr, dxpl_id, depth_decreased, swap_loc, (uint16_t)(depth - 1),
- new_cache_info, new_cache_info_flags_ptr, new_node_ptr, next_pos, n, op, op_data) < 0)
+ if(H5B2__remove_internal_by_idx(hdr, dxpl_id, depth_decreased, swap_loc, swap_parent, (uint16_t)(depth - 1),
+ new_cache_info, new_cache_info_flags_ptr, new_node_ptr, next_pos, n, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree internal node")
} /* end if */
else {
- if(H5B2__remove_leaf_by_idx(hdr, dxpl_id, new_node_ptr, next_pos, (unsigned)n, op, op_data) < 0)
+ if(H5B2__remove_leaf_by_idx(hdr, dxpl_id, new_node_ptr, next_pos, new_cache_info, (unsigned)n, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDELETE, FAIL, "unable to remove record from B-tree leaf node")
} /* end else */
@@ -2777,7 +3715,8 @@ H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
new_node_ptr->all_nrec--;
/* Mark node as dirty */
- internal_flags |= H5AC__DIRTIED_FLAG;
+ if(!(hdr->swmr_write && collapsed_root))
+ internal_flags |= H5AC__DIRTIED_FLAG;
#ifdef H5B2_DEBUG
H5B2__assert_internal((!collapsed_root ? (curr_node_ptr->all_nrec - 1) : new_node_ptr->all_nrec), hdr, internal);
@@ -2788,6 +3727,13 @@ done:
if(internal && H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, internal_addr, internal, internal_flags) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release internal B-tree node")
+ /* Release the new root's child on error */
+ if(new_root) {
+ HDassert(ret_value < 0);
+ if(H5AC_unprotect(hdr->f, dxpl_id, new_root_class, curr_node_ptr->addr, new_root, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5B2__remove_internal_by_idx() */
@@ -2820,7 +3766,7 @@ done:
*/
herr_t
H5B2__neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr,
- void *neighbor_loc, H5B2_compare_t comp, void *udata, H5B2_found_t op,
+ void *neighbor_loc, H5B2_compare_t comp, void *parent, void *udata, H5B2_found_t op,
void *op_data)
{
H5B2_leaf_t *leaf; /* Pointer to leaf node */
@@ -2837,7 +3783,7 @@ H5B2__neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_p
HDassert(op);
/* Lock current B-tree node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, parent, curr_node_ptr->node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate node pointer for child */
@@ -2907,7 +3853,7 @@ done:
herr_t
H5B2__neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc, H5B2_compare_t comp,
- void *udata, H5B2_found_t op, void *op_data)
+ void *parent, void *udata, H5B2_found_t op, void *op_data)
{
H5B2_internal_t *internal; /* Pointer to internal node */
unsigned idx; /* Location of record which matches key */
@@ -2924,7 +3870,7 @@ H5B2__neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDassert(op);
/* Lock current B-tree node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, parent, curr_node_ptr->node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Locate node pointer for child */
@@ -2946,11 +3892,11 @@ H5B2__neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Attempt to find neighboring record */
if(depth > 1) {
- if(H5B2__neighbor_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &internal->node_ptrs[idx], neighbor_loc, comp, udata, op, op_data) < 0)
+ if(H5B2__neighbor_internal(hdr, dxpl_id, (uint16_t)(depth - 1), &internal->node_ptrs[idx], neighbor_loc, comp, internal, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree internal node")
} /* end if */
else {
- if(H5B2__neighbor_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], neighbor_loc, comp, udata, op, op_data) < 0)
+ if(H5B2__neighbor_leaf(hdr, dxpl_id, &internal->node_ptrs[idx], neighbor_loc, comp, internal, udata, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to find neighbor record in B-tree leaf node")
} /* end else */
@@ -2979,7 +3925,8 @@ done:
*/
herr_t
H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
- const H5B2_node_ptr_t *curr_node, H5B2_remove_t op, void *op_data)
+ const H5B2_node_ptr_t *curr_node, void *parent, H5B2_remove_t op,
+ void *op_data)
{
const H5AC_class_t *curr_node_class = NULL; /* Pointer to current node's class info */
void *node = NULL; /* Pointers to current node */
@@ -2997,7 +3944,7 @@ H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
unsigned u; /* Local index */
/* Lock the current B-tree node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC__NO_FLAGS_SET)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, parent, curr_node->node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Set up information about current node */
@@ -3007,14 +3954,14 @@ H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Descend into children */
for(u = 0; u < internal->nrec + (unsigned)1; u++)
- if(H5B2__delete_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[u]), op, op_data) < 0)
+ if(H5B2__delete_node(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[u]), internal, op, op_data) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "node descent failed")
} /* end if */
else {
H5B2_leaf_t *leaf; /* Pointer to leaf node */
/* Lock the current B-tree node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC__NO_FLAGS_SET)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, parent, curr_node->node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Set up information about current node */
@@ -3037,7 +3984,7 @@ H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
done:
/* Unlock & delete current node */
- if(node && H5AC_unprotect(hdr->f, dxpl_id, curr_node_class, curr_node->addr, node, H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG) < 0)
+ if(node && H5AC_unprotect(hdr->f, dxpl_id, curr_node_class, curr_node->addr, node, (unsigned)(H5AC__DELETED_FLAG | (hdr->swmr_write ? 0 : H5AC__FREE_FILE_SPACE_FLAG))) < 0)
HDONE_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
@@ -3059,7 +4006,7 @@ done:
*/
herr_t
H5B2__node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
- const H5B2_node_ptr_t *curr_node, hsize_t *btree_size)
+ const H5B2_node_ptr_t *curr_node, void *parent, hsize_t *btree_size)
{
H5B2_internal_t *internal = NULL; /* Pointer to internal node */
herr_t ret_value = SUCCEED; /* Iterator return value */
@@ -3073,7 +4020,7 @@ H5B2__node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDassert(depth > 0);
/* Lock the current B-tree node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, parent, curr_node->node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Recursively descend into child nodes, if we are above the "twig" level in the B-tree */
@@ -3082,7 +4029,7 @@ H5B2__node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
/* Descend into children */
for(u = 0; u < internal->nrec + (unsigned)1; u++)
- if(H5B2__node_size(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[u]), btree_size) < 0)
+ if(H5B2__node_size(hdr, dxpl_id, (uint16_t)(depth - 1), &(internal->node_ptrs[u]), internal, btree_size) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "node iteration failed")
} /* end if */
else /* depth is 1: count all the leaf nodes from this node */
@@ -3184,6 +4131,207 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B2__leaf_free() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5B2__shadow_internal
+ *
+ * Purpose:	"Shadow" an internal node - copy it to a new location,
+ * leaving the data in the old location intact (for now).
+ * This is done when writing in SWMR mode to ensure that
+ * readers do not see nodes that are out of date with
+ * respect to each other and thereby inconsistent.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Apr 27 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5B2__shadow_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
+ H5B2_node_ptr_t *curr_node_ptr, H5B2_internal_t **internal)
+{
+ hbool_t node_pinned = FALSE;
+ hbool_t node_protected = TRUE;
+ haddr_t new_node_addr; /* Address to move node to */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /*
+ * Check arguments.
+ */
+ HDassert(hdr);
+ HDassert(depth > 0);
+ HDassert(curr_node_ptr);
+ HDassert(H5F_addr_defined(curr_node_ptr->addr));
+ HDassert(internal);
+ HDassert(*internal);
+ HDassert(hdr->swmr_write);
+ HDassert((*internal)->hdr == hdr);
+
+ /* We only need to shadow the node if it has not been shadowed since the
+ * last time the header was flushed, as otherwise it will be unreachable by
+ * the readers so there will be no need to shadow. To check if it has been
+ * shadowed, check if it is on the shadowed node list. shadowed_next will
+ * be equal to internal if this node is at the head, so it can be used to
+ * determine if this node is in the list. */
+ if(!(*internal)->shadowed_next) {
+ /*
+ * We must clone the old node so readers with an out-of-date version of
+ * the parent can still see the correct number of children, via the
+ * shadowed node. Remove it from cache but do not mark it free on disk.
+ */
+ /* Allocate space for the cloned node */
+ if(HADDR_UNDEF == (new_node_addr = H5MF_alloc(hdr->f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->node_size)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "unable to allocate file space to move B-tree node")
+
+ /* Pin old entry so it is not flushed when we unprotect */
+ if(H5AC_pin_protected_entry(*internal) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPIN, FAIL, "unable to pin old B-tree node")
+ node_pinned = TRUE;
+
+ /* Unprotect node so we can move it. Do not mark it dirty yet so it is
+ * not flushed to the old location (however unlikely). */
+ if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr->addr, *internal, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release old B-tree node")
+ node_protected = FALSE;
+
+ /* Move the location of the old child on the disk */
+ if(H5AC_move_entry(hdr->f, H5AC_BT2_INT, curr_node_ptr->addr, new_node_addr) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTMOVE, FAIL, "unable to move B-tree node")
+ curr_node_ptr->addr = new_node_addr;
+
+ /* Re-protect node at new address. Should have the same location in
+ * memory. */
+ if(*internal != H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, (*internal)->parent, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
+ node_protected = TRUE;
+
+ /* Add node to shadowed node list */
+ if(hdr->shadowed_internal) {
+ (*internal)->shadowed_next = hdr->shadowed_internal;
+ hdr->shadowed_internal->shadowed_prev = *internal;
+ } /* end if */
+ else
+ (*internal)->shadowed_next = *internal;
+ hdr->shadowed_internal = *internal;
+ } /* end if */
+
+done:
+ if(node_pinned)
+ if(H5AC_unpin_entry(*internal) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin internal B-tree node")
+
+ if(!node_protected) {
+ HDassert(ret_value < 0);
+ *internal = NULL;
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2__shadow_internal() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5B2__shadow_leaf
+ *
+ * Purpose:	"Shadow" a leaf node - copy it to a new location, leaving
+ * the data in the old location intact (for now). This is
+ * done when writing in SWMR mode to ensure that readers do
+ * not see nodes that are out of date with respect to each
+ * other and thereby inconsistent.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Apr 27 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5B2__shadow_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
+ H5B2_node_ptr_t *curr_node_ptr, H5B2_leaf_t **leaf)
+{
+ hbool_t node_pinned = FALSE;
+ hbool_t node_protected = TRUE;
+ haddr_t new_node_addr; /* Address to move node to */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /*
+ * Check arguments.
+ */
+ HDassert(hdr);
+ HDassert(curr_node_ptr);
+ HDassert(H5F_addr_defined(curr_node_ptr->addr));
+ HDassert(leaf);
+ HDassert(*leaf);
+ HDassert(hdr->swmr_write);
+ HDassert((*leaf)->hdr == hdr);
+
+ /* We only need to shadow the node if it has not been shadowed since the
+ * last time the header was flushed, as otherwise it will be unreachable by
+ * the readers so there will be no need to shadow. To check if it has been
+ * shadowed, check if it is on the shadowed node list. shadowed_next will
+ * be equal to leaf if this node is at the head, so it can be used to
+ * determine if this node is in the list. */
+ if(!(*leaf)->shadowed_next) {
+ /*
+ * We must clone the old node so readers with an out-of-date version of
+ * the parent can still see the correct number of children, via the
+ * shadowed node. Remove it from cache but do not mark it free on disk.
+ */
+ /* Allocate space for the cloned node */
+ if(HADDR_UNDEF == (new_node_addr = H5MF_alloc(hdr->f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->node_size)))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, FAIL, "unable to allocate file space to move B-tree node")
+
+ /* Pin old entry so it is not flushed when we unprotect */
+ if(H5AC_pin_protected_entry(*leaf) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPIN, FAIL, "unable to pin old B-tree node")
+ node_pinned = TRUE;
+
+ /* Unprotect node so we can move it. Do not mark it dirty yet so it is
+ * not flushed to the old location (however unlikely). */
+ if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_LEAF, curr_node_ptr->addr, *leaf, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release old B-tree node")
+ node_protected = FALSE;
+
+ /* Move the location of the old child on the disk */
+ if(H5AC_move_entry(hdr->f, H5AC_BT2_LEAF, curr_node_ptr->addr, new_node_addr) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTMOVE, FAIL, "unable to move B-tree node")
+ curr_node_ptr->addr = new_node_addr;
+
+ /* Re-protect node at new address. Should have the same location in
+ * memory. */
+ if(*leaf != H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, (*leaf)->parent, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET))
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ node_protected = TRUE;
+
+ /* Add node to shadowed node list */
+ if(hdr->shadowed_leaf) {
+ (*leaf)->shadowed_next = hdr->shadowed_leaf;
+ hdr->shadowed_leaf->shadowed_prev = *leaf;
+ } /* end if */
+ else
+ (*leaf)->shadowed_next = *leaf;
+ hdr->shadowed_leaf = *leaf;
+ } /* end if */
+
+done:
+ if(node_pinned)
+ if(H5AC_unpin_entry(*leaf) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin leaf B-tree node")
+
+ if(!node_protected) {
+ HDassert(ret_value < 0);
+ *leaf = NULL;
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2__shadow_leaf() */
+
#ifdef H5B2_DEBUG
/*-------------------------------------------------------------------------
@@ -3316,3 +4464,67 @@ H5B2__assert_internal2(hsize_t parent_all_nrec, const H5B2_hdr_t *hdr, const H5B
} /* end H5B2__assert_internal2() */
#endif /* H5B2_DEBUG */
+
+/*-------------------------------------------------------------------------
+ * Function: H5B2__create_flush_depend
+ *
+ * Purpose: Create a flush dependency between two data structure components
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5B2__create_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Sanity check */
+ HDassert(parent_entry);
+ HDassert(child_entry);
+
+ /* Create a flush dependency between parent and child entry */
+ if(H5AC_create_flush_dependency(parent_entry, child_entry) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2__create_flush_depend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5B2__destroy_flush_depend
+ *
+ * Purpose: Destroy a flush dependency between two data structure components
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5B2__destroy_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Sanity check */
+ HDassert(parent_entry);
+ HDassert(child_entry);
+
+ /* Destroy a flush dependency between parent and child entry */
+ if(H5AC_destroy_flush_dependency(parent_entry, child_entry) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5B2__destroy_flush_depend() */
+
diff --git a/src/H5B2pkg.h b/src/H5B2pkg.h
index d661efa..834ef3d 100644
--- a/src/H5B2pkg.h
+++ b/src/H5B2pkg.h
@@ -170,6 +170,7 @@ typedef struct H5B2_hdr_t {
/* Shared internal data structures (not stored) */
H5F_t *f; /* Pointer to the file that the B-tree is in */
+ void *parent; /* Flush dependency parent */
haddr_t addr; /* Address of B-tree header in the file */
size_t hdr_size; /* Size of the B-tree header on disk */
size_t rc; /* Reference count of nodes using this header */
@@ -182,6 +183,9 @@ typedef struct H5B2_hdr_t {
uint8_t *page; /* Common disk page for I/O */
size_t *nat_off; /* Array of offsets of native records */
H5B2_node_info_t *node_info; /* Table of node info structs for current depth of B-tree */
+ hbool_t swmr_write; /* Whether we are doing SWMR writes */
+ struct H5B2_leaf_t *shadowed_leaf; /* Linked list of shadowed leaf nodes */
+ struct H5B2_internal_t *shadowed_internal; /* Linked list of shadowed internal nodes */
uint8_t *min_native_rec; /* Pointer to minimum native record */
uint8_t *max_native_rec; /* Pointer to maximum native record */
@@ -197,8 +201,11 @@ typedef struct H5B2_leaf_t {
/* Internal B-tree information */
H5B2_hdr_t *hdr; /* Pointer to the [pinned] v2 B-tree header */
+ void *parent; /* Flush dependency parent */
uint8_t *leaf_native; /* Pointer to native records */
uint16_t nrec; /* Number of records in node */
+ struct H5B2_leaf_t *shadowed_next; /* Next node in shadowed list */
+ struct H5B2_leaf_t *shadowed_prev; /* Previous node in shadowed list */
} H5B2_leaf_t;
/* B-tree internal node information */
@@ -208,10 +215,13 @@ typedef struct H5B2_internal_t {
/* Internal B-tree information */
H5B2_hdr_t *hdr; /* Pointer to the [pinned] v2 B-tree header */
+ void *parent; /* Flush dependency parent */
uint8_t *int_native; /* Pointer to native records */
H5B2_node_ptr_t *node_ptrs; /* Pointer to node pointers */
uint16_t nrec; /* Number of records in node */
uint16_t depth; /* Depth of this node in the B-tree */
+ struct H5B2_internal_t *shadowed_next; /* Next node in shadowed list */
+ struct H5B2_internal_t *shadowed_prev; /* Previous node in shadowed list */
} H5B2_internal_t;
/* v2 B-tree */
@@ -231,6 +241,7 @@ typedef enum H5B2_nodepos_t {
/* Callback info for loading a free space header into the cache */
typedef struct H5B2_hdr_cache_ud_t {
H5F_t *f; /* File that v2 b-tree header is within */
+ void *parent; /* Flush dependency parent */
haddr_t addr; /* Address of B-tree header in the file */
void *ctx_udata; /* User-data for protecting */
} H5B2_hdr_cache_ud_t;
@@ -239,6 +250,7 @@ typedef struct H5B2_hdr_cache_ud_t {
typedef struct H5B2_internal_cache_ud_t {
H5F_t *f; /* File that v2 b-tree header is within */
H5B2_hdr_t *hdr; /* v2 B-tree header */
+ void *parent; /* Flush dependency parent */
uint16_t nrec; /* Number of records in node to load */
uint16_t depth; /* Depth of node to load */
} H5B2_internal_cache_ud_t;
@@ -247,14 +259,15 @@ typedef struct H5B2_internal_cache_ud_t {
typedef struct H5B2_leaf_cache_ud_t {
H5F_t *f; /* File that v2 b-tree header is within */
H5B2_hdr_t *hdr; /* v2 B-tree header */
+ void *parent; /* Flush dependency parent */
uint16_t nrec; /* Number of records in node to load */
} H5B2_leaf_cache_ud_t;
#ifdef H5B2_TESTING
/* Node information for testing */
typedef struct {
- unsigned depth; /* Depth of node */
- unsigned nrec; /* Number of records in node */
+ uint16_t depth; /* Depth of node */
+ uint16_t nrec; /* Number of records in node */
} H5B2_node_info_test_t;
#endif /* H5B2_TESTING */
@@ -291,12 +304,18 @@ extern const H5B2_class_t *const H5B2_client_class_g[H5B2_NUM_BTREE_ID];
/* Package Private Prototypes */
/******************************/
+/* Generic routines */
+H5_DLL herr_t H5B2__create_flush_depend(H5AC_info_t *parent_entry,
+ H5AC_info_t *child_entry);
+H5_DLL herr_t H5B2__destroy_flush_depend(H5AC_info_t *parent_entry,
+ H5AC_info_t *child_entry);
+
/* Routines for managing B-tree header info */
H5_DLL H5B2_hdr_t *H5B2__hdr_alloc(H5F_t *f);
H5_DLL haddr_t H5B2__hdr_create(H5F_t *f, hid_t dxpl_id,
- const H5B2_create_t *cparam, void *ctx_udata);
+ const H5B2_create_t *cparam, void *ctx_udata, void *parent);
H5_DLL herr_t H5B2__hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam,
- void *ctx_udata, uint16_t depth);
+ void *ctx_udata, void *parent, uint16_t depth);
H5_DLL herr_t H5B2__hdr_incr(H5B2_hdr_t *hdr);
H5_DLL herr_t H5B2__hdr_decr(H5B2_hdr_t *hdr);
H5_DLL herr_t H5B2__hdr_fuse_incr(H5B2_hdr_t *hdr);
@@ -306,15 +325,15 @@ H5_DLL herr_t H5B2__hdr_delete(H5B2_hdr_t *hdr, hid_t dxpl_id);
/* Routines for operating on leaf nodes */
H5B2_leaf_t *H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
- uint16_t nrec, unsigned flags);
+ void *parent, uint16_t nrec, unsigned flags);
/* Routines for operating on internal nodes */
H5_DLL H5B2_internal_t *H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
- haddr_t addr, uint16_t nrec, uint16_t depth, unsigned flags);
+ haddr_t addr, void *parent, uint16_t nrec, uint16_t depth, unsigned flags);
/* Routines for allocating nodes */
H5_DLL herr_t H5B2__split_root(H5B2_hdr_t *hdr, hid_t dxpl_id);
-H5_DLL herr_t H5B2__create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
+H5_DLL herr_t H5B2__create_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, void *parent,
H5B2_node_ptr_t *node_ptr);
/* Routines for releasing structures */
@@ -325,47 +344,50 @@ H5_DLL herr_t H5B2__internal_free(H5B2_internal_t *i);
/* Routines for inserting records */
H5_DLL herr_t H5B2__insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
uint16_t depth, unsigned *parent_cache_info_flags_ptr,
- H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *udata);
+ H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *parent, void *udata);
H5_DLL herr_t H5B2__insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
- H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *udata);
+ H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *parent, void *udata);
/* Routines for iterating over nodes/records */
H5_DLL herr_t H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
- const H5B2_node_ptr_t *curr_node, H5B2_operator_t op, void *op_data);
+ const H5B2_node_ptr_t *curr_node, void *parent, H5B2_operator_t op, void *op_data);
H5_DLL herr_t H5B2__node_size(H5B2_hdr_t *hdr, hid_t dxpl_id,
- uint16_t depth, const H5B2_node_ptr_t *curr_node, hsize_t *op_data);
+ uint16_t depth, const H5B2_node_ptr_t *curr_node, void *parent,
+ hsize_t *op_data);
/* Routines for locating records */
H5_DLL int H5B2__locate_record(const H5B2_class_t *type, unsigned nrec,
size_t *rec_off, const uint8_t *native, const void *udata, unsigned *idx);
H5_DLL herr_t H5B2__neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
uint16_t depth, H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc,
- H5B2_compare_t comp, void *udata, H5B2_found_t op, void *op_data);
+ H5B2_compare_t comp, void *parent, void *udata, H5B2_found_t op,
+ void *op_data);
H5_DLL herr_t H5B2__neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
- H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc,
- H5B2_compare_t comp, void *udata, H5B2_found_t op, void *op_data);
+ H5B2_node_ptr_t *curr_node_ptr, void *neighbor_loc, H5B2_compare_t comp,
+ void *parent, void *udata, H5B2_found_t op, void *op_data);
/* Routines for removing records */
H5_DLL herr_t H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
- hbool_t *depth_decreased, void *swap_loc, uint16_t depth,
+ hbool_t *depth_decreased, void *swap_loc, void *swap_parent, uint16_t depth,
H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr,
H5B2_nodepos_t curr_pos, H5B2_node_ptr_t *curr_node_ptr, void *udata,
H5B2_remove_t op, void *op_data);
H5_DLL herr_t H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
- H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos,
+ H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *parent,
void *udata, H5B2_remove_t op, void *op_data);
H5_DLL herr_t H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
- hbool_t *depth_decreased, void *swap_loc, uint16_t depth,
+ hbool_t *depth_decreased, void *swap_loc, void *swap_parent, uint16_t depth,
H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t n,
- H5B2_remove_t op, void *op_data);
+ void *udata, H5B2_remove_t op, void *op_data);
H5_DLL herr_t H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
- H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos,
+ H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, void *parent,
unsigned idx, H5B2_remove_t op, void *op_data);
/* Routines for deleting nodes */
H5_DLL herr_t H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
- const H5B2_node_ptr_t *curr_node, H5B2_remove_t op, void *op_data);
+ const H5B2_node_ptr_t *curr_node, void *parent, H5B2_remove_t op,
+ void *op_data);
/* Debugging routines for dumping file structures */
H5_DLL herr_t H5B2__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr,
diff --git a/src/H5B2private.h b/src/H5B2private.h
index 9e3c2d7..1047a29 100644
--- a/src/H5B2private.h
+++ b/src/H5B2private.h
@@ -31,7 +31,8 @@
#include "H5B2public.h"
/* Private headers needed by this file */
-#include "H5Fprivate.h" /* File access */
+#include "H5ACprivate.h" /* Metadata cache */
+#include "H5Fprivate.h" /* File access */
/**************************/
/* Library Private Macros */
@@ -54,6 +55,8 @@ typedef enum H5B2_subid_t {
H5B2_SOHM_INDEX_ID, /* B-tree is an index for shared object header messages */
H5B2_ATTR_DENSE_NAME_ID, /* B-tree is for indexing 'name' field for "dense" attribute storage on objects */
H5B2_ATTR_DENSE_CORDER_ID, /* B-tree is for indexing 'creation order' field for "dense" attribute storage on objects */
+ H5B2_CDSET_ID, /* B-tree is for non-filtered chunked dataset storage w/ >1 unlim dims */
+ H5B2_CDSET_FILT_ID, /* B-tree is for filtered chunked dataset storage w/ >1 unlim dims */
H5B2_NUM_BTREE_ID /* Number of B-tree IDs (must be last) */
} H5B2_subid_t;
@@ -126,8 +129,9 @@ typedef struct H5B2_t H5B2_t;
/* Library-private Function Prototypes */
/***************************************/
H5_DLL H5B2_t *H5B2_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam,
- void *ctx_udata);
-H5_DLL H5B2_t *H5B2_open(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata);
+ void *ctx_udata, void *parent);
+H5_DLL H5B2_t *H5B2_open(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata,
+ void *parent);
H5_DLL herr_t H5B2_get_addr(const H5B2_t *bt2, haddr_t *addr/*out*/);
H5_DLL herr_t H5B2_insert(H5B2_t *bt2, hid_t dxpl_id, void *udata);
H5_DLL herr_t H5B2_iterate(H5B2_t *bt2, hid_t dxpl_id, H5B2_operator_t op,
@@ -143,13 +147,20 @@ H5_DLL herr_t H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata,
H5_DLL herr_t H5B2_remove(H5B2_t *b2, hid_t dxpl_id, void *udata,
H5B2_remove_t op, void *op_data);
H5_DLL herr_t H5B2_remove_by_idx(H5B2_t *bt2, hid_t dxpl_id,
- H5_iter_order_t order, hsize_t idx, H5B2_remove_t op, void *op_data);
+ H5_iter_order_t order, hsize_t idx, void *udata, H5B2_remove_t op,
+ void *op_data);
H5_DLL herr_t H5B2_get_nrec(const H5B2_t *bt2, hsize_t *nrec);
H5_DLL herr_t H5B2_size(H5B2_t *bt2, hid_t dxpl_id,
hsize_t *btree_size);
H5_DLL herr_t H5B2_close(H5B2_t *bt2, hid_t dxpl_id);
H5_DLL herr_t H5B2_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr,
- void *ctx_udata, H5B2_remove_t op, void *op_data);
+ void *ctx_udata, void *parent, H5B2_remove_t op, void *op_data);
+H5_DLL htri_t H5B2_support(H5B2_t *bt2, hid_t dxpl_id, void *udata,
+ H5AC_info_t *child);
+H5_DLL herr_t H5B2_unsupport(H5B2_t *bt2, hid_t dxpl_id, void *udata,
+ H5AC_info_t *child);
+H5_DLL herr_t H5B2_depend(H5AC_info_t *parent_entry, H5B2_t *bt2);
+H5_DLL herr_t H5B2_undepend(H5AC_info_t *parent_entry, H5B2_t *bt2);
/* Statistics routines */
H5_DLL herr_t H5B2_stat_info(H5B2_t *bt2, H5B2_stat_t *info);
diff --git a/src/H5B2stat.c b/src/H5B2stat.c
index 10c692e..da721c6 100644
--- a/src/H5B2stat.c
+++ b/src/H5B2stat.c
@@ -139,7 +139,7 @@ H5B2_size(H5B2_t *bt2, hid_t dxpl_id, hsize_t *btree_size)
*btree_size += hdr->node_size;
else
/* Iterate through nodes */
- if(H5B2__node_size(hdr, dxpl_id, hdr->depth, &hdr->root, btree_size) < 0)
+ if(H5B2__node_size(hdr, dxpl_id, hdr->depth, &hdr->root, hdr, btree_size) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTLIST, FAIL, "node iteration failed")
} /* end if */
diff --git a/src/H5B2test.c b/src/H5B2test.c
index 87fddf7..654659b 100644
--- a/src/H5B2test.c
+++ b/src/H5B2test.c
@@ -67,8 +67,8 @@ static herr_t H5B2__test_store(void *nrecord, const void *udata);
static herr_t H5B2__test_compare(const void *rec1, const void *rec2);
static herr_t H5B2__test_encode(uint8_t *raw, const void *nrecord, void *ctx);
static herr_t H5B2__test_decode(const uint8_t *raw, void *nrecord, void *ctx);
-static herr_t H5B2__test_debug(FILE *stream, int indent, int fwidth,
- const void *record, const void *_udata);
+static herr_t H5B2__test_debug(FILE *stream, const H5F_t *f, hid_t dxpl_id,
+ int indent, int fwidth, const void *record, const void *_udata);
static void *H5B2__test_crt_dbg_context(H5F_t *f, hid_t dxpl_id, haddr_t addr);
@@ -295,7 +295,8 @@ H5B2__test_decode(const uint8_t *raw, void *nrecord, void *_ctx)
*-------------------------------------------------------------------------
*/
static herr_t
-H5B2__test_debug(FILE *stream, int indent, int fwidth, const void *record,
+H5B2__test_debug(FILE *stream, const H5F_t H5_ATTR_UNUSED *f, hid_t H5_ATTR_UNUSED dxpl_id,
+ int indent, int fwidth, const void *record,
const void H5_ATTR_UNUSED *_udata)
{
FUNC_ENTER_STATIC_NOERR
@@ -396,6 +397,7 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
{
H5B2_hdr_t *hdr; /* Pointer to the B-tree header */
H5B2_node_ptr_t curr_node_ptr; /* Node pointer info for current node */
+ void *parent = NULL; /* Parent of current node */
uint16_t depth; /* Current depth of the tree */
int cmp; /* Comparison value of records */
unsigned idx; /* Location of record which matches key */
@@ -415,6 +417,10 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
/* Make copy of the root node pointer to start search with */
curr_node_ptr = hdr->root;
+ /* Set initial parent, if doing swmr writes */
+ if(hdr->swmr_write)
+ parent = hdr;
+
/* Current depth of the tree */
depth = hdr->depth;
@@ -429,9 +435,16 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */
/* Lock B-tree current node */
- if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, parent, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
+ /* Unpin parent if necessary */
+ if(parent) {
+ if(parent != hdr && H5AC_unpin_entry(parent) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry")
+ parent = NULL;
+ } /* end if */
+
/* Locate node pointer for child */
cmp = H5B2__locate_record(hdr->cls, internal->nrec, hdr->nat_off, internal->int_native, udata, &idx);
if(cmp > 0)
@@ -442,9 +455,13 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
next_node_ptr = internal->node_ptrs[idx];
/* Unlock current node */
- if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, H5AC__NO_FLAGS_SET) < 0)
+ if(H5AC_unprotect(hdr->f, dxpl_id, H5AC_BT2_INT, curr_node_ptr.addr, internal, (unsigned)(hdr->swmr_write ? H5AC__PIN_ENTRY_FLAG : H5AC__NO_FLAGS_SET)) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPROTECT, FAIL, "unable to release B-tree node")
+ /* Keep track of parent if necessary */
+ if(hdr->swmr_write)
+ parent = internal;
+
/* Set pointer to next node to load */
curr_node_ptr = next_node_ptr;
} /* end if */
@@ -469,9 +486,16 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */
/* Lock B-tree leaf node */
- if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG)))
+ if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, parent, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
+ /* Unpin parent if necessary */
+ if(parent) {
+ if(parent != hdr && H5AC_unpin_entry(parent) < 0)
+ HGOTO_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry")
+ parent = NULL;
+ } /* end if */
+
/* Locate record */
cmp = H5B2__locate_record(hdr->cls, leaf->nrec, hdr->nat_off, leaf->leaf_native, udata, &idx);
@@ -489,6 +513,12 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
ninfo->nrec = curr_node_ptr.node_nrec;
done:
+ if(parent) {
+ HDassert(ret_value < 0);
+ if(parent != hdr && H5AC_unpin_entry(parent) < 0)
+ HDONE_ERROR(H5E_BTREE, H5E_CANTUNPIN, FAIL, "unable to unpin parent entry")
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5B2_get_node_info_test() */
diff --git a/src/H5Bcache.c b/src/H5Bcache.c
index d08f1bc..73136b4 100644
--- a/src/H5Bcache.c
+++ b/src/H5Bcache.c
@@ -54,7 +54,8 @@
/********************/
/* Metadata cache callbacks */
-static herr_t H5B__get_load_size(const void *udata, size_t *image_len);
+static herr_t H5B__get_load_size(const void *image, void *udata, size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static void *H5B__deserialize(const void *image, size_t len, void *udata,
hbool_t *dirty);
static herr_t H5B__image_len(const void *thing, size_t *image_len,
@@ -75,6 +76,7 @@ const H5AC_class_t H5AC_BT[1] = {{
H5FD_MEM_BTREE, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5B__get_load_size, /* 'get_load_size' callback */
+ NULL, /* 'verify_chksum' callback */
H5B__deserialize, /* 'deserialize' callback */
H5B__image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -105,9 +107,11 @@ const H5AC_class_t H5AC_BT[1] = {{
*-------------------------------------------------------------------------
*/
static herr_t
-H5B__get_load_size(const void *_udata, size_t *image_len)
+H5B__get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5B_cache_ud_t *udata = (const H5B_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into image buffer */
+ H5B_cache_ud_t *udata = (H5B_cache_ud_t *)_udata; /* User data for callback */
H5B_shared_t *shared; /* Pointer to shared B-tree info */
FUNC_ENTER_STATIC_NOERR
@@ -116,12 +120,17 @@ H5B__get_load_size(const void *_udata, size_t *image_len)
HDassert(udata);
HDassert(image_len);
- /* Get shared info for B-tree */
- shared = (H5B_shared_t *)H5UC_GET_OBJ(udata->rc_shared);
- HDassert(shared);
+ if(image == NULL) {
+ /* Get shared info for B-tree */
+ shared = (H5B_shared_t *)H5UC_GET_OBJ(udata->rc_shared);
+ HDassert(shared);
- /* Set the image length size */
- *image_len = shared->sizeof_rnode;
+ /* Set the image length size */
+ *image_len = shared->sizeof_rnode;
+ } else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5B__get_load_size() */
@@ -384,4 +393,3 @@ H5B__free_icr(void *thing)
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B__free_icr() */
-
diff --git a/src/H5Bdbg.c b/src/H5Bdbg.c
index d92a24b..ed7ccf2 100644
--- a/src/H5Bdbg.c
+++ b/src/H5Bdbg.c
@@ -77,6 +77,9 @@ H5B_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int f
HDassert(fwidth >= 0);
HDassert(type);
+ /* Currently does not support SWMR access */
+ HDassert(!(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE));
+
/* Get shared info for B-tree */
if(NULL == (rc_shared = (type->get_shared)(f, udata)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTGET, FAIL, "can't retrieve B-tree's shared ref. count object")
@@ -89,6 +92,7 @@ H5B_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int f
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
+ cache_udata.parent = NULL;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
diff --git a/src/H5Bpkg.h b/src/H5Bpkg.h
index e645626..e9cf69a 100644
--- a/src/H5Bpkg.h
+++ b/src/H5Bpkg.h
@@ -59,12 +59,16 @@ typedef struct H5B_t {
haddr_t right; /*address of right sibling */
uint8_t *native; /*array of keys in native format */
haddr_t *child; /*2k child pointers */
+
+ /* Not stored on disk */
+ void *parent; /* Flush dependency parent */
} H5B_t;
/* Callback info for loading a B-tree node into the cache */
typedef struct H5B_cache_ud_t {
H5F_t *f; /* File that B-tree node is within */
const struct H5B_class_t *type; /* Type of tree */
+ void *parent; /* Flush dependency parent */
H5UC_t *rc_shared; /* Ref-counted shared info */
} H5B_cache_ud_t;
diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h
index 02fb82c..3251a47 100644
--- a/src/H5Bprivate.h
+++ b/src/H5Bprivate.h
@@ -177,6 +177,6 @@ H5_DLL herr_t H5B_shared_free(void *_shared);
H5_DLL herr_t H5B_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream,
int indent, int fwidth, const H5B_class_t *type, void *udata);
H5_DLL htri_t H5B_valid(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type,
- haddr_t addr);
+ haddr_t addr, void *parent);
#endif /* _H5Bprivate_H */
diff --git a/src/H5C.c b/src/H5C.c
index 2a4b4cf..448e6fe 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -83,7 +83,8 @@
/***********/
#include "H5private.h" /* Generic Functions */
#ifdef H5_HAVE_PARALLEL
-#include "H5ACprivate.h" /* Metadata cache */
+#define H5AC_FRIEND /*suppress error about including H5ACpkg */
+#include "H5ACpkg.h" /* Metadata cache */
#endif /* H5_HAVE_PARALLEL */
#include "H5Cpkg.h" /* Cache */
#include "H5Eprivate.h" /* Error handling */
@@ -169,17 +170,27 @@ static herr_t H5C_tag_entry(H5C_t * cache_ptr,
H5C_cache_entry_t * entry_ptr,
hid_t dxpl_id);
-static herr_t H5C_flush_tagged_entries(H5F_t * f,
- hid_t dxpl_id,
- H5C_t * cache_ptr,
- haddr_t tag);
+static herr_t H5C_mark_tagged_entries(H5C_t * cache_ptr,
+ haddr_t tag,
+ hbool_t mark_clean);
-static herr_t H5C_mark_tagged_entries(H5C_t * cache_ptr,
- haddr_t tag);
+static herr_t H5C_mark_tagged_entries_cork(H5C_t *cache_ptr,
+ haddr_t obj_addr,
+ hbool_t val);
static herr_t H5C_flush_marked_entries(H5F_t * f,
hid_t dxpl_id);
+static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry);
+
+static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry);
+
+static herr_t H5C_verify_len_eoa (H5F_t *f,
+ const H5C_class_t * type,
+ haddr_t addr,
+ size_t *len,
+ htri_t actual);
+
#if H5C_DO_TAGGING_SANITY_CHECKS
static herr_t H5C_verify_tag(int id, haddr_t tag);
#endif
@@ -195,6 +206,11 @@ static herr_t H5C_validate_pinned_entry_list(H5C_t * cache_ptr);
static herr_t H5C_validate_protected_entry_list(H5C_t * cache_ptr);
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+#ifndef NDEBUG
+static void H5C__assert_flush_dep_nocycle(H5C_cache_entry_t * entry,
+ H5C_cache_entry_t * base_entry);
+#endif /* NDEBUG */
+
#if 0 /* debugging routines */
herr_t H5C_dump_cache(H5C_t * cache_ptr, const char * cache_name);
herr_t H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn);
@@ -220,6 +236,11 @@ hbool_t H5_PKG_INIT_VAR = FALSE;
/* Declare a free list to manage the H5C_t struct */
H5FL_DEFINE_STATIC(H5C_t);
+/* Declare a free list to manage flush dependency arrays */
+H5FL_BLK_DEFINE_STATIC(parent);
+
+/* Declare a free list to manage corked object addresses */
+H5FL_DEFINE_STATIC(haddr_t);
/****************************************************************************
@@ -239,8 +260,15 @@ H5FL_DEFINE_STATIC(H5C_t);
#define H5C__EPOCH_MARKER_TYPE H5C__MAX_NUM_TYPE_IDS
-static herr_t H5C__epoch_marker_get_load_size(const void *udata_ptr,
- size_t *image_len_ptr);
+static herr_t H5C__epoch_marker_get_load_size(const void *image_ptr,
+ void *udata_ptr,
+ size_t *image_len_ptr,
+ size_t *actual_len,
+ hbool_t *compressed_ptr,
+ size_t *compressed_len_ptr);
+static htri_t H5C__epoch_marker_verify_chksum(const void *image_ptr,
+ size_t len,
+ void *udata_ptr);
static void * H5C__epoch_marker_deserialize(const void * image_ptr,
size_t len,
void * udata,
@@ -278,6 +306,7 @@ const H5C_class_t epoch_marker_class =
/* mem_type = */ H5FD_MEM_DEFAULT, /* value doesn't matter */
/* flags = */ H5AC__CLASS_NO_FLAGS_SET,
/* get_load_size = */ H5C__epoch_marker_get_load_size,
+ /* verify_chksum = */ H5C__epoch_marker_verify_chksum,
/* deserialize = */ H5C__epoch_marker_deserialize,
/* image_len = */ H5C__epoch_marker_image_len,
/* pre_serialize = */ H5C__epoch_marker_pre_serialize,
@@ -299,8 +328,9 @@ const H5C_class_t epoch_marker_class =
*
***************************************************************************/
static herr_t
-H5C__epoch_marker_get_load_size(const void H5_ATTR_UNUSED *udata_ptr,
- size_t H5_ATTR_UNUSED *image_len_ptr)
+H5C__epoch_marker_get_load_size(const void H5_ATTR_UNUSED *image_ptr, void H5_ATTR_UNUSED *udata_ptr,
+ size_t H5_ATTR_UNUSED *image_len_ptr, size_t H5_ATTR_UNUSED *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_len_ptr)
{
FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
@@ -310,6 +340,18 @@ H5C__epoch_marker_get_load_size(const void H5_ATTR_UNUSED *udata_ptr,
} /* end H5C__epoch_marker_get_load_size() */
+static htri_t
+H5C__epoch_marker_verify_chksum(const void H5_ATTR_UNUSED *image_ptr, size_t H5_ATTR_UNUSED len,
+ void H5_ATTR_UNUSED *udata_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
+
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
+
+ FUNC_LEAVE_NOAPI(FALSE)
+} /* end H5C__epoch_marker_verify_chksum() */
+
+
static void *
H5C__epoch_marker_deserialize(const void H5_ATTR_UNUSED * image_ptr, size_t H5_ATTR_UNUSED len,
void H5_ATTR_UNUSED * udata, hbool_t H5_ATTR_UNUSED * dirty_ptr)
@@ -475,6 +517,11 @@ H5C_create(size_t max_cache_size,
HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list.")
}
+ if ( (cache_ptr->cork_list_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)) == NULL ) {
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list for corked object addresses.")
+ }
+
/* If we get this far, we should succeed. Go ahead and initialize all
* the fields.
*/
@@ -483,6 +530,12 @@ H5C_create(size_t max_cache_size,
cache_ptr->flush_in_progress = FALSE;
+ cache_ptr->logging_enabled = FALSE;
+
+ cache_ptr->currently_logging = FALSE;
+
+ cache_ptr->log_file_ptr = NULL;
+
cache_ptr->trace_file_ptr = NULL;
cache_ptr->aux_ptr = aux_ptr;
@@ -643,6 +696,9 @@ done:
if ( cache_ptr->slist_ptr != NULL )
H5SL_close(cache_ptr->slist_ptr);
+ if ( cache_ptr->cork_list_ptr != NULL )
+ H5SL_close(cache_ptr->cork_list_ptr);
+
cache_ptr->magic = 0;
cache_ptr = H5FL_FREE(H5C_t, cache_ptr);
@@ -842,6 +898,35 @@ H5C_def_auto_resize_rpt_fcn(H5C_t * cache_ptr,
/*-------------------------------------------------------------------------
+ * Function: H5C_free_cork_list_cb
+ *
+ * Purpose: Callback function to free the list of object addresses
+ * on the skip list.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; January 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C_free_cork_list_cb(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *op_data)
+{
+ haddr_t *addr = (haddr_t *)_item;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(addr);
+
+ /* Release the item */
+ addr = H5FL_FREE(haddr_t, addr);
+
+ FUNC_LEAVE_NOAPI(0)
+} /* H5C_free_cork_list_cb() */
+
+
+
+/*-------------------------------------------------------------------------
* Function: H5C_dest
*
* Purpose: Flush all data to disk and destroy the cache.
@@ -886,6 +971,11 @@ H5C_dest(H5F_t * f, hid_t dxpl_id)
cache_ptr->slist_ptr = NULL;
} /* end if */
+ if(cache_ptr->cork_list_ptr != NULL) {
+ H5SL_destroy(cache_ptr->cork_list_ptr, H5C_free_cork_list_cb, NULL);
+ cache_ptr->cork_list_ptr = NULL;
+ } /* end if */
+
/* Only display count of number of calls to H5C_get_entry_ptr_from_add()
* if NDEBUG is undefined, and H5C_DO_SANITY_CHECKS is defined. Need
* this as the print statement will upset windows, and we frequently
@@ -915,7 +1005,38 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5C_evict
+ *
+ * Purpose: Evict all except pinned entries in the cache
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Dec 2013
*
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_evict(H5F_t * f, hid_t dxpl_id)
+{
+ H5C_t *cache_ptr = f->shared->cache;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity check */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Flush and invalidate all cache entries except the pinned entries */
+ if(H5C_flush_invalidate_cache(f, dxpl_id, H5C__EVICT_ALLOW_LAST_PINS_FLAG) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to evict entries in the cache")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_evict() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5C_expunge_entry
*
* Purpose: Use this function to tell the cache to expunge an entry
@@ -1512,6 +1633,7 @@ H5C_get_entry_status(const H5F_t *f,
hbool_t * is_dirty_ptr,
hbool_t * is_protected_ptr,
hbool_t * is_pinned_ptr,
+ hbool_t * is_corked_ptr,
hbool_t * is_flush_dep_parent_ptr,
hbool_t * is_flush_dep_child_ptr)
{
@@ -1572,14 +1694,19 @@ H5C_get_entry_status(const H5F_t *f,
*is_pinned_ptr = entry_ptr->is_pinned;
}
+ if ( is_corked_ptr != NULL ) {
+
+ *is_corked_ptr = entry_ptr->is_corked;
+ }
+
if ( is_flush_dep_parent_ptr != NULL ) {
- *is_flush_dep_parent_ptr = (entry_ptr->flush_dep_height > 0);
+ *is_flush_dep_parent_ptr = (entry_ptr->flush_dep_nchildren > 0);
}
if ( is_flush_dep_child_ptr != NULL ) {
- *is_flush_dep_child_ptr = (entry_ptr->flush_dep_parent != NULL);
+ *is_flush_dep_child_ptr = (entry_ptr->flush_dep_nparents > 0);
}
}
@@ -1718,6 +1845,322 @@ H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr)
/*-------------------------------------------------------------------------
+ * Function: H5C_set_up_logging
+ *
+ * Purpose: Setup for metadata cache logging.
+ *
+ * Metadata logging is enabled and disabled at two levels. This
+ * function and the associated tear_down function open and close
+ * the log file. the start_ and stop_logging functions are then
+ * used to switch logging on/off. Optionally, logging can begin
+ * as soon as the log file is opened (set via the start_immediately
+ * parameter to this function).
+ *
+ * The log functionality is split between the H5C and H5AC
+ * packages. Log state and direct log manipulation resides in
+ * H5C. Log messages are generated in H5AC and sent to
+ * the H5C_write_log_message function.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_set_up_logging(H5C_t *cache_ptr, const char log_location[],
+ hbool_t start_immediately)
+{
+ /* Parameters:
+  *  cache_ptr         -- cache to enable logging on
+  *  log_location      -- path of the log file to create
+  *  start_immediately -- if TRUE, begin logging as soon as the log
+  *                       file is opened; otherwise logging waits
+  *                       for H5C_start_logging()
+  */
+#ifdef H5_HAVE_PARALLEL
+ H5AC_aux_t *aux_ptr = NULL;
+#endif /*H5_HAVE_PARALLEL*/
+ char *file_name = NULL;
+ size_t n_chars;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(log_location);
+
+ /* Sanity checks (duplicate the asserts above so that builds with
+  * asserts disabled still reject bad input with an error return) */
+ if(NULL == cache_ptr)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache_ptr == NULL")
+
+ if(H5C__H5C_T_MAGIC != cache_ptr->magic)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache magic value incorrect")
+
+ if(cache_ptr->logging_enabled)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "logging already set up")
+
+ if(NULL == log_location)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL log location not allowed")
+
+ /* Possibly fix up the log file name.
+ * The extra 39 characters are for adding the rank to the file name
+ * under parallel HDF5. 39 characters allows > 2^127 processes which
+ * should be enough for anybody.
+ *
+ * allocation size = <path length> + dot + <rank # length> + \0
+ */
+ n_chars = HDstrlen(log_location) + 1 + 39 + 1;
+ if(NULL == (file_name = (char *)HDcalloc(n_chars, sizeof(char))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, \
+ "can't allocate memory for mdc log file name manipulation")
+
+#ifdef H5_HAVE_PARALLEL
+
+ /* Add the rank to the log file name when MPI is in use */
+ aux_ptr = (H5AC_aux_t *)(cache_ptr->aux_ptr);
+
+ if(NULL == aux_ptr) {
+ HDsnprintf(file_name, n_chars, "%s", log_location);
+ }
+ else {
+ if(aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC) {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad aux_ptr->magic")
+ }
+ HDsnprintf(file_name, n_chars, "%s.%d", log_location, aux_ptr->mpi_rank);
+ }
+
+#else /* H5_HAVE_PARALLEL */
+
+ HDsnprintf(file_name, n_chars, "%s", log_location);
+
+#endif /* H5_HAVE_PARALLEL */
+
+ /* Open log file ("w" truncates any existing file at this path) */
+ if(NULL == (cache_ptr->log_file_ptr = HDfopen(file_name, "w")))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "can't create mdc log file")
+
+ /* Set logging flags */
+ cache_ptr->logging_enabled = TRUE;
+ cache_ptr->currently_logging = start_immediately;
+
+ done:
+ /* file_name is freed on both success and error paths */
+ if(file_name)
+ HDfree(file_name);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_set_up_logging() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_tear_down_logging
+ *
+ * Purpose: Tear-down for metadata cache logging.
+ *
+ * Unsets the logging flags and closes the log file opened by
+ * H5C_set_up_logging(). Fails if logging was never set up.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_tear_down_logging(H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Sanity checks (duplicate the asserts above so that builds with
+  * asserts disabled still reject bad input with an error return) */
+ if(NULL == cache_ptr)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache_ptr == NULL")
+
+ if(H5C__H5C_T_MAGIC != cache_ptr->magic)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache magic value incorrect")
+
+ if(FALSE == cache_ptr->logging_enabled)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "logging not enabled")
+
+ /* Unset logging flags */
+ cache_ptr->logging_enabled = FALSE;
+ cache_ptr->currently_logging = FALSE;
+
+ /* Close log file. NULL the handle even when the close fails --
+  * after fclose() the FILE pointer is invalid either way, and
+  * leaving it set would invite a double-close on a later
+  * tear-down or a write through a dead stream. */
+ if(EOF == HDfclose(cache_ptr->log_file_ptr)) {
+ cache_ptr->log_file_ptr = NULL;
+ HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "problem closing mdc log file")
+ }
+ cache_ptr->log_file_ptr = NULL;
+
+ done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_tear_down_logging() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_start_logging
+ *
+ * Purpose: Start logging metadata cache operations.
+ *
+ * The log file must already have been opened via
+ * H5C_set_up_logging() (i.e. logging_enabled is TRUE); this
+ * call only switches the currently_logging flag on.
+ *
+ * TODO: Add a function that dumps the current state of the
+ * metadata cache.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_start_logging(H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Sanity checks (duplicate the asserts above so that builds with
+  * asserts disabled still reject bad input with an error return) */
+ if(NULL == cache_ptr)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache_ptr == NULL")
+
+ if(H5C__H5C_T_MAGIC != cache_ptr->magic)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache magic value incorrect")
+
+ if(FALSE == cache_ptr->logging_enabled)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "logging not enabled")
+
+ if(cache_ptr->currently_logging)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "logging already in progress")
+
+ /* Set logging flags */
+ cache_ptr->currently_logging = TRUE;
+
+ /* TODO - Dump cache state */
+
+ done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_start_logging() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_stop_logging
+ *
+ * Purpose: Stop logging metadata cache operations.
+ *
+ * Switches the currently_logging flag off; the log file
+ * remains open (use H5C_tear_down_logging() to close it).
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_stop_logging(H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Sanity checks (duplicate the asserts above so that builds with
+  * asserts disabled still reject bad input with an error return) */
+ if(NULL == cache_ptr)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache_ptr == NULL")
+
+ if(H5C__H5C_T_MAGIC != cache_ptr->magic)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache magic value incorrect")
+
+ if(FALSE == cache_ptr->logging_enabled)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "logging not enabled")
+
+ if(FALSE == cache_ptr->currently_logging)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "logging not in progress")
+
+ /* Set logging flags */
+ cache_ptr->currently_logging = FALSE;
+
+ done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_stop_logging() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_get_logging_status
+ *
+ * Purpose: Determines if logging is set up and if the cache is
+ * actively logging (via the two OUT parameters, both of
+ * which must be non-NULL).
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_get_logging_status(const H5C_t *cache_ptr, /*OUT*/ hbool_t *is_enabled,
+ /*OUT*/ hbool_t *is_currently_logging)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(is_enabled);
+ HDassert(is_currently_logging);
+
+ /* Sanity checks (duplicate the asserts above so that builds with
+  * asserts disabled still reject bad input with an error return
+  * instead of dereferencing NULL) */
+ if(NULL == cache_ptr)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache_ptr == NULL")
+
+ if(H5C__H5C_T_MAGIC != cache_ptr->magic)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache magic value incorrect")
+
+ if(NULL == is_enabled || NULL == is_currently_logging)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL output parameter not allowed")
+
+ *is_enabled = cache_ptr->logging_enabled;
+ *is_currently_logging = cache_ptr->currently_logging;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_get_logging_status() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_write_log_message
+ *
+ * Purpose: Write a message to the log file and flush the file.
+ * The message string is neither modified nor freed.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_write_log_message(const H5C_t *cache_ptr, const char message[])
+{
+ size_t n_chars;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(message);
+
+ /* Sanity checks (duplicate the asserts above so that builds with
+  * asserts disabled still reject bad input with an error return) */
+ if(NULL == cache_ptr)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache_ptr == NULL")
+
+ if(H5C__H5C_T_MAGIC != cache_ptr->magic)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cache magic value incorrect")
+
+ if(FALSE == cache_ptr->currently_logging)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "not currently logging")
+
+ if(NULL == message)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL log message not allowed")
+
+ /* Write the log message and flush. The message is passed as a
+  * "%s" argument rather than as the format string itself: a
+  * message containing '%' conversion specifiers would otherwise
+  * trigger undefined behavior (format-string bug). */
+ n_chars = HDstrlen(message);
+ if((int)n_chars != HDfprintf(cache_ptr->log_file_ptr, "%s", message))
+ HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "error writing log message")
+ if(EOF == HDfflush(cache_ptr->log_file_ptr))
+ HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "error flushing log message")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_write_log_message() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5C_insert_entry
*
* Purpose: Adds the specified thing to the cache. The thing need not
@@ -1765,7 +2208,6 @@ H5C_insert_entry(H5F_t * f,
size_t empty_space;
H5C_cache_entry_t * entry_ptr;
H5C_cache_entry_t * test_entry_ptr;
- unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1836,6 +2278,10 @@ H5C_insert_entry(H5F_t * f,
if(H5C_tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
+ /* Set the entry's cork status */
+ if(H5C_cork(cache_ptr, entry_ptr->tag, H5C__GET_CORKED, &entry_ptr->is_corked) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Cannot retrieve entry's cork status")
+
entry_ptr->is_protected = FALSE;
entry_ptr->is_read_only = FALSE;
entry_ptr->ro_ref_count = 0;
@@ -1890,9 +2336,10 @@ H5C_insert_entry(H5F_t * f,
/* Initialize flush dependency height fields */
entry_ptr->flush_dep_parent = NULL;
- for(u = 0; u < H5C__NUM_FLUSH_DEP_HEIGHTS; u++)
- entry_ptr->child_flush_dep_height_rc[u] = 0;
- entry_ptr->flush_dep_height = 0;
+ entry_ptr->flush_dep_nparents = 0;
+ entry_ptr->flush_dep_parent_nalloc = 0;
+ entry_ptr->flush_dep_nchildren = 0;
+ entry_ptr->flush_dep_ndirty_children = 0;
entry_ptr->ht_next = NULL;
entry_ptr->ht_prev = NULL;
@@ -2082,10 +2529,15 @@ H5C_mark_entry_dirty(void *thing)
entry_ptr->is_dirty = TRUE;
entry_ptr->image_up_to_date = FALSE;
- if ( was_pinned_unprotected_and_clean ) {
+ /* Propagate the dirty flag up the flush dependency chain if appropriate
+ */
+ if(was_pinned_unprotected_and_clean) {
+ H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr);
- H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr);
- }
+ if((entry_ptr->flush_dep_ndirty_children == 0) && (entry_ptr->flush_dep_nparents > 0))
+ if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ } /* end if */
if ( ! (entry_ptr->in_slist) ) {
@@ -2127,9 +2579,7 @@ H5C_move_entry(H5C_t * cache_ptr,
{
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * test_entry_ptr = NULL;
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
- hbool_t was_dirty;
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+ hbool_t was_dirty;
#if H5C_DO_SANITY_CHECKS
hbool_t removed_entry_from_slist = FALSE;
#endif /* H5C_DO_SANITY_CHECKS */
@@ -2226,14 +2676,27 @@ H5C_move_entry(H5C_t * cache_ptr,
if ( ! ( entry_ptr->destroy_in_progress ) ) {
-#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
was_dirty = entry_ptr->is_dirty;
-#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+ entry_ptr->is_dirty = TRUE;
- entry_ptr->is_dirty = TRUE;
- /* This shouldn't be needed, but it keeps the test code happy */
+ /* This shouldn't be needed, but it keeps the test code happy */
entry_ptr->image_up_to_date = FALSE;
+ if ( ! ( entry_ptr->flush_in_progress ) ) {
+
+ /* Propagate the dirty flag up the flush dependency chain if
+ * appropriate */
+ if ( ! ( was_dirty ) ) {
+
+ if ( ( entry_ptr->flush_dep_ndirty_children == 0) &&
+ ( entry_ptr->flush_dep_nparents > 0 ) ) {
+
+ if ( H5C__mark_flush_dep_dirty(entry_ptr) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ }
+ }
+ }
+
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
@@ -2341,6 +2804,18 @@ H5C_resize_entry(void *thing, size_t new_size)
if( entry_ptr->image_ptr )
entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+ /* Propagate the dirty flag up the flush dependency chain if
+ * appropriate */
+ if ( was_clean ) {
+
+ if ( ( entry_ptr->flush_dep_ndirty_children == 0) &&
+ ( entry_ptr->flush_dep_nparents > 0 ) ) {
+
+ if ( H5C__mark_flush_dep_dirty(entry_ptr) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ }
+ }
+
/* do a flash cache size increase if appropriate */
if ( cache_ptr->flash_size_increase_possible ) {
@@ -2654,7 +3129,7 @@ H5C_protect(H5F_t * f,
#if H5C_DO_TAGGING_SANITY_CHECKS
{
- haddr_t tag = HADDR_UNDEF;
+ H5C_tag_t tag; /* Tag structure */
/* The entry is already in the cache, but make sure that the tag value
being passed in via dxpl is still legal. This will ensure that had
@@ -2663,13 +3138,13 @@ H5C_protect(H5F_t * f,
from disk. */
/* Get the tag from the DXPL */
- if((H5P_get(dxpl, "H5AC_metadata_tag", &tag)) < 0)
+ if((H5P_get(dxpl, "H5C_tag", &tag)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "unable to query property value");
/* Verify tag value */
if(cache_ptr->ignore_tags != TRUE) {
/* Verify legal tag value */
- if((H5C_verify_tag(entry_ptr->type->id, tag)) < 0)
+ if((H5C_verify_tag(entry_ptr->type->id, tag.value)) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed");
} /* end if */
}
@@ -2699,6 +3174,10 @@ H5C_protect(H5F_t * f,
if(H5C_tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, NULL, "Cannot tag metadata entry")
+ /* Set the entry's cork status */
+ if(H5C_cork(cache_ptr, entry_ptr->tag, H5C__GET_CORKED, &entry_ptr->is_corked) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "Cannot retrieve entry's cork status")
+
/* If the entry is very large, and we are configured to allow it,
* we may wish to perform a flash cache size increase.
*/
@@ -2931,6 +3410,17 @@ H5C_protect(H5F_t * f,
}
}
+#ifdef ASK
+ /* If we loaded the entry and the entry's type has a 'notify' callback, send
+ * a 'after insertion' notice now that the entry is fully integrated into
+ * the cache and protected. We must wait until it is protected so it is not
+ * evicted during the notify callback.
+ */
+ if(!hit && entry_ptr->type->notify &&
+ (entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, NULL, "can't notify client about entry inserted into cache")
+#endif
+
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
@@ -4067,7 +4557,7 @@ H5C_dump_cache(H5C_t * cache_ptr,
HDfprintf(stdout, "\n\nDump of metadata cache \"%s\".\n", cache_name);
HDfprintf(stdout,
- "Num: Addr: Len: Type: Prot: Pinned: Dirty:\n");
+ "Num: Addr: Tag: Len: Type: Prot: Pinned: Dirty: Corked:\n");
i = 0;
@@ -4087,14 +4577,16 @@ H5C_dump_cache(H5C_t * cache_ptr,
HDassert( entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC );
HDfprintf(stdout,
- "%s%d 0x%08llx 0x%3llx %2d %d %d %d\n",
+ "%s%d 0x%16llx 0x%3llx 0x%3llx %2d %d %d %d %d\n",
cache_ptr->prefix, i,
(long long)(entry_ptr->addr),
+ (long long)(entry_ptr->tag),
(long long)(entry_ptr->size),
(int)(entry_ptr->type->id),
(int)(entry_ptr->is_protected),
(int)(entry_ptr->is_pinned),
- (int)(entry_ptr->is_dirty));
+ (int)(entry_ptr->is_dirty),
+ (int)(entry_ptr->is_corked));
/* increment node_ptr before we delete its target */
node_ptr = H5SL_next(node_ptr);
@@ -4527,8 +5019,29 @@ H5C_unprotect(H5F_t * f,
#endif /* JRM */
/* Update index for newly dirtied entry */
- if(was_clean && entry_ptr->is_dirty)
- H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
+ if(was_clean && entry_ptr->is_dirty) {
+
+ /* Propagate the flush dep dirty flag up the flush dependency chain
+ * if appropriate */
+ if ( ( entry_ptr->flush_dep_ndirty_children == 0) &&
+ ( entry_ptr->flush_dep_nparents > 0 ) ) {
+
+ if ( H5C__mark_flush_dep_dirty(entry_ptr) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ }
+
+ H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
+ } else if ( ! ( was_clean ) && ! ( entry_ptr->is_dirty ) ) {
+
+ /* Propagate the flush dep clean flag up the flush dependency chain
+ * if appropriate */
+ if ( ( entry_ptr->flush_dep_ndirty_children == 0) &&
+ ( entry_ptr->flush_dep_nparents > 0 ) ) {
+
+ if ( H5C__mark_flush_dep_clean(entry_ptr) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
+ }
+ }
/* Pin or unpin the entry as requested. */
if(pin_entry) {
@@ -4901,103 +5414,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5C_adjust_flush_dependency_rc()
- *
- * Purpose: "Atomicly" adjust flush dependency ref. counts for an entry,
- * as a result of a flush dependency child's height changing.
- *
- * Note: Entry will remain in flush dependency relationship with its
- * child entry (i.e. it's not going to get unpinned as a result
- * of this change), but change could trickle upward, if this
- * entry's height changes and it has a flush dependency parent.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Quincey Koziol
- * 3/05/09
- *
- *-------------------------------------------------------------------------
- */
-static void
-H5C_adjust_flush_dependency_rc(H5C_cache_entry_t * cache_entry,
- unsigned old_child_height, unsigned new_child_height)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Sanity checks */
- HDassert(cache_entry);
- HDassert(cache_entry->is_pinned);
- HDassert(cache_entry->flush_dep_height > 0);
- HDassert(cache_entry->flush_dep_height < H5C__NUM_FLUSH_DEP_HEIGHTS);
- HDassert(cache_entry->child_flush_dep_height_rc[old_child_height] > 0);
- HDassert(old_child_height < H5C__NUM_FLUSH_DEP_HEIGHTS);
- HDassert(old_child_height != new_child_height);
- HDassert(new_child_height < H5C__NUM_FLUSH_DEP_HEIGHTS);
-
- /* Adjust ref. counts for entry's flush dependency children heights */
- cache_entry->child_flush_dep_height_rc[new_child_height]++;
- cache_entry->child_flush_dep_height_rc[old_child_height]--;
-
- /* Check for flush dependency height of entry increasing */
- if((new_child_height + 1) > cache_entry->flush_dep_height) {
-
- /* Check if entry has _its_ own parent flush dependency entry */
- if(NULL != cache_entry->flush_dep_parent) {
- /* Adjust flush dependency ref. counts on entry's parent */
- H5C_adjust_flush_dependency_rc(cache_entry->flush_dep_parent, cache_entry->flush_dep_height, new_child_height + 1);
- } /* end if */
-
- /* Set new flush dependency height of entry */
- cache_entry->flush_dep_height = new_child_height + 1;
- } /* end if */
- else {
- /* Check for child's flush dep. height decreasing and ref. count of
- * old child height going to zero, it could mean the parent's
- * flush dependency height dropped.
- */
- if((new_child_height < old_child_height)
- && ((old_child_height + 1) == cache_entry->flush_dep_height)
- && (0 == cache_entry->child_flush_dep_height_rc[old_child_height])) {
- int i; /* Local index variable */
-
- /* Re-scan child flush dependency height ref. counts to determine
- * this entry's height.
- */
-#ifndef NDEBUG
- for(i = (H5C__NUM_FLUSH_DEP_HEIGHTS - 1); i > (int)new_child_height; i--)
- HDassert(0 == cache_entry->child_flush_dep_height_rc[i]);
-#endif /* NDEBUG */
- for(i = (int)new_child_height; i >= 0; i--)
- /* Check for child flush dependencies of this height */
- if(cache_entry->child_flush_dep_height_rc[i] > 0)
- break;
-
- /* Sanity checks */
- HDassert((unsigned)(i + 1) < cache_entry->flush_dep_height);
-
- /* Check if entry has _its_ own parent flush dependency entry */
- if(NULL != cache_entry->flush_dep_parent) {
- /* Adjust flush dependency ref. counts on entry's parent */
- H5C_adjust_flush_dependency_rc(cache_entry->flush_dep_parent, cache_entry->flush_dep_height, (unsigned)(i + 1));
- } /* end if */
-
- /* Set new flush dependency height of entry */
- cache_entry->flush_dep_height = (unsigned)(i + 1);
- } /* end if */
- } /* end else */
-
-
- /* Post-conditions, for successful operation */
- HDassert(cache_entry->is_pinned);
- HDassert(cache_entry->flush_dep_height > 0);
- HDassert(cache_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
- HDassert(cache_entry->child_flush_dep_height_rc[new_child_height] > 0);
-
- FUNC_LEAVE_NOAPI_VOID
-} /* H5C_adjust_flush_dependency_rc() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5C_create_flush_dependency()
*
* Purpose: Initiates a parent<->child entry flush dependency. The parent
@@ -5009,11 +5425,6 @@ H5C_adjust_flush_dependency_rc(H5C_cache_entry_t * cache_entry,
* currently used to implement Single-Writer/Multiple-Reader (SWMR)
* I/O access for data structures in the file).
*
- * Each child entry can have only one parent entry, but parent
- * entries can have >1 child entries. The flush dependency
- * height of a parent entry is one greater than the max. flush
- * dependency height of its children.
- *
* Creating a flush dependency between two entries will also pin
* the parent entry.
*
@@ -5030,9 +5441,6 @@ H5C_create_flush_dependency(void * parent_thing, void * child_thing)
H5C_t * cache_ptr;
H5C_cache_entry_t * parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent thing's entry */
H5C_cache_entry_t * child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child thing's entry */
-#ifndef NDEBUG
- unsigned prev_flush_dep_height = parent_entry->flush_dep_height; /* Previous flush height for parent entry */
-#endif /* NDEBUG */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -5040,45 +5448,34 @@ H5C_create_flush_dependency(void * parent_thing, void * child_thing)
/* Sanity checks */
HDassert(parent_entry);
HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(parent_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
HDassert(H5F_addr_defined(parent_entry->addr));
HDassert(child_entry);
HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(H5F_addr_defined(child_entry->addr));
- HDassert(child_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
cache_ptr = parent_entry->cache_ptr;
- HDassert(parent_entry->ring == child_entry->ring);
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(cache_ptr == child_entry->cache_ptr);
+#ifndef NDEBUG
+ /* Make sure the parent is not already a parent */
+ {
+ unsigned i;
+
+ for(i=0; i<child_entry->flush_dep_nparents; i++)
+ HDassert(child_entry->flush_dep_parent[i] != parent_entry);
+ } /* end block */
+#endif /* NDEBUG */
/* More sanity checks */
if(child_entry == parent_entry)
HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry flush dependency parent can't be itself")
if(!(parent_entry->is_protected || parent_entry->is_pinned))
HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Parent entry isn't pinned or protected")
- if(NULL != child_entry->flush_dep_parent)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Child entry already has flush dependency parent")
- {
- H5C_cache_entry_t *tmp_entry = parent_entry; /* Temporary cache entry in flush dependency chain */
- unsigned tmp_flush_height = 0; /* Different in heights of parent entry */
-
- /* Find the top entry in the flush dependency list */
- while(NULL != tmp_entry->flush_dep_parent) {
- tmp_flush_height++;
- tmp_entry = tmp_entry->flush_dep_parent;
- } /* end while */
-
- /* Check if we will make the dependency chain too long */
- if((tmp_flush_height + child_entry->flush_dep_height + 1)
- > H5C__NUM_FLUSH_DEP_HEIGHTS)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Combined flush dependency height too large")
- }
/* Check for parent not pinned */
if(!parent_entry->is_pinned) {
/* Sanity check */
- HDassert(parent_entry->flush_dep_height == 0);
+ HDassert(parent_entry->flush_dep_nchildren == 0);
HDassert(!parent_entry->pinned_from_client);
HDassert(!parent_entry->pinned_from_cache);
@@ -5090,33 +5487,56 @@ H5C_create_flush_dependency(void * parent_thing, void * child_thing)
/* Mark the entry as pinned from the cache's action (possibly redundantly) */
parent_entry->pinned_from_cache = TRUE;
- /* Increment ref. count for parent's flush dependency children heights */
- parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height]++;
-
- /* Check for increasing parent flush dependency height */
- if((child_entry->flush_dep_height + 1) > parent_entry->flush_dep_height) {
+ /* Check if we need to resize the child's parent array */
+ if(child_entry->flush_dep_nparents >= child_entry->flush_dep_parent_nalloc) {
+ if(child_entry->flush_dep_parent_nalloc == 0) {
+ /* Array does not exist yet, allocate it */
+ HDassert(!child_entry->flush_dep_parent);
- /* Check if parent entry has _its_ own parent flush dependency entry */
- if(NULL != parent_entry->flush_dep_parent) {
- /* Adjust flush dependency ref. counts on parent entry's parent */
- H5C_adjust_flush_dependency_rc(parent_entry->flush_dep_parent, parent_entry->flush_dep_height, (child_entry->flush_dep_height + 1));
+ if(NULL == (child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_MALLOC(parent, H5C_FLUSH_DEP_PARENT_INIT * sizeof(H5C_cache_entry_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc = H5C_FLUSH_DEP_PARENT_INIT;
} /* end if */
+ else {
+ /* Resize existing array */
+ HDassert(child_entry->flush_dep_parent);
- /* Increase flush dependency height of parent entry */
- parent_entry->flush_dep_height = child_entry->flush_dep_height + 1;
+ if(NULL == (child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_REALLOC(parent, child_entry->flush_dep_parent, 2 * child_entry->flush_dep_parent_nalloc * sizeof(H5C_cache_entry_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc *= 2;
+ } /* end else */
} /* end if */
- /* Set parent for child entry */
- child_entry->flush_dep_parent = parent_entry;
+ /* Add the dependency to the child's parent array */
+ child_entry->flush_dep_parent[child_entry->flush_dep_nparents] = parent_entry;
+ child_entry->flush_dep_nparents++;
+
+ /* Increment parent's number of children */
+ parent_entry->flush_dep_nchildren++;
+
+ /* Adjust the number of dirty children */
+ if(child_entry->is_dirty || child_entry->flush_dep_ndirty_children > 0) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_ndirty_children < parent_entry->flush_dep_nchildren);
+ parent_entry->flush_dep_ndirty_children++;
+
+ /* Propagate the flush dep dirty flag up the chain if necessary */
+ if(!parent_entry->is_dirty
+ && parent_entry->flush_dep_ndirty_children == 1)
+ if(H5C__mark_flush_dep_dirty(parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't propagate flush dep dirty flag")
+ } /* end if */
/* Post-conditions, for successful operation */
HDassert(parent_entry->is_pinned);
- HDassert(parent_entry->flush_dep_height > 0);
- HDassert(parent_entry->flush_dep_height < H5C__NUM_FLUSH_DEP_HEIGHTS);
- HDassert(prev_flush_dep_height <= parent_entry->flush_dep_height);
- HDassert(parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height] > 0);
- HDassert(NULL != child_entry->flush_dep_parent);
+ HDassert(parent_entry->flush_dep_nchildren > 0);
+ HDassert(child_entry->flush_dep_parent);
+ HDassert(child_entry->flush_dep_nparents > 0);
+ HDassert(child_entry->flush_dep_parent_nalloc > 0);
+#ifndef NDEBUG
+ H5C__assert_flush_dep_nocycle(parent_entry, child_entry);
+#endif /* NDEBUG */
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -5127,9 +5547,7 @@ done:
* Function: H5C_destroy_flush_dependency()
*
* Purpose: Terminates a parent<-> child entry flush dependency. The
- * parent entry must be pinned and have a positive flush
- * dependency height (which could go to zero as a result of
- * this operation).
+ * parent entry must be pinned.
*
* Return: Non-negative on success/Negative on failure
*
@@ -5144,9 +5562,7 @@ H5C_destroy_flush_dependency(void *parent_thing, void * child_thing)
H5C_t * cache_ptr;
H5C_cache_entry_t * parent_entry = (H5C_cache_entry_t *)parent_thing; /* Ptr to parent entry */
H5C_cache_entry_t * child_entry = (H5C_cache_entry_t *)child_thing; /* Ptr to child entry */
-#ifndef NDEBUG
- unsigned prev_flush_dep_height = parent_entry->flush_dep_height; /* Previous flush height for parent entry */
-#endif /* NDEBUG */
+ unsigned i; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -5155,9 +5571,7 @@ H5C_destroy_flush_dependency(void *parent_thing, void * child_thing)
HDassert(parent_entry);
HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(H5F_addr_defined(parent_entry->addr));
- HDassert(parent_entry->flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS);
HDassert(child_entry);
- HDassert(child_entry->flush_dep_parent != child_entry);
HDassert(child_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(H5F_addr_defined(child_entry->addr));
cache_ptr = parent_entry->cache_ptr;
@@ -5168,78 +5582,77 @@ H5C_destroy_flush_dependency(void *parent_thing, void * child_thing)
/* Usage checks */
if(!parent_entry->is_pinned)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't pinned")
- if(0 == parent_entry->flush_dep_height)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't a flush dependency parent")
if(NULL == child_entry->flush_dep_parent)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Child entry doesn't have a flush dependency parent")
- if(0 == parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height])
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry flush dependency ref. count has no child entries of this height")
- if(child_entry->flush_dep_parent != parent_entry)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't flush dependency parent for child entry")
-
- /* Decrement the ref. count for flush dependency height of children for parent entry */
- parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height]--;
-
- /* Check for flush dependency ref. count at this height going to zero and
- * parent entry flush dependency height dropping
- */
- if(((child_entry->flush_dep_height + 1) == parent_entry->flush_dep_height) &&
- 0 == parent_entry->child_flush_dep_height_rc[child_entry->flush_dep_height]) {
- int i; /* Local index variable */
-
- /* Reverse scan for new flush dependency height of parent */
-#ifndef NDEBUG
- for(i = (H5C__NUM_FLUSH_DEP_HEIGHTS - 1); i > (int)child_entry->flush_dep_height; i--)
- HDassert(0 == parent_entry->child_flush_dep_height_rc[i]);
-#endif /* NDEBUG */
- for(i = (int)child_entry->flush_dep_height; i >= 0; i--)
- /* Check for child flush dependencies of this height */
- if(parent_entry->child_flush_dep_height_rc[i] > 0)
- break;
-
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Child entry doesn't have a flush dependency parent array")
+ if(0 == parent_entry->flush_dep_nchildren)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry flush dependency ref. count has no child dependencies")
+
+ /* Search for parent in child's parent array. This is a linear search
+ * because we do not expect large numbers of parents. If this changes, we
+ * may wish to change the parent array to a skip list */
+ for(i=0; i<child_entry->flush_dep_nparents; i++)
+ if(child_entry->flush_dep_parent[i] == parent_entry)
+ break;
+ if(i == child_entry->flush_dep_nparents)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't a flush dependency parent for child entry")
+
+ /* Remove parent entry from child's parent array */
+ if(i < child_entry->flush_dep_nparents - 1)
+ HDmemmove(&child_entry->flush_dep_parent[i],
+ &child_entry->flush_dep_parent[i+1],
+ (child_entry->flush_dep_nparents - i - 1)
+ * sizeof(child_entry->flush_dep_parent[0]));
+ child_entry->flush_dep_nparents--;
+
+ /* Adjust parent entry's nchildren and unpin parent if it goes to zero */
+ parent_entry->flush_dep_nchildren--;
+ if(0 == parent_entry->flush_dep_nchildren) {
/* Sanity check */
- HDassert((unsigned)(i + 1) < parent_entry->flush_dep_height);
+ HDassert(parent_entry->pinned_from_cache);
- /* Check if parent entry is a child in another flush dependency relationship */
- if(NULL != parent_entry->flush_dep_parent) {
- /* Change flush dependency ref. counts of parent's parent */
- H5C_adjust_flush_dependency_rc(parent_entry->flush_dep_parent, parent_entry->flush_dep_height, (unsigned)(i + 1));
- } /* end if */
+ /* Check if we should unpin parent entry now */
+ if(!parent_entry->pinned_from_client) {
+ /* Update the replacement policy if the entry is not protected */
+ if(!parent_entry->is_protected)
+ H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, parent_entry, FAIL)
- /* Increase flush dependency height of parent entry */
- parent_entry->flush_dep_height = (unsigned)(i + 1);
+ /* Unpin the entry now */
+ parent_entry->is_pinned = FALSE;
- /* Check for height of parent dropping to zero (i.e. no longer a
- * parent of _any_ child flush dependencies).
- */
- if(0 == parent_entry->flush_dep_height) {
- /* Sanity check */
- HDassert(parent_entry->pinned_from_cache);
+ /* Update the stats for an unpin operation */
+ H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, parent_entry)
+ } /* end if */
- /* Check if we should unpin parent entry now */
- if(!parent_entry->pinned_from_client) {
- /* Update the replacement policy if the entry is not protected */
- if(!parent_entry->is_protected)
- H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, parent_entry, FAIL)
+ /* Mark the entry as unpinned from the cache's action */
+ parent_entry->pinned_from_cache = FALSE;
+ } /* end if */
- /* Unpin the entry now */
- parent_entry->is_pinned = FALSE;
+ /* Adjust parent entry's ndirty_children */
+ if(child_entry->is_dirty || child_entry->flush_dep_ndirty_children > 0) {
+ /* Sanity check */
+ HDassert(parent_entry->flush_dep_ndirty_children > 0);
- /* Update the stats for an unpin operation */
- H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, parent_entry)
- } /* end if */
+ parent_entry->flush_dep_ndirty_children--;
- /* Mark the entry as unpinned from the cache's action */
- parent_entry->pinned_from_cache = FALSE;
- } /* end if */
+ /* Propagate the flush dep clean flag up the chain if necessary */
+ if(!parent_entry->is_dirty
+ && parent_entry->flush_dep_ndirty_children == 0)
+ if(H5C__mark_flush_dep_clean(parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't propagate flush dep clean flag")
} /* end if */
- /* Reset parent of child entry */
- child_entry->flush_dep_parent = NULL;
-
- /* Post-conditions, for successful operation */
- HDassert(prev_flush_dep_height >= parent_entry->flush_dep_height);
- HDassert(NULL == child_entry->flush_dep_parent);
+    /* Shrink or free the parent array if appropriate */
+ if(child_entry->flush_dep_nparents == 0) {
+ child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_FREE(parent, child_entry->flush_dep_parent);
+ child_entry->flush_dep_parent_nalloc = 0;
+ } /* end if */
+ else if(child_entry->flush_dep_parent_nalloc > H5C_FLUSH_DEP_PARENT_INIT
+ && child_entry->flush_dep_nparents
+ <= (child_entry->flush_dep_parent_nalloc / 4)) {
+ if(NULL == (child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_REALLOC(parent, child_entry->flush_dep_parent, (child_entry->flush_dep_parent_nalloc / 4) * sizeof(H5C_cache_entry_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent list")
+ child_entry->flush_dep_parent_nalloc /= 4;
+ } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -5923,6 +6336,8 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) &&
( bytes_evicted < eviction_size_limit ) )
{
+ hbool_t corked = FALSE;
+
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert( ! (entry_ptr->is_protected) );
HDassert( ! (entry_ptr->is_read_only) );
@@ -5936,7 +6351,10 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
prev_is_dirty = prev_ptr->is_dirty;
}
- if ( entry_ptr->is_dirty ) {
+ /* dirty corked entry is skipped */
+ if(entry_ptr->is_corked && entry_ptr->is_dirty)
+ corked = TRUE;
+ else if ( entry_ptr->is_dirty ) {
/* reset entries_removed_counter and
* last_entry_removed_ptr prior to the call to
@@ -5967,7 +6385,10 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
if ( prev_ptr != NULL ) {
- if ( ( restart_scan )
+ if(corked) /* dirty corked entry is skipped */
+ entry_ptr = prev_ptr;
+
+ else if ( ( restart_scan )
||
( prev_ptr->is_dirty != prev_is_dirty )
||
@@ -6584,17 +7005,19 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
} /* end while */
/* Invariants, after destroying all entries in the hash table */
- HDassert(cache_ptr->index_size == 0);
- HDassert(cache_ptr->clean_index_size == 0);
- HDassert(cache_ptr->dirty_index_size == 0);
- HDassert(cache_ptr->slist_len == 0);
- HDassert(cache_ptr->slist_size == 0);
- HDassert(cache_ptr->pel_len == 0);
- HDassert(cache_ptr->pel_size == 0);
- HDassert(cache_ptr->pl_len == 0);
- HDassert(cache_ptr->pl_size == 0);
- HDassert(cache_ptr->LRU_list_len == 0);
- HDassert(cache_ptr->LRU_list_size == 0);
+ if(!(flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG)) {
+ HDassert(cache_ptr->index_size == 0);
+ HDassert(cache_ptr->clean_index_size == 0);
+ HDassert(cache_ptr->dirty_index_size == 0);
+ HDassert(cache_ptr->slist_len == 0);
+ HDassert(cache_ptr->slist_size == 0);
+ HDassert(cache_ptr->pel_len == 0);
+ HDassert(cache_ptr->pel_size == 0);
+ HDassert(cache_ptr->pl_len == 0);
+ HDassert(cache_ptr->pl_size == 0);
+ HDassert(cache_ptr->LRU_list_len == 0);
+ HDassert(cache_ptr->LRU_list_size == 0);
+ } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -6647,8 +7070,8 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
int32_t i;
int32_t cur_ring_pel_len;
int32_t old_ring_pel_len;
- int32_t passes = 0;
unsigned cooked_flags;
+ unsigned evict_flags;
H5SL_node_t * node_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * next_entry_ptr = NULL;
@@ -6678,10 +7101,9 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
HDassert(cache_ptr->epoch_markers_active == 0);
/* Filter out the flags that are not relevant to the flush/invalidate.
- * At present, only the H5C__FLUSH_CLEAR_ONLY_FLAG is kept.
*/
cooked_flags = flags & H5C__FLUSH_CLEAR_ONLY_FLAG;
-
+ evict_flags = flags & H5C__EVICT_ALLOW_LAST_PINS_FLAG;
/* The flush proceedure here is a bit strange.
*
@@ -6692,10 +7114,8 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
*
* Since the fractal heap can dirty, resize, and/or move entries
* in is flush callback, it is possible that the cache will still
- * contain dirty entries at this point. If so, we must make up to
- * H5C__MAX_PASSES_ON_FLUSH more passes through the skip list
- * to allow it to empty. If is is not empty at this point, we again
- * scream and die.
+ * contain dirty entries at this point. If so, we must make more
+ * passes through the skip list to allow it to empty.
*
* Further, since clean entries can be dirtied, resized, and/or moved
* as the result of a flush call back (either the entries own, or that
@@ -6728,402 +7148,353 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
old_ring_pel_len = cur_ring_pel_len;
while(cache_ptr->index_ring_len[ring] > 0) {
- unsigned curr_flush_dep_height = 0;
- unsigned flush_dep_passes = 0;
-
- /* Loop over all flush dependency heights of entries */
- while((curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) &&
- (cache_ptr->index_ring_len[ring] > 0) &&
- (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH)) {
- hbool_t flushed_during_dep_loop = FALSE;
-
- /* first, try to flush-destroy any dirty entries. Do this by
- * making a scan through the slist. Note that new dirty entries
- * may be created by the flush call backs. Thus it is possible
- * that the slist will not be empty after we finish the scan.
- */
+ /* first, try to flush-destroy any dirty entries. Do this by
+ * making a scan through the slist. Note that new dirty entries
+ * may be created by the flush call backs. Thus it is possible
+ * that the slist will not be empty after we finish the scan.
+ */
#if H5C_DO_SANITY_CHECKS
- /* Depending on circumstances, H5C__flush_single_entry() will
- * remove dirty entries from the slist as it flushes them.
- * Thus for sanity checks we must make note of the initial
- * slist length and size before we do any flushes.
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
-
- /* There is also the possibility that entries will be
- * dirtied, resized, moved, and/or removed from the cache
- * as the result of calls to the flush callbacks. We use
- * the slist_len_increase and slist_size_increase increase
- * fields in struct H5C_t to track these changes for purpose
- * of sanity checking.
- *
- * To this end, we must zero these fields before we start
- * the pass through the slist.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
+ /* Depending on circumstances, H5C__flush_single_entry() will
+ * remove dirty entries from the slist as it flushes them.
+ * Thus for sanity checks we must make note of the initial
+ * slist length and size before we do any flushes.
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
+
+ /* There is also the possibility that entries will be
+ * dirtied, resized, moved, and/or removed from the cache
+ * as the result of calls to the flush callbacks. We use
+ * the slist_len_increase and slist_size_increase increase
+ * fields in struct H5C_t to track these changes for purpose
+ * of sanity checking.
+ *
+ * To this end, we must zero these fields before we start
+ * the pass through the slist.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
- /* Finally, reset the flushed_slist_len and flushed_slist_size
- * fields to zero, as these fields are used to accumulate
- * the slist lenght and size that we see as we scan through
- * the slist.
- */
- flushed_slist_len = 0;
- flushed_slist_size = 0;
+ /* Finally, reset the flushed_slist_len and flushed_slist_size
+ * fields to zero, as these fields are used to accumulate
+             * the slist length and size that we see as we scan through
+ * the slist.
+ */
+ flushed_slist_len = 0;
+ flushed_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- /* set the cache_ptr->slist_change_in_pre_serialize and
- * cache_ptr->slist_change_in_serialize to false.
- *
- * These flags are set to TRUE by H5C__flush_single_entry if the
- * slist is modified by a pre_serialize or serialize call
- * respectively.
- *
- * H5C_flush_invalidate_cache() uses these flags to detect any
- * modifications to the slist that might corrupt the scan of
- * the slist -- and restart the scan in this event.
- */
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
-
- /* this done, start the scan of the slist */
- restart_slist_scan = TRUE;
- while(restart_slist_scan || (node_ptr != NULL)) {
- if(restart_slist_scan) {
- restart_slist_scan = FALSE;
-
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
- if(node_ptr == NULL)
- /* the slist is empty -- break out of inner loop */
- break;
-
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if ( NULL == next_entry_ptr )
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
-
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- HDassert(next_entry_ptr->ring >= ring);
- } /* end if */
+ /* set the cache_ptr->slist_change_in_pre_serialize and
+ * cache_ptr->slist_change_in_serialize to false.
+ *
+ * These flags are set to TRUE by H5C__flush_single_entry if the
+ * slist is modified by a pre_serialize or serialize call
+ * respectively.
+ *
+ * H5C_flush_invalidate_cache() uses these flags to detect any
+ * modifications to the slist that might corrupt the scan of
+ * the slist -- and restart the scan in this event.
+ */
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ /* this done, start the scan of the slist */
+ restart_slist_scan = TRUE;
+ while(restart_slist_scan || (node_ptr != NULL)) {
+ if(restart_slist_scan) {
+ restart_slist_scan = FALSE;
+
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ if(node_ptr == NULL)
+ /* the slist is empty -- break out of inner loop */
+ break;
- entry_ptr = next_entry_ptr;
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- /* It is possible that entries will be dirtied, resized,
- * flushed, or removed from the cache via the take ownership
- * flag as the result of pre_serialize or serialized callbacks.
- *
- * This in turn can corrupt the scan through the slist.
- *
- * We test for slist modifications in the pre_serialize
- * and serialize callbacks, and restart the scan of the
- * slist if we find them. However, best we do some extra
- * sanity checking just in case.
- */
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
- HDassert(entry_ptr->ring >= ring);
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
+ } /* end if */
- /* increment node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
- if(node_ptr != NULL) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- HDassert(next_entry_ptr->ring >= ring);
- HDassert(entry_ptr != next_entry_ptr);
- } /* end if */
- else
- next_entry_ptr = NULL;
+ entry_ptr = next_entry_ptr;
- /* Note that we now remove nodes from the slist as we flush
- * the associated entries, instead of leaving them there
- * until we are done, and then destroying all nodes in
- * the slist.
- *
- * While this optimization used to be easy, with the possibility
- * of new entries being added to the slist in the midst of the
- * flush, we must keep the slist in cannonical form at all
- * times.
- */
- HDassert(entry_ptr != NULL);
- HDassert(entry_ptr->in_slist);
+ /* It is possible that entries will be dirtied, resized,
+ * flushed, or removed from the cache via the take ownership
+ * flag as the result of pre_serialize or serialized callbacks.
+ *
+ * This in turn can corrupt the scan through the slist.
+ *
+ * We test for slist modifications in the pre_serialize
+ * and serialize callbacks, and restart the scan of the
+ * slist if we find them. However, best we do some extra
+ * sanity checking just in case.
+ */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(entry_ptr->ring >= ring);
- if(((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) &&
- (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->ring == ring)) {
- if(entry_ptr->is_protected) {
- /* we have major problems -- but lets flush
- * everything we can before we flag an error.
- */
- protected_entries++;
- } else if(entry_ptr->is_pinned) {
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
+ if(node_ptr != NULL) {
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ HDassert(next_entry_ptr->ring >= ring);
+ HDassert(entry_ptr != next_entry_ptr);
+ } /* end if */
+ else
+ next_entry_ptr = NULL;
+
+ /* Note that we now remove nodes from the slist as we flush
+ * the associated entries, instead of leaving them there
+ * until we are done, and then destroying all nodes in
+ * the slist.
+ *
+ * While this optimization used to be easy, with the possibility
+ * of new entries being added to the slist in the midst of the
+             * flush, we must keep the slist in canonical form at all
+ * times.
+ */
+ HDassert(entry_ptr != NULL);
+ HDassert(entry_ptr->in_slist);
+
+ if(((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) &&
+ (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->flush_dep_nchildren == 0) &&
+ (entry_ptr->ring == ring)) {
+ if(entry_ptr->is_protected) {
+ /* we have major problems -- but lets flush
+ * everything we can before we flag an error.
+ */
+ protected_entries++;
+ } else if(entry_ptr->is_pinned) {
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush, but don't tell
- * H5C__flush_single_entry() to destroy the entry
- * as pinned entries can't be evicted.
- */
- if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
#if H5C_DO_SANITY_CHECKS
- /* update flushed_slist_len & flushed_slist_size
- * before the flush. Note that the entry will
- * be removed from the slist after the flush,
- * and thus may be resized by the flush callback.
- * This is OK, as we will catch the size delta in
- * cache_ptr->slist_size_increase.
- *
- */
- flushed_slist_len++;
- flushed_slist_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
+ /* update flushed_slist_len & flushed_slist_size
+ * before the flush. Note that the entry will
+ * be removed from the slist after the flush,
+ * and thus may be resized by the flush callback.
+ * This is OK, as we will catch the size delta in
+ * cache_ptr->slist_size_increase.
+ *
+ */
+ flushed_slist_len++;
+ flushed_slist_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET, entry_size_change_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET, entry_size_change_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
#if H5C_DO_SANITY_CHECKS
- /* entry size may have changed during the flush.
- * Update flushed_slist_size to account for this.
- */
- flushed_slist_size += entry_size_change;
+ /* entry size may have changed during the flush.
+ * Update flushed_slist_size to account for this.
+ */
+ flushed_slist_size += entry_size_change;
#endif /* H5C_DO_SANITY_CHECKS */
- flushed_during_dep_loop = TRUE;
- if((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize)) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
- } /* end if */
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * of the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
} /* end if */
- else {
- if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
+ } /* end if */
+ else {
#if H5C_DO_SANITY_CHECKS
- /* update flushed_slist_len & flushed_slist_size
- * before the flush. Note that the entry will
- * be removed from the slist after the flush,
- * and thus may be resized by the flush callback.
- * This is OK, as we will catch the size delta in
- * cache_ptr->slist_size_increase.
- *
- */
- flushed_slist_len++;
- flushed_slist_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
+ /* update flushed_slist_len & flushed_slist_size
+ * before the flush. Note that the entry will
+ * be removed from the slist after the flush,
+ * and thus may be resized by the flush callback.
+ * This is OK, as we will catch the size delta in
+ * cache_ptr->slist_size_increase.
+ *
+ */
+ flushed_slist_len++;
+ flushed_slist_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
- (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
- entry_size_change_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed.")
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
+ (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
+ entry_size_change_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed.")
#if H5C_DO_SANITY_CHECKS
- /* entry size may have changed during the flush.
- * Update flushed_slist_size to account for this.
- */
- flushed_slist_size += entry_size_change;
+ /* entry size may have changed during the flush.
+ * Update flushed_slist_size to account for this.
+ */
+ flushed_slist_size += entry_size_change;
#endif /* H5C_DO_SANITY_CHECKS */
- flushed_during_dep_loop = TRUE;
-
- if((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize)) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- } /* end if */
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
- } /* end else */
- } /* end if */
- } /* end while loop scanning skip list */
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * of the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+ } /* end else */
+ } /* end if */
+ } /* end while loop scanning skip list */
#if H5C_DO_SANITY_CHECKS
- /* It is possible that entries were added to the slist during
- * the scan, either before or after scan pointer. The following
- * asserts take this into account.
- *
- * Don't bother with the sanity checks if node_ptr != NULL, as
- * in this case we broke out of the loop because it got changed
- * out from under us.
- */
+ /* It is possible that entries were added to the slist during
+ * the scan, either before or after scan pointer. The following
+ * asserts take this into account.
+ *
+ * Don't bother with the sanity checks if node_ptr != NULL, as
+ * in this case we broke out of the loop because it got changed
+ * out from under us.
+ */
- if(node_ptr == NULL) {
- HDassert((flushed_slist_len + cache_ptr->slist_len) ==
- (initial_slist_len + cache_ptr->slist_len_increase));
- HDassert((flushed_slist_size + (int64_t)cache_ptr->slist_size) ==
- ((int64_t)initial_slist_size + cache_ptr->slist_size_increase));
- } /* end if */
+ if(node_ptr == NULL) {
+ HDassert((flushed_slist_len + cache_ptr->slist_len) ==
+ (initial_slist_len + cache_ptr->slist_len_increase));
+ HDassert((flushed_slist_size + (int64_t)cache_ptr->slist_size) ==
+ ((int64_t)initial_slist_size + cache_ptr->slist_size_increase));
+ } /* end if */
#endif /* H5C_DO_SANITY_CHECKS */
- /* Since we are doing a destroy, we must make a pass through
- * the hash table and try to flush - destroy all entries that
- * remain.
- *
- * It used to be that all entries remaining in the cache at
- * this point had to be clean, but with the fractal heap mods
- * this may not be the case. If so, we will flush entries out
- * of increasing address order.
- *
- * Writes to disk are possible here.
- */
- for(i = 0; i < H5C__HASH_TABLE_LEN; i++) {
- next_entry_ptr = cache_ptr->index[i];
-
- while(next_entry_ptr != NULL) {
- entry_ptr = next_entry_ptr;
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring >= ring);
-
- next_entry_ptr = entry_ptr->ht_next;
- HDassert((next_entry_ptr == NULL) ||
- (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
-
- if(((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) &&
- (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->ring == ring)) {
-
- if(entry_ptr->is_protected) {
- /* we have major problems -- but lets flush and
- * destroy everything we can before we flag an
- * error.
- */
- protected_entries++;
- if(!entry_ptr->in_slist)
- HDassert(!(entry_ptr->is_dirty));
- } else if(!(entry_ptr->is_pinned)) {
+ /* Since we are doing a destroy, we must make a pass through
+ * the hash table and try to flush - destroy all entries that
+ * remain.
+ *
+ * It used to be that all entries remaining in the cache at
+ * this point had to be clean, but with the fractal heap mods
+ * this may not be the case. If so, we will flush entries out
+ * of increasing address order.
+ *
+ * Writes to disk are possible here.
+ */
+ for(i = 0; i < H5C__HASH_TABLE_LEN; i++) {
+ next_entry_ptr = cache_ptr->index[i];
+
+ while(next_entry_ptr != NULL) {
+ entry_ptr = next_entry_ptr;
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
+
+ next_entry_ptr = entry_ptr->ht_next;
+ HDassert((next_entry_ptr == NULL) ||
+ (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
+
+ if(((!entry_ptr->flush_me_last) ||
+ ((entry_ptr->flush_me_last) &&
+ (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
+ (entry_ptr->flush_dep_nchildren == 0) &&
+ (entry_ptr->ring == ring)) {
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush.
+ if(entry_ptr->is_protected) {
+ /* we have major problems -- but lets flush and
+ * destroy everything we can before we flag an
+ * error.
+ */
+ protected_entries++;
+ if(!entry_ptr->in_slist)
+ HDassert(!(entry_ptr->is_dirty));
+ } else if(!(entry_ptr->is_pinned)) {
+
+ /* if *entry_ptr is dirty, it is possible
+ * that one or more other entries may be
+ * either removed from the cache, loaded
+ * into the cache, or moved to a new location
+ * in the file as a side effect of the flush.
+ *
+ * If this happens, and one of the target
+ * entries happens to be the next entry in
+ * the hash bucket, we could find ourselves
+ * either find ourselves either scanning a
+ * non-existant entry, scanning through a
+ * different bucket, or skipping an entry.
+ *
+ * Neither of these are good, so restart the
+ * the scan at the head of the hash bucket
+ * after the flush if *entry_ptr was dirty,
+ * on the off chance that the next entry was
+ * a target.
+ *
+ * This is not as inefficient at it might seem,
+ * as hash buckets typically have at most two
+ * or three entries.
+ */
+ hbool_t entry_was_dirty;
+
+ entry_was_dirty = entry_ptr->is_dirty;
+
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
+ (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
+ NULL) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed.")
+
+ if(entry_was_dirty) {
+ /* update stats for hash bucket scan
+ * restart here.
+ * -- JRM
*/
- if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
- /* if *entry_ptr is dirty, it is possible
- * that one or more other entries may be
- * either removed from the cache, loaded
- * into the cache, or moved to a new location
- * in the file as a side effect of the flush.
- *
- * If this happens, and one of the target
- * entries happens to be the next entry in
- * the hash bucket, we could find ourselves
- * either find ourselves either scanning a
- * non-existant entry, scanning through a
- * different bucket, or skipping an entry.
- *
- * Neither of these are good, so restart the
- * the scan at the head of the hash bucket
- * after the flush if *entry_ptr was dirty,
- * on the off chance that the next entry was
- * a target.
- *
- * This is not as inefficient at it might seem,
- * as hash buckets typically have at most two
- * or three entries.
- */
- hbool_t entry_was_dirty;
-
- entry_was_dirty = entry_ptr->is_dirty;
-
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
- (cooked_flags | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG),
- NULL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed.")
-
- if(entry_was_dirty) {
- /* update stats for hash bucket scan
- * restart here.
- * -- JRM
- */
- next_entry_ptr = cache_ptr->index[i];
- H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr)
- } /* end if */
-
- flushed_during_dep_loop = TRUE;
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
+ next_entry_ptr = cache_ptr->index[i];
+ H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr)
} /* end if */
} /* end if */
- /* We can't do anything if the entry is pinned. The
- * hope is that the entry will be unpinned as the
- * result of destroys of entries that reference it.
- *
- * We detect this by noting the change in the number
- * of pinned entries from pass to pass. If it stops
- * shrinking before it hits zero, we scream and die.
- */
- /* if the serialize function on the entry we last evicted
- * loaded an entry into cache (as Quincey has promised me
- * it never will), and if the cache was full, it is
- * possible that *next_entry_ptr was flushed or evicted.
- *
- * Test to see if this happened here. Note that if this
- * test is triggred, we are accessing a deallocated piece
- * of dynamically allocated memory, so we just scream and
- * die.
- *
- * Update: The code to restart the scan after flushes
- * of dirty entries should make it impossible
- * to satisfy the following test. Leave it in
- * in case I am wrong.
- * -- JRM
- */
- if((next_entry_ptr != NULL) && (next_entry_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC))
- /* Something horrible has happened to
- * *next_entry_ptr -- scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr->magic is invalid?!?!?.")
- } /* end while loop scanning hash table bin */
- } /* end for loop scanning hash table */
-
- /* Check for incrementing flush dependency height */
- if(flushed_during_dep_loop) {
- /* If we flushed an entry at this flush dependency height
- * start over at the bottom level of the flush dependencies
- */
- curr_flush_dep_height = 0;
-
- /* Make certain we don't get stuck in an infinite loop */
- flush_dep_passes++;
- } /* end if */
- else
- curr_flush_dep_height++;
+ } /* end if */
- } /* end while loop over flush dependency heights */
+ /* We can't do anything if the entry is pinned. The
+ * hope is that the entry will be unpinned as the
+ * result of destroys of entries that reference it.
+ *
+ * We detect this by noting the change in the number
+ * of pinned entries from pass to pass. If it stops
+ * shrinking before it hits zero, we scream and die.
+ */
+ /* if the serialize function on the entry we last evicted
+ * loaded an entry into cache (as Quincey has promised me
+ * it never will), and if the cache was full, it is
+ * possible that *next_entry_ptr was flushed or evicted.
+ *
+ * Test to see if this happened here. Note that if this
+ * test is triggred, we are accessing a deallocated piece
+ * of dynamically allocated memory, so we just scream and
+ * die.
+ *
+ * Update: The code to restart the scan after flushes
+ * of dirty entries should make it impossible
+ * to satisfy the following test. Leave it in
+ * in case I am wrong.
+ * -- JRM
+ */
+ if((next_entry_ptr != NULL) && (next_entry_ptr->magic != H5C__H5C_CACHE_ENTRY_T_MAGIC))
+ /* Something horrible has happened to
+ * *next_entry_ptr -- scream and die.
+ */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr->magic is invalid?!?!?.")
+ } /* end while loop scanning hash table bin */
+ } /* end for loop scanning hash table */
old_ring_pel_len = cur_ring_pel_len;
entry_ptr = cache_ptr->pel_head_ptr;
@@ -7139,20 +7510,19 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
} /* end while */
if((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) {
+ /* Don't error if allowed to have pinned entries remaining */
+ if(evict_flags)
+ HGOTO_DONE(TRUE)
+
/* The number of pinned entries in the ring is positive, and
* it is not declining. Scream and die.
*/
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
- } else if((cur_ring_pel_len == 0) && (old_ring_pel_len == 0)) {
- /* increment the pass count */
- passes++;
- }
+ } /* end if */
- if(passes >= H5C__MAX_PASSES_ON_FLUSH)
- /* we have exceeded the maximum number of passes through the
- * cache to flush and destroy all entries. Scream and die.
- */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Maximum passes on flush exceeded.")
+ HDassert(protected_entries == cache_ptr->pl_len);
+ if((protected_entries > 0) && (protected_entries == cache_ptr->index_len))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Only protected entries left in cache, protected_entries = %d", (int)protected_entries)
} /* main while loop */
/* Invariants, after destroying all entries in the ring */
@@ -7206,12 +7576,12 @@ herr_t
H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
{
H5C_t * cache_ptr = f->shared->cache;
+ hbool_t destroy;
hbool_t flushed_entries_last_pass;
hbool_t flush_marked_entries;
hbool_t ignore_protected;
hbool_t tried_to_flush_protected_entry = FALSE;
hbool_t restart_slist_scan;
- int32_t passes = 0;
int32_t protected_entries = 0;
H5SL_node_t * node_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
@@ -7246,6 +7616,7 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
+ destroy = ( (flags & H5C__FLUSH_INVALIDATE_FLAG) != 0 );
flush_marked_entries = ( (flags & H5C__FLUSH_MARKED_ENTRIES_FLAG) != 0 );
if(!flush_marked_entries)
@@ -7273,262 +7644,219 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
cache_ptr->slist_change_in_pre_serialize = FALSE;
cache_ptr->slist_change_in_serialize = FALSE;
- while((passes < H5C__MAX_PASSES_ON_FLUSH) &&
- (cache_ptr->slist_ring_len[ring] > 0) &&
+ while((cache_ptr->slist_ring_len[ring] > 0) &&
(protected_entries == 0) &&
(flushed_entries_last_pass)) {
- unsigned curr_flush_dep_height = 0;
- unsigned flush_dep_passes = 0;
-
flushed_entries_last_pass = FALSE;
- /* Loop over all flush dependency heights of entries */
- while((curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) &&
- (cache_ptr->slist_ring_len[ring] > 0) &&
- (flush_dep_passes < H5C__MAX_PASSES_ON_FLUSH)) {
- hbool_t flushed_during_dep_loop = FALSE;
-
#if H5C_DO_SANITY_CHECKS
- /* For sanity checking, try to verify that the skip list has
- * the expected size and number of entries at the end of each
- * internal while loop (see below).
- *
- * Doing this get a bit tricky, as depending on flags, we may
- * or may not flush all the entries in the slist.
- *
- * To make things more entertaining, with the advent of the
- * fractal heap, the entry serialize callback can cause entries
- * to be dirtied, resized, and/or moved. Also, the
- * pre_serialize callback can result in an entry being
- * removed from the cache via the take ownership flag.
- *
- * To deal with this, we first make note of the initial
- * skip list length and size:
- */
- initial_slist_len = cache_ptr->slist_len;
- initial_slist_size = cache_ptr->slist_size;
+ /* For sanity checking, try to verify that the skip list has
+ * the expected size and number of entries at the end of each
+ * internal while loop (see below).
+ *
+ * Doing this get a bit tricky, as depending on flags, we may
+ * or may not flush all the entries in the slist.
+ *
+ * To make things more entertaining, with the advent of the
+ * fractal heap, the entry serialize callback can cause entries
+ * to be dirtied, resized, and/or moved. Also, the
+ * pre_serialize callback can result in an entry being
+ * removed from the cache via the take ownership flag.
+ *
+ * To deal with this, we first make note of the initial
+ * skip list length and size:
+ */
+ initial_slist_len = cache_ptr->slist_len;
+ initial_slist_size = cache_ptr->slist_size;
- /* We then zero counters that we use to track the number
- * and total size of entries flushed:
- */
- flushed_entries_count = 0;
- flushed_entries_size = 0;
-
- /* As mentioned above, there is the possibility that
- * entries will be dirtied, resized, flushed, or removed
- * from the cache via the take ownership flag during
- * our pass through the skip list. To capture the number
- * of entries added, and the skip list size delta,
- * zero the slist_len_increase and slist_size_increase of
- * the cache's instance of H5C_t. These fields will be
- * updated elsewhere to account for slist insertions and/or
- * dirty entry size changes.
- */
- cache_ptr->slist_len_increase = 0;
- cache_ptr->slist_size_increase = 0;
+ /* We then zero counters that we use to track the number
+ * and total size of entries flushed:
+ */
+ flushed_entries_count = 0;
+ flushed_entries_size = 0;
+
+ /* As mentioned above, there is the possibility that
+ * entries will be dirtied, resized, flushed, or removed
+ * from the cache via the take ownership flag during
+ * our pass through the skip list. To capture the number
+ * of entries added, and the skip list size delta,
+ * zero the slist_len_increase and slist_size_increase of
+ * the cache's instance of H5C_t. These fields will be
+ * updated elsewhere to account for slist insertions and/or
+ * dirty entry size changes.
+ */
+ cache_ptr->slist_len_increase = 0;
+ cache_ptr->slist_size_increase = 0;
- /* at the end of the loop, use these values to compute the
- * expected slist length and size and compare this with the
- * value recorded in the cache's instance of H5C_t.
- */
+ /* at the end of the loop, use these values to compute the
+ * expected slist length and size and compare this with the
+ * value recorded in the cache's instance of H5C_t.
+ */
#endif /* H5C_DO_SANITY_CHECKS */
- restart_slist_scan = TRUE;
+ restart_slist_scan = TRUE;
- while((restart_slist_scan ) || (node_ptr != NULL)) {
- if(restart_slist_scan) {
- restart_slist_scan = FALSE;
+ while((restart_slist_scan ) || (node_ptr != NULL)) {
+ if(restart_slist_scan) {
+ restart_slist_scan = FALSE;
- /* Start at beginning of skip list */
- node_ptr = H5SL_first(cache_ptr->slist_ptr);
+ /* Start at beginning of skip list */
+ node_ptr = H5SL_first(cache_ptr->slist_ptr);
- if(node_ptr == NULL)
- /* the slist is empty -- break out of inner loop */
- break;
+ if(node_ptr == NULL)
+ /* the slist is empty -- break out of inner loop */
+ break;
- /* Get cache entry for this node */
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ /* Get cache entry for this node */
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
- } /* end if */
-
- entry_ptr = next_entry_ptr;
-
- /* With the advent of the fractal heap, the free space
- * manager, and the version 3 cache, it is possible
- * that the pre-serialize or serialize callback will
- * dirty, resize, or take ownership of other entries
- * in the cache.
- *
- * To deal with this, I have inserted code to detect any
- * change in the skip list not directly under the control
- * of this function. If such modifications are detected,
- * we must re-start the scan of the skip list to avoid
- * the possibility that the target of the next_entry_ptr
- * may have been flushed or deleted from the cache.
- *
- * To verify that all such possibilities have been dealt
- * with, we do a bit of extra sanity checking on
- * entry_ptr.
- */
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->in_slist);
- HDassert(entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ } /* end if */
+
+ entry_ptr = next_entry_ptr;
+
+ /* With the advent of the fractal heap, the free space
+ * manager, and the version 3 cache, it is possible
+ * that the pre-serialize or serialize callback will
+ * dirty, resize, or take ownership of other entries
+ * in the cache.
+ *
+ * To deal with this, I have inserted code to detect any
+ * change in the skip list not directly under the control
+ * of this function. If such modifications are detected,
+ * we must re-start the scan of the skip list to avoid
+ * the possibility that the target of the next_entry_ptr
+ * may have been flushed or deleted from the cache.
+ *
+ * To verify that all such possibilities have been dealt
+ * with, we do a bit of extra sanity checking on
+ * entry_ptr.
+ */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->in_slist);
+ HDassert(entry_ptr->is_dirty);
+ if(!flush_marked_entries || entry_ptr->flush_marker)
HDassert(entry_ptr->ring >= ring);
- /* increment node pointer now, before we delete its target
- * from the slist.
- */
- node_ptr = H5SL_next(node_ptr);
- if(node_ptr != NULL) {
- next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
- if(NULL == next_entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
-
- HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(next_entry_ptr->is_dirty);
- HDassert(next_entry_ptr->in_slist);
+ /* increment node pointer now, before we delete its target
+ * from the slist.
+ */
+ node_ptr = H5SL_next(node_ptr);
+ if(node_ptr != NULL) {
+ next_entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+ if(NULL == next_entry_ptr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+ if(!flush_marked_entries || next_entry_ptr->flush_marker)
HDassert(next_entry_ptr->ring >= ring);
- HDassert(entry_ptr != next_entry_ptr);
- } /* end if */
- else
- next_entry_ptr = NULL;
-
- HDassert(entry_ptr != NULL);
- HDassert(entry_ptr->in_slist);
- if(((!flush_marked_entries) || (entry_ptr->flush_marker)) &&
- ((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) &&
- (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->ring == ring)) {
- if(entry_ptr->is_protected) {
- /* we probably have major problems -- but lets
- * flush everything we can before we decide
- * whether to flag an error.
- */
- tried_to_flush_protected_entry = TRUE;
- protected_entries++;
- } /* end if */
- else if(entry_ptr->is_pinned) {
+ HDassert(entry_ptr != next_entry_ptr);
+ } /* end if */
+ else
+ next_entry_ptr = NULL;
+
+ HDassert(entry_ptr != NULL);
+ HDassert(entry_ptr->in_slist);
+
+ if(((!flush_marked_entries) || (entry_ptr->flush_marker)) &&
+ ((!entry_ptr->flush_me_last) ||
+ (entry_ptr->flush_me_last &&
+ ((cache_ptr->num_last_entries >= cache_ptr->slist_len) ||
+ (flush_marked_entries && entry_ptr->flush_marker)))) &&
+ ( ( entry_ptr->flush_dep_nchildren == 0 ) ||
+ ( ( ! destroy ) &&
+ ( entry_ptr->flush_dep_ndirty_children == 0 ) ) ) &&
+ (entry_ptr->ring == ring)) {
+ if(entry_ptr->is_protected) {
+ /* we probably have major problems -- but lets
+ * flush everything we can before we decide
+ * whether to flag an error.
+ */
+ tried_to_flush_protected_entry = TRUE;
+ protected_entries++;
+ } /* end if */
+ else if(entry_ptr->is_pinned) {
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush. Note that we
- * aren't trying to do a destroy here, so that
- * is not an issue.
- */
- if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
#if H5C_DO_SANITY_CHECKS
- flushed_entries_count++;
- flushed_entries_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
+ flushed_entries_count++;
+ flushed_entries_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
#if H5C_DO_SANITY_CHECKS
- /* it is possible that the entry size changed
- * during flush -- update flushed_entries_size
- * to account for this.
- */
- flushed_entries_size += entry_size_change;
+ /* it is possible that the entry size changed
+ * during flush -- update flushed_entries_size
+ * to account for this.
+ */
+ flushed_entries_size += entry_size_change;
#endif /* H5C_DO_SANITY_CHECKS */
- flushed_during_dep_loop = TRUE;
-
- if((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize)) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
-
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- } /* end if */
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
- } /* end else-if */
- else {
- /* Test to see if we are can flush the entry now.
- * If we can, go ahead and flush. Note that we
- * aren't trying to do a destroy here, so that
- * is not an issue.
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * of the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
*/
- if(entry_ptr->flush_dep_height == curr_flush_dep_height) {
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
+
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
+
+ flushed_entries_last_pass = TRUE;
+ } /* end else-if */
+ else {
#if H5C_DO_SANITY_CHECKS
- flushed_entries_count++;
- flushed_entries_size += (int64_t)entry_ptr->size;
- entry_size_change = 0;
+ flushed_entries_count++;
+ flushed_entries_size += (int64_t)entry_ptr->size;
+ entry_size_change = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flags, entry_size_change_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
#if H5C_DO_SANITY_CHECKS
- /* it is possible that the entry size changed
- * during flush -- update flushed_entries_size
- * to account for this.
- */
- flushed_entries_size += entry_size_change;
+ /* it is possible that the entry size changed
+ * during flush -- update flushed_entries_size
+ * to account for this.
+ */
+ flushed_entries_size += entry_size_change;
#endif /* H5C_DO_SANITY_CHECKS */
- flushed_during_dep_loop = TRUE;
-
- if((cache_ptr->slist_change_in_serialize) ||
- (cache_ptr->slist_change_in_pre_serialize)) {
- /* The slist has been modified by something
- * other than the simple removal of the
- * of the flushed entry after the flush.
- *
- * This has the potential to corrupt the
- * scan through the slist, so restart it.
- */
- restart_slist_scan = TRUE;
- cache_ptr->slist_change_in_pre_serialize = FALSE;
- cache_ptr->slist_change_in_serialize = FALSE;
-
- H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
- } /* end if */
- } /* end if */
- else if(entry_ptr->flush_dep_height < curr_flush_dep_height)
- /* This shouldn't happen -- if it does, just scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry below current flush dep. height.")
- } /* end else */
- } /* end if */
- } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
-
- /* Check for incrementing flush dependency height */
- if(flushed_during_dep_loop) {
-
- /* If we flushed an entry at this flush dependency height
- * start over at the bottom level of the flush dependencies
- */
- curr_flush_dep_height = 0;
+ if((cache_ptr->slist_change_in_serialize) ||
+ (cache_ptr->slist_change_in_pre_serialize)) {
+ /* The slist has been modified by something
+ * other than the simple removal of the
+ * of the flushed entry after the flush.
+ *
+ * This has the potential to corrupt the
+ * scan through the slist, so restart it.
+ */
+ restart_slist_scan = TRUE;
+ cache_ptr->slist_change_in_pre_serialize = FALSE;
+ cache_ptr->slist_change_in_serialize = FALSE;
- /* Make certain we don't get stuck in an infinite loop */
- flush_dep_passes++;
+ H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
+ } /* end if */
- /* Set flag for outer loop */
- flushed_entries_last_pass = TRUE;
+ flushed_entries_last_pass = TRUE;
+ } /* end else */
} /* end if */
- else
- curr_flush_dep_height++;
- } /* while ( curr_flush_dep_height <= H5C__NUM_FLUSH_DEP_HEIGHTS) */
-
- passes++;
+ } /* while ( ( restart_slist_scan ) || ( node_ptr != NULL ) ) */
#if H5C_DO_SANITY_CHECKS
/* Verify that the slist size and length are as expected. */
@@ -7545,9 +7873,6 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
if(((cache_ptr->pl_len > 0) && (!ignore_protected)) || (tried_to_flush_protected_entry))
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "cache has protected items")
- if((cache_ptr->slist_len != 0) && (passes >= H5C__MAX_PASSES_ON_FLUSH))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush pass limit exceeded.")
-
#if H5C_DO_SANITY_CHECKS
if(!flush_marked_entries) {
HDassert(cache_ptr->slist_ring_len[ring] == 0);
@@ -7604,7 +7929,7 @@ done:
*
* Refactored function to delay all modifications of the
* metadata cache data structures until after any calls
- * to the pre-serialize or serialize callbacks.
+ * to the pre-serialize or serialize callbacks.
*
* Need to do this, as some pre-serialize or serialize
* calls result in calls to the metadata cache and
@@ -7668,7 +7993,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
else
destroy_entry = destroy;
- /* we will write the entry to disk if it exists, is dirty, and if the
+ /* we will write the entry to disk if it exists, is dirty, and if the
* clear only flag is not set.
*/
if(entry_ptr->is_dirty && !clear_only)
@@ -8057,7 +8382,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
/* only log a flush if we actually wrote to disk */
H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
- }
+ } /* end else if */
if(destroy) {
if(take_ownership)
@@ -8066,7 +8391,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
HDassert(destroy_entry);
H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
- }
+ } /* end if */
/* If the entry's type has a 'notify' callback and the entry is about
* to be removed from the cache, send a 'before eviction' notice while
@@ -8097,6 +8422,33 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
+
+#if 0 /* this is useful debugging code -- leave it in for now. -- JRM */
+ if ( ( entry_ptr->flush_dep_nparents > 0 ) ||
+ ( entry_ptr->flush_dep_nchildren > 0 ) ) {
+
+ int i;
+
+ HDfprintf(stdout,
+ "\n\nattempting to evict entry of type \"%s\" at 0X%llx:\n",
+ entry_ptr->type->name, (long long)(entry_ptr->addr));
+
+ for ( i = 0; i < entry_ptr->flush_dep_nparents; i++ ) {
+
+ HDfprintf(stdout,
+ " with FD parent of type \"%s\" at 0X%llx.\n",
+ entry_ptr->flush_dep_parent[i]->type->name,
+ (long long)(entry_ptr->flush_dep_parent[i]->addr));
+ }
+
+ HDfprintf(stdout, " with %d FD children.\n\n",
+ entry_ptr->flush_dep_nchildren);
+ }
+#endif /* this is useful debugging code -- leave it in for now. -- JRM */
+
+ /* verify that the entry is no longer part of any flush dependencies */
+ HDassert(entry_ptr->flush_dep_nparents == 0);
+ HDassert(entry_ptr->flush_dep_nchildren == 0);
}
else {
HDassert(clear_only || write_entry);
@@ -8126,6 +8478,17 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
if(entry_ptr->type->clear && (entry_ptr->type->clear)(f, (void *)entry_ptr, FALSE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to clear entry")
+
+ /* Propagate the clean flag up the flush dependency chain if
+ * appropriate */
+ if(was_dirty) {
+ HDassert(entry_ptr->flush_dep_ndirty_children == 0);
+
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep clean flag")
+ } /* end if */
+
}
/* reset the flush_in progress flag */
@@ -8258,6 +8621,81 @@ done:
/*-------------------------------------------------------------------------
*
+ * Function: H5C_verify_len_eoa
+ *
+ * Purpose: Verify that 'len' does not exceed eoa when 'actual' is
+ * false i.e. 'len" is the initial speculative length from
+ * get_load_size callback with null image pointer.
+ * If exceed, adjust 'len' accordingly.
+ *
+ * Verify that 'len' should not exceed eoa when 'actual' is
+ * true i.e. 'len' is the actual length from get_load_size
+ * callback with non-null image pointer.
+ * If exceed, return error.
+ *
+ * The coding is copied and moved from H5C_load_entry().
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: Vailin Choi
+ * 9/6/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C_verify_len_eoa (H5F_t * f,
+ const H5C_class_t * type,
+ haddr_t addr,
+ size_t *len,
+ htri_t actual)
+{
+ haddr_t eoa; /* End-of-allocation in the file */
+ H5FD_mem_t cooked_type;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
+ * type to H5FD_MEM_DRAW via its call to H5F__accum_read().
+ * Thus we do the same for purposes of computing the eoa
+ * for sanity checks.
+ */
+ cooked_type = (type->mem_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type->mem_type;
+
+ /* Get the file's end-of-allocation value */
+ eoa = H5F_get_eoa(f, cooked_type);
+
+ HDassert(H5F_addr_defined(eoa));
+
+ /* Check for bad address in general */
+ if ( H5F_addr_gt(addr, eoa) )
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "address of object past end of allocation")
+
+ /* Check if the amount of data to read will be past the eoa */
+ if( H5F_addr_gt((addr + *len), eoa) ) {
+
+ if(actual)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA.")
+ else
+ /* Trim down the length of the metadata */
+ /* Note that for some cache clients, this will cause an
+ * assertion failure. JRM -- 8/29/14
+ */
+ *len = (size_t)(eoa - addr);
+ }
+
+ if ( *len <= 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA.")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_verify_len_eoa() */
+
+
+
+/*-------------------------------------------------------------------------
+ *
* Function: H5C_load_entry
*
* Purpose: Attempt to load the entry at the specified disk address
@@ -8290,11 +8728,10 @@ H5C_load_entry(H5F_t * f,
/* known -- otherwise uncompressed. */
/* Zero indicates compression not */
/* enabled. */
- void * image = NULL; /* Buffer for disk image */
+ uint8_t * image = NULL; /* Buffer for disk image */
void * thing = NULL; /* Pointer to thing loaded */
H5C_cache_entry_t * entry; /* Alias for thing loaded, as cache entry */
size_t len; /* Size of image in file */
- unsigned u; /* Local index variable */
void * ret_value = NULL; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -8306,12 +8743,12 @@ H5C_load_entry(H5F_t * f,
/* verify absence of prohibited or unsupported type flag combinations */
HDassert(!(type->flags & H5C__CLASS_NO_IO_FLAG));
-
+
/* for now, we do not combine the speculative load and compressed flags */
HDassert(!((type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) &&
(type->flags & H5C__CLASS_COMPRESSED_FLAG)));
- /* Can't see how skip reads could be usefully combined with
+ /* Can't see how skip reads could be usefully combined with
* either the speculative read or compressed flags. Hence disallow.
*/
HDassert(!((type->flags & H5C__CLASS_SKIP_READS) &&
@@ -8326,7 +8763,7 @@ H5C_load_entry(H5F_t * f,
/* Call the get_load_size callback, to retrieve the initial
* size of image
*/
- if(type->get_load_size(udata, &len) < 0)
+ if(type->get_load_size(NULL, udata, &len, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't retrieve image size")
HDassert(len > 0);
@@ -8392,221 +8829,130 @@ H5C_load_entry(H5F_t * f,
#else /* modified code */ /* JRM */
- haddr_t eoa; /* End-of-allocation in the file */
- H5FD_mem_t cooked_type;
-
- /* if type == H5FD_MEM_GHEAP, H5F_block_read() forces
- * type to H5FD_MEM_DRAW via its call to H5F__accum_read().
- * Thus we do the same for purposes of computing the eoa
- * for sanity checks.
- */
- cooked_type =
- (type->mem_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type->mem_type;
-
- /* Get the file's end-of-allocation value */
- eoa = H5F_get_eoa(f, cooked_type);
-
- HDassert(H5F_addr_defined(eoa));
-
- /* Check for bad address in general */
- if ( H5F_addr_gt(addr, eoa) )
-
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, \
- "address of object past end of allocation")
-
- /* Check if the amount of data to read will be past the eoa */
- if( H5F_addr_gt((addr + len), eoa) ) {
-
- /* Trim down the length of the metadata */
-
- /* Note that for some cache clients, this will cause an
- * assertion failure. JRM -- 8/29/14
- */
- len = (size_t)(eoa - addr);
- }
-
- if ( len <= 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, \
- "len not positive after adjustment for EOA.")
+ if(H5C_verify_len_eoa(f, type, addr, &len, FALSE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid len with respect to EOA.")
#endif /* modified code */ /* JRM */
}
/* Allocate the buffer for reading the on-disk entry image */
- if(NULL == (image = H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, \
- "memory allocation failed for on disk image buffer.")
+ if(NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer.")
#if H5C_DO_MEMORY_SANITY_CHECKS
- HDmemcpy(((uint8_t *)image) + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+ HDmemcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
/* Get the on-disk entry image */
- if ( 0 == (type->flags & H5C__CLASS_SKIP_READS) )
- if(H5F_block_read(f, type->mem_type, addr, len, dxpl_id, image) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
-
- /* Deserialize the on-disk image into the native memory form */
- if(NULL == (thing = type->deserialize(image, len, udata, &dirty)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image")
-
- /* If the client's cache has an image_len callback, check it */
- if(type->image_len) {
- size_t new_len; /* New size of on-disk image */
-
- /* set magic and type field in *entry_ptr. While the image_len
- * callback shouldn't touch the cache specific fields, it may check
- * these fields to ensure that it it has received the expected
- * value.
- *
- * Note that this initialization is repeated below on the off
- * chance that we had to re-try the deserialization.
- */
- entry = (H5C_cache_entry_t *)thing;
- entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
- entry->type = type;
-
- /* verify that compressed and compressed_len are initialized */
- HDassert(compressed == FALSE);
- HDassert(compressed_size == 0);
-
- /* Get the actual image size for the thing */
- if(type->image_len(thing, &new_len, &compressed, &compressed_size) < 0)
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, \
- "can't retrieve image length")
-
- if(new_len == 0)
-
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "image length is 0")
-
- HDassert(((type->flags & H5C__CLASS_COMPRESSED_FLAG) != 0) ||
- ((compressed == FALSE) && (compressed_size == 0)));
- HDassert((compressed == TRUE) || (compressed_size == 0));
-
- if(new_len != len) {
-
- if(type->flags & H5C__CLASS_COMPRESSED_FLAG) {
-
- /* if new_len != len, then compression must be
- * enabled on the entry. In this case, the image_len
- * callback should have set compressed to TRUE, set
- * new_len equal to the uncompressed size of the
- * entry, and compressed_len equal to the compressed
- * size -- which must equal len.
- *
- * We can't verify the uncompressed size, but we can
- * verify the rest with the following assertions.
- */
- HDassert(compressed);
- HDassert(compressed_size == len);
+ if ( 0 == (type->flags & H5C__CLASS_SKIP_READS) ) {
+ unsigned tries, max_tries; /* The # of read attempts */
+ unsigned retries; /* The # of retries */
+ htri_t chk_ret; /* return from verify_chksum callback */
+ size_t actual_len = len;
+ void *new_image; /* Pointer to image */
+
+ /* Get the # of read attempts */
+ max_tries = tries = H5F_GET_READ_ATTEMPTS(f);
+
+ /*
+ * This do/while loop performs the following until the metadata checksum is correct or the
+ * file's allowed read attempts are reached.
+ * --read the metadata
+ * --determine the actual size of the metadata
+ * --perform checksum verification
+ */
+ do {
+ compressed = FALSE;
+ compressed_size = 0;
+
+ if(actual_len != len) {
+ if(NULL == (new_image = H5MM_realloc(image, len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
+ image = (uint8_t *)new_image;
+ }
- /* new_len should contain the uncompressed size. Set len
- * equal to new_len, so that the cache will use the
- * uncompressed size for purposes of space allocation, etc.
- */
- len = new_len;
+ if(H5F_block_read(f, type->mem_type, addr, len, dxpl_id, image) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
+
+ actual_len = len;
- } else if (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) {
+ if(type->get_load_size(image, udata, &len, &actual_len, &compressed, &compressed_size) < 0)
+ continue; /* Transfer control to while() and count towards retries */
- void *new_image; /* Buffer for disk image */
+ HDassert(((type->flags & H5C__CLASS_COMPRESSED_FLAG) != 0) ||
+ ((compressed == FALSE) && (compressed_size == 0)));
+ HDassert((compressed == TRUE) || (compressed_size == 0));
- /* compressed must be FALSE, and compressed_size
- * must be zero.
- */
- HDassert(!compressed);
- HDassert(compressed_size == 0);
+ if(actual_len != len) {
- /* Adjust the size of the image to match new_len */
- if(NULL == (new_image = H5MM_realloc(image,
- new_len + H5C_IMAGE_EXTRA_SPACE)))
+ if(type->flags & H5C__CLASS_COMPRESSED_FLAG) {
+ /* if actual_len != len, then compression must be enabled on the entry.
+ * In this case, the get_load_size callback should have set compressed to TRUE,
+ * compressed_size to the compressed size (which must equal to len),
+ * and actual_len to the uncompressed size of the entry,
+ * We can't verify the uncompressed size, but we can verify the rest
+ * with the following assertions.
+ */
+ HDassert(compressed);
+ HDassert(compressed_size == len);
+ } else if(type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, \
- "image null after H5MM_realloc()")
+ size_t temp_len = actual_len;
- image = new_image;
+ /* compressed must be FALSE, and compressed_size
+ * must be zero.
+ */
+ HDassert(!compressed);
+ HDassert(compressed_size == 0);
+
+ if(H5C_verify_len_eoa(f, type, addr, &temp_len, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA.")
+ HDassert(temp_len == actual_len);
+ if(NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "image null after H5MM_realloc()")
+ image = (uint8_t *)new_image;
#if H5C_DO_MEMORY_SANITY_CHECKS
- HDmemcpy(((uint8_t *)image) + new_len,
- H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+ HDmemcpy(image + actual_len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ /* If the thing's image needs to be bigger for a speculatively
+ * loaded thing, go get the on-disk image again (the extra portion).
+ */
+ if(actual_len > len) {
+ if(H5F_block_read(f, type->mem_type, addr+len, actual_len-len, dxpl_id, image+len) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't read image")
+ }
+ } else { /* throw an error */
+ HGOTO_ERROR(H5E_CACHE, H5E_UNSUPPORTED, NULL, "size of non-speculative, non-compressed object changed")
+ }
+ } /* end if (actual_len != len) */
+
+ if(type->verify_chksum == NULL)
+ break;
+
+ if((chk_ret = type->verify_chksum(image, actual_len, udata)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, NULL, "Failure from verify_chksum callback")
+ if(chk_ret == TRUE)
+ break;
+ } while(--tries);
+
+ /* Check for too many tries */
+ if(tries == 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "incorrect metadata checksum after all read attempts")
+
+ /* Calculate and track the # of retries */
+ retries = max_tries - tries;
+ if(retries) { /* Does not track 0 retry */
+ if(H5F_track_metadata_read_retries(f, (unsigned)type->mem_type, retries) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "cannot track read tries = %u ", retries)
+ } /* end if */
+ len = actual_len;
+ } /* end if !H5C__CLASS_SKIP_READS */
- /* If the thing's image needs to be bigger for a speculatively
- * loaded thing, free the thing and retry with new length
- */
- if (new_len > len) {
-
- /* Release previous (possibly partially initialized)
- * thing. Note that we must set entry->magic to
- * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC and set one or
- * two other fields before the call to free_icr
- * so as to avoid sanity check failures.
- */
- entry->magic = H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC;
-
- entry->addr = addr;
-
- if ( type->free_icr(thing) < 0 )
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, \
- "free_icr callback failed")
-
- /* Go get the on-disk image again */
- if(H5F_block_read(f, type->mem_type, addr,
- new_len, dxpl_id, image) < 0)
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, \
- "Can't read image")
-
- /* Deserialize on-disk image into native memory
- * form again
- */
- if(NULL == (thing = type->deserialize(image, new_len,
- udata, &dirty)))
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, \
- "Can't deserialize image")
-
-#ifndef NDEBUG
- {
- /* new_compressed and new_compressed_size must be
- * initialize to FALSE / 0 respectively, as clients
- * that don't use compression may ignore these two
- * parameters.
- */
- hbool_t new_compressed = FALSE;
- size_t new_compressed_size = 0;
- size_t new_new_len;
-
- /* Get the actual image size for the thing again. Note
- * that since this is a new thing, we have to set
- * the magic and type fields again so as to avoid
- * failing sanity checks.
- */
- entry = (H5C_cache_entry_t *)thing;
- entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
- entry->type = type;
-
- type->image_len(thing, &new_new_len, &new_compressed, &new_compressed_size);
- HDassert(new_new_len == new_len);
- HDassert(!new_compressed);
- HDassert(new_compressed_size == 0);
- }
-#endif /* NDEBUG */
- } /* end if (new_len > len) */
-
- /* Retain adjusted size */
- len = new_len;
-
- } else { /* throw an error */
-
- HGOTO_ERROR(H5E_CACHE, H5E_UNSUPPORTED, NULL, \
- "size of non-speculative, non-compressed object changed")
- }
- } /* end if (new_len != len) */
- } /* end if */
+ /* Deserialize the on-disk image into the native memory form */
+ if(NULL == (thing = type->deserialize(image, len, udata, &dirty)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't deserialize image")
entry = (H5C_cache_entry_t *)thing;
@@ -8621,7 +8967,7 @@ H5C_load_entry(H5F_t * f,
*
* HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
*
- * note that type ids 5 & 6 are associated with object headers in the
+ * note that type ids 5 & 6 are associated with object headers in the
* metadata cache.
*
* When we get to using H5C for other purposes, we may wish to
@@ -8660,9 +9006,10 @@ H5C_load_entry(H5F_t * f,
/* Initialize flush dependency height fields */
entry->flush_dep_parent = NULL;
- for(u = 0; u < H5C__NUM_FLUSH_DEP_HEIGHTS; u++)
- entry->child_flush_dep_height_rc[u] = 0;
- entry->flush_dep_height = 0;
+ entry->flush_dep_nparents = 0;
+ entry->flush_dep_parent_nalloc = 0;
+ entry->flush_dep_nchildren = 0;
+ entry->flush_dep_ndirty_children = 0;
entry->ht_next = NULL;
entry->ht_prev = NULL;
@@ -8687,7 +9034,7 @@ done:
"free_icr callback failed")
if(image)
- image = H5MM_xfree(image);
+ image = (uint8_t *)H5MM_xfree(image);
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
@@ -8781,6 +9128,7 @@ H5C_make_space_in_cache(H5F_t * f,
H5C_cache_entry_t * entry_ptr;
H5C_cache_entry_t * prev_ptr;
H5C_cache_entry_t * next_ptr;
+ int32_t num_corked_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -8824,8 +9172,8 @@ H5C_make_space_in_cache(H5F_t * f,
( entry_ptr != NULL )
)
{
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert( ! (entry_ptr->is_protected) );
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert( !(entry_ptr->is_protected) );
HDassert( ! (entry_ptr->is_read_only) );
HDassert( (entry_ptr->ro_ref_count) == 0 );
@@ -8837,7 +9185,13 @@ H5C_make_space_in_cache(H5F_t * f,
prev_is_dirty = prev_ptr->is_dirty;
}
- if ( ( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) &&
+ if (entry_ptr->is_corked && entry_ptr->is_dirty) {
+
+ /* Skip "dirty" corked entries. */
+ ++num_corked_entries;
+ didnt_flush_entry = TRUE;
+
+ } else if ( ( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) &&
( ! entry_ptr->flush_in_progress ) ) {
didnt_flush_entry = FALSE;
@@ -8857,8 +9211,8 @@ H5C_make_space_in_cache(H5F_t * f,
* last_entry_removed_ptr prior to the call to
* H5C__flush_single_entry() so that we can spot
* unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
* that is no longer in the cache.
*/
cache_ptr->entries_removed_counter = 0;
@@ -8881,7 +9235,6 @@ H5C_make_space_in_cache(H5F_t * f,
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
-
} else {
/* We have enough space so don't flush clean entry. */
#if H5C_COLLECT_CACHE_STATS
@@ -8906,15 +9259,15 @@ H5C_make_space_in_cache(H5F_t * f,
if ( didnt_flush_entry ) {
- /* epoch markers don't get flushed, and we don't touch
+ /* epoch markers don't get flushed, and we don't touch
* entries that are in the process of being flushed.
- * Hence no need for sanity checks, as we haven't
- * flushed anything. Thus just set entry_ptr to prev_ptr
+ * Hence no need for sanity checks, as we haven't
+ * flushed anything. Thus just set entry_ptr to prev_ptr
* and go on.
*/
entry_ptr = prev_ptr;
- } else if ( ( restart_scan )
+ } else if ( ( restart_scan )
||
( prev_ptr->is_dirty != prev_is_dirty )
||
@@ -8977,12 +9330,14 @@ H5C_make_space_in_cache(H5F_t * f,
}
#endif /* H5C_COLLECT_CACHE_STATS */
+
+ /* NEED: work on a better assert for corked entries */
HDassert( ( entries_examined > (2 * initial_list_len) ) ||
( (cache_ptr->pl_size + cache_ptr->pel_size + cache_ptr->min_clean_size) >
cache_ptr->max_cache_size ) ||
( ( cache_ptr->clean_index_size + empty_space )
- >= cache_ptr->min_clean_size ) );
-
+ >= cache_ptr->min_clean_size ) ||
+ ( ( num_corked_entries )));
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
HDassert( ( entries_examined > (2 * initial_list_len) ) ||
@@ -9682,7 +10037,7 @@ done:
* Function: H5C_ignore_tags
*
* Purpose: Override all assertion frameworks associated with making
- * sure proper tags are applied to metadata.
+ * sure proper tags are applied to cache entries.
*
* NOTE: This should really only be used in tests that need
* to access internal functions without going through
@@ -9737,7 +10092,7 @@ static herr_t
H5C_tag_entry(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr, hid_t dxpl_id)
{
H5P_genplist_t *dxpl; /* dataset transfer property list */
- haddr_t tag; /* Tag address */
+ H5C_tag_t tag; /* Tag structure */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -9752,13 +10107,13 @@ H5C_tag_entry(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr, hid_t dxpl_id)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
/* Get the tag from the DXPL */
- if((H5P_get(dxpl, "H5AC_metadata_tag", &tag)) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to query property value")
+ if((H5P_get(dxpl, "H5C_tag", &tag)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to query property value")
if(cache_ptr->ignore_tags != TRUE) {
#if H5C_DO_TAGGING_SANITY_CHECKS
/* Perform some sanity checks to ensure that a correct tag is being applied */
- if(H5C_verify_tag(entry_ptr->type->id, tag) < 0)
+ if(H5C_verify_tag(entry_ptr->type->id, tag.value) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "tag verification failed")
#endif
} else {
@@ -9769,12 +10124,17 @@ H5C_tag_entry(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr, hid_t dxpl_id)
arbitrarily set it to something for the sake of passing the tests.
If the tag value is set, then we'll just let it get assigned without
additional checking for correctness. */
- if(!tag)
- tag = H5AC__IGNORE_TAG;
+ if(!tag.value) {
+ tag.value = H5AC__IGNORE_TAG;
+ tag.globality = H5C_GLOBALITY_NONE;
+ } /* end if */
} /* end if */
/* Apply the tag to the entry */
- entry_ptr->tag = tag;
+ entry_ptr->tag = tag.value;
+
+ /* Apply the tag globality to the entry */
+ entry_ptr->globality = tag.globality;
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -9783,87 +10143,166 @@ done:
/*-------------------------------------------------------------------------
*
- * Function: H5C_flush_tagged_entries
- *
- * WARNING: Not yet tested or used anywhere. (written awhile ago,
- * will keep it around in anticipation of being used in
- * subsequent changes to support flushing individual objects).
+ * Function: H5C_evict_tagged_entries
*
- * Purpose: Flushes all entries with the specified tag to disk.
+ * Purpose: Evicts all entries with the specified tag from cache
*
* Return: FAIL if error is detected, SUCCEED otherwise.
*
* Programmer: Mike McGreevy
- * November 3, 2009
+ * August 19, 2010
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5C_flush_tagged_entries(H5F_t * f, hid_t dxpl_id, H5C_t * cache_ptr, haddr_t tag)
+herr_t
+H5C_evict_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag)
{
- herr_t ret_value = SUCCEED;
+ /* Variable Declarations */
+ H5C_t *cache_ptr = NULL;
+ H5C_cache_entry_t * entry_ptr = NULL;
+ H5C_cache_entry_t * next_entry_ptr = NULL;
+ hbool_t evicted_entries_last_pass;
+ hbool_t pinned_entries_need_evicted;
+ int i;
+ herr_t ret_value = SUCCEED;
+ /* Function Enter Macro */
FUNC_ENTER_NOAPI(FAIL)
/* Assertions */
- HDassert(0); /* This function is not yet used. We shouldn't be in here yet. */
- HDassert(cache_ptr != NULL);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(f);
+ HDassert(f->shared);
- /* Mark all entries with specified tag */
- if(H5C_mark_tagged_entries(cache_ptr, tag) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't mark tagged entries")
+ /* Get cache pointer */
+ cache_ptr = f->shared->cache;
- /* Flush all marked entries */
- if(H5C_flush_marked_entries(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush marked entries")
+ HDassert( cache_ptr != NULL );
+ HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
+
+ /* Start evicting entries */
+ do {
+
+ /* Reset pinned/evicted trackers */
+ pinned_entries_need_evicted = FALSE;
+ evicted_entries_last_pass = FALSE;
+
+ /* Iterate through entries in the index. */
+ for (i = 0; i < H5C__HASH_TABLE_LEN; i++) {
+
+ next_entry_ptr = cache_ptr->index[i];
+
+ while ( next_entry_ptr != NULL ) {
+
+ entry_ptr = next_entry_ptr;
+ next_entry_ptr = entry_ptr->ht_next;
+
+ if(( entry_ptr->tag == tag ) ||
+ ( entry_ptr->globality == H5C_GLOBALITY_MAJOR)) {
+
+ /* This entry will need to be evicted */
+
+ if ( entry_ptr->is_protected ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cannot evict protected entry");
+ } else if (entry_ptr->is_dirty) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cannot evict dirty entry");
+ } else if (entry_ptr->is_pinned) {
+
+ /* Can't evict at this time, but let's note that we hit a pinned
+ entry and we'll loop back around again (as evicting other
+ entries will hopefully unpin this entry) */
+
+ pinned_entries_need_evicted = TRUE;
+
+ } else {
+
+ /* Evict the Entry */
+
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
+ H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG,
+ NULL) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "Entry eviction failed.")
+
+ evicted_entries_last_pass = TRUE;
+
+ } /* end if */
+
+ } /* end if */
+
+ } /* end while */
+
+ } /* end for */
+
+ /* Keep doing this until we have stopped evicting entries */
+ } while (evicted_entries_last_pass == TRUE);
+
+ /* If we stop evicting entries and pinned entries still need to be
+ evicted, then we have a problem. */
+ if (pinned_entries_need_evicted) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Pinned entries still need evicted?!");
+ } /* end if */
done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_flush_tagged_entries */
+ FUNC_LEAVE_NOAPI(ret_value);
+
+} /* H5C_evict_tagged_entries */
/*-------------------------------------------------------------------------
*
* Function: H5C_mark_tagged_entries
*
- * WARNING: Not yet tested or used anywhere. (written awhile ago,
- * will keep it around in anticipation of being used in
- * subsequent changes to support flushing individual objects).
- *
- * Purpose: Set the flush marker on entries in the cache that have
- * the specified tag.
+ * Purpose: Set the flush marker on dirty entries in the cache that have
+ * the specified tag, as well as all globally tagged entries.
+ * If mark_clean is set, this function will also mark all clean
+ * entries, indicating they are to be evicted.
*
* Return: FAIL if error is detected, SUCCEED otherwise.
*
* Programmer: Mike McGreevy
- * November 3, 2009
+ * September 9, 2010
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5C_mark_tagged_entries(H5C_t * cache_ptr, haddr_t tag)
+H5C_mark_tagged_entries(H5C_t * cache_ptr, haddr_t tag, hbool_t mark_clean)
{
- H5C_cache_entry_t *next_entry_ptr; /* entry pointer */
- unsigned u; /* Local index variable */
+ /* Variable Declarations */
+ int u; /* Iterator */
+ H5C_cache_entry_t *entry_ptr = NULL; /* entry pointer */
FUNC_ENTER_NOAPI_NOINIT_NOERR
/* Assertions */
- HDassert(0); /* This function is not yet used. We shouldn't be in here yet. */
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
- /* Iterate through entries, marking those with specified tag. */
+ /* Iterate through entries, marking those with specified tag, as
+ * well as any major global entries which should always be flushed
+ * when flushing based on tag value */
for(u = 0; u < H5C__HASH_TABLE_LEN; u++) {
- next_entry_ptr = cache_ptr->index[u];
- while(next_entry_ptr != NULL) {
- if(next_entry_ptr->tag == tag)
- next_entry_ptr->flush_marker = TRUE;
+ entry_ptr = cache_ptr->index[u];
- next_entry_ptr = next_entry_ptr->ht_next;
- } /* end while */
+ while ( entry_ptr != NULL ) {
+
+ if (( entry_ptr->tag == tag ) ||
+ ( entry_ptr->globality == H5C_GLOBALITY_MAJOR)) {
+
+ /* We only want to set the flush marker on entries that
+ * actually need to be flushed (i.e., dirty ones), unless
+ * we've specified otherwise with the mark_clean flag */
+ if (entry_ptr->is_dirty || mark_clean) {
+
+ entry_ptr->flush_marker = TRUE;
+
+ } /* end if */
+
+ } /* end if */
+
+ entry_ptr = entry_ptr->ht_next;
+ } /* end while */
} /* for */
FUNC_LEAVE_NOAPI(SUCCEED)
@@ -9874,16 +10313,12 @@ H5C_mark_tagged_entries(H5C_t * cache_ptr, haddr_t tag)
*
* Function: H5C_flush_marked_entries
*
- * WARNING: Not yet tested or used anywhere. (written awhile ago,
- * will keep it around in anticipation of being used in
- * subsequent changes to support flushing individual objects).
- *
* Purpose: Flushes all marked entries in the cache.
*
* Return: FAIL if error is detected, SUCCEED otherwise.
*
* Programmer: Mike McGreevy
- * November 3, 2009
+ * November 3, 2010
*
*-------------------------------------------------------------------------
*/
@@ -9895,7 +10330,6 @@ H5C_flush_marked_entries(H5F_t * f, hid_t dxpl_id)
FUNC_ENTER_NOAPI_NOINIT
/* Assertions */
- HDassert(0); /* This function is not yet used. We shouldn't be in here yet. */
HDassert(f != NULL);
/* Flush all marked entries */
@@ -9986,11 +10420,53 @@ done:
/*-------------------------------------------------------------------------
*
- * Function: H5C_retag_copied_metadata
+ * Function: H5C_flush_tagged_entries
+ *
+ * Purpose: Flushes all entries with the specified tag to disk.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: Mike McGreevy
+ * August 19, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_flush_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag)
+{
+ /* Variable Declarations */
+ H5C_t *cache_ptr = NULL;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Assertions */
+ HDassert(f);
+ HDassert(f->shared);
+
+ /* Get cache pointer */
+ cache_ptr = f->shared->cache;
+
+ /* Mark all entries with specified tag */
+ if(H5C_mark_tagged_entries(cache_ptr, tag, FALSE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't mark tagged entries")
+
+ /* Flush all marked entries */
+ if(H5C_flush_marked_entries(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush marked entries")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_flush_tagged_entries */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_retag_entries
*
* Purpose: Searches through cache index for all entries with the
- * H5AC__COPIED_TAG, indicating that it was created as a
- * result of an object copy, and applies the provided tag.
+ * value specified by src_tag and changes it to the value
+ * specified by dest_tag.
*
* Return: SUCCEED or FAIL.
*
@@ -10000,30 +10476,27 @@ done:
*-------------------------------------------------------------------------
*/
void
-H5C_retag_copied_metadata(H5C_t * cache_ptr, haddr_t metadata_tag)
+H5C_retag_entries(H5C_t * cache_ptr, haddr_t src_tag, haddr_t dest_tag)
{
unsigned u; /* Local index variable */
+ H5C_cache_entry_t *entry_ptr = NULL; /* entry pointer */
FUNC_ENTER_NOAPI_NOINIT_NOERR
- HDassert(cache_ptr);
-
- /* Iterate through entries, retagging those with the H5AC__COPIED_TAG tag */
+ /* Iterate through entries, retagging those with the src_tag tag */
for(u = 0; u < H5C__HASH_TABLE_LEN; u++) {
- H5C_cache_entry_t *next_entry_ptr; /* entry pointer */
-
- next_entry_ptr = cache_ptr->index[u];
- while(next_entry_ptr != NULL) {
- if(cache_ptr->index[u] != NULL)
- if((cache_ptr->index[u])->tag == H5AC__COPIED_TAG)
- (cache_ptr->index[u])->tag = metadata_tag;
-
- next_entry_ptr = next_entry_ptr->ht_next;
- } /* end while */
+ entry_ptr = cache_ptr->index[u];
+ while(entry_ptr != NULL) {
+ if(cache_ptr->index[u] != NULL)
+ if((cache_ptr->index[u])->tag == src_tag) {
+ (cache_ptr->index[u])->tag = dest_tag;
+ }
+ entry_ptr = entry_ptr->ht_next;
+ } /* end while */
} /* end for */
FUNC_LEAVE_NOAPI_VOID
-} /* H5C_retag_copied_metadata */
+} /* H5C_retag_entries */
/*-------------------------------------------------------------------------
@@ -10069,3 +10542,284 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_get_entry_ring() */
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_cork
+ *
+ * Purpose: To cork/uncork/get cork status of an object depending on "action":
+ * H5C__SET_CORK:
+ * To cork the object
+ * Return error if the object is already corked
+ * H5C__UNCORK:
+ * To uncork the object
+ * Return error if the object is not corked
+ * H5C__GET_CORKED:
+ * To retrieve the cork status of an object in
+ * the parameter "corked"
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; January 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_cork(H5C_t * cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked)
+{
+ haddr_t *ptr; /* Points to an address */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Assertions */
+ HDassert(cache_ptr != NULL);
+ HDassert(H5F_addr_defined(obj_addr));
+ HDassert(action == H5C__SET_CORK || action == H5C__UNCORK || action == H5C__GET_CORKED);
+
+ /* Search the list of corked object addresses in the cache */
+ ptr = (haddr_t *)H5SL_search(cache_ptr->cork_list_ptr, &obj_addr);
+
+ if(H5C__GET_CORKED == action) {
+ HDassert(corked);
+ if(ptr != NULL && *ptr == obj_addr)
+ *corked = TRUE;
+ else
+ *corked = FALSE;
+ } /* end if */
+ else {
+ hbool_t is_corked; /* Cork status for an entry */
+
+ /* Sanity check */
+ HDassert(H5C__SET_CORK == action || H5C__UNCORK == action);
+
+ /* Perform appropriate action */
+ if(H5C__SET_CORK == action) {
+ haddr_t *addr_ptr = NULL; /* Points to an address */
+
+ if(ptr != NULL && *ptr == obj_addr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't cork an already corked object")
+
+ /* Allocate address */
+ if(NULL == (addr_ptr = H5FL_MALLOC(haddr_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
+
+ /* Insert into the list */
+ *addr_ptr = obj_addr;
+ if(H5SL_insert(cache_ptr->cork_list_ptr, addr_ptr, addr_ptr) < 0) {
+ addr_ptr = H5FL_FREE(haddr_t, addr_ptr);
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't insert address into cork list")
+ } /* end if */
+
+ /* Set the entry's cork status */
+ is_corked = TRUE;
+ } /* end if */
+ else {
+ if(ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't uncork an object that is not corked ")
+
+ /* Remove the object address from the list */
+ ptr = (haddr_t *)H5SL_remove(cache_ptr->cork_list_ptr, &obj_addr);
+ if(ptr == NULL || *ptr != obj_addr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't remove address from list")
+
+ /* Free address */
+ ptr = H5FL_FREE(haddr_t, ptr);
+
+ /* Set the entry's cork status */
+ is_corked = FALSE;
+ } /* end else */
+
+ /* Mark existing cache entries with tag (obj_addr) to the cork status */
+ if(H5C_mark_tagged_entries_cork(cache_ptr, obj_addr, is_corked) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't mark cork status on entry")
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_cork() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_mark_tagged_entries_cork
+ *
+ * NEED: work to combine with H5C_mark_tagged_entries()--
+ * probably an action (FLUSH or CORK) with hbool_t clean_or_cork
+ *
+ * Purpose: To set the "is_corked" field to "val" for entries in cache
+ * with the entry's tag equal to "obj_addr".
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: Vailin Choi; January 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C_mark_tagged_entries_cork(H5C_t *cache_ptr, haddr_t obj_addr, hbool_t val)
+{
+ /* Variable Declarations */
+ int u; /* Iterator */
+ H5C_cache_entry_t *entry_ptr = NULL; /* entry pointer */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Assertions */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Iterate through entries, find each entry with the specified tag */
+ /* and set the entry's "corked" field to "val" */
+ for(u = 0; u < H5C__HASH_TABLE_LEN; u++) {
+
+ entry_ptr = cache_ptr->index[u];
+
+ while(entry_ptr != NULL) {
+
+ if(entry_ptr->tag == obj_addr)
+ entry_ptr->is_corked = val;
+
+ entry_ptr = entry_ptr->ht_next;
+ } /* end while */
+ } /* end for */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5C_mark_tagged_entries_cork */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_dirty()
+ *
+ * Purpose: Recursively propagate the flush_dep_ndirty_children flag
+ * up the dependency chain in response to entry either
+ * becoming dirty or having its flush_dep_ndirty_children
+ * increased from 0.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 11/13/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry)
+{
+ unsigned i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert((entry->is_dirty && entry->flush_dep_ndirty_children == 0)
+ || (!entry->is_dirty && entry->flush_dep_ndirty_children == 1));
+
+ /* Iterate over the parent entries, if any */
+ for(i=0; i<entry->flush_dep_nparents; i++) {
+ /* Sanity check */
+ HDassert(entry->flush_dep_parent[i]->flush_dep_ndirty_children
+ < entry->flush_dep_parent[i]->flush_dep_nchildren);
+
+ /* Adjust the parent's number of dirty children */
+ entry->flush_dep_parent[i]->flush_dep_ndirty_children++;
+
+ /* Propagate the flush dep dirty flag up the chain if necessary */
+ if(!entry->flush_dep_parent[i]->is_dirty
+ && entry->flush_dep_parent[i]->flush_dep_ndirty_children == 1)
+ if(H5C__mark_flush_dep_dirty(entry->flush_dep_parent[i]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't propagate flush dep dirty flag")
+ } /* end for */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_dirty() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_clean()
+ *
+ * Purpose: Recursively propagate the flush_dep_ndirty_children flag
+ * up the dependency chain in response to entry either
+ * becoming clean or having its flush_dep_ndirty_children
+ * reduced to 0.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 11/13/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry)
+{
+ unsigned i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(!entry->is_dirty && entry->flush_dep_ndirty_children == 0);
+
+ /* Iterate over the parent entries, if any */
+ for(i=0; i<entry->flush_dep_nparents; i++) {
+ /* Sanity check */
+ HDassert(entry->flush_dep_parent[i]->flush_dep_ndirty_children > 0);
+
+ /* Adjust the parent's number of dirty children */
+ entry->flush_dep_parent[i]->flush_dep_ndirty_children--;
+
+ /* Propagate the flush dep clean flag up the chain if necessary */
+ if(!entry->flush_dep_parent[i]->is_dirty
+ && entry->flush_dep_parent[i]->flush_dep_ndirty_children == 0)
+ if(H5C__mark_flush_dep_clean(entry->flush_dep_parent[i]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't propagate flush dep clean flag")
+ } /* end for */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_clean() */
+
+#ifndef NDEBUG
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__assert_flush_dep_nocycle()
+ *
+ * Purpose: Assert recursively that base_entry is not the same as
+ * entry, and perform the same assertion on all of entry's
+ * flush dependency parents. This is used to detect cycles
+ * created by flush dependencies.
+ *
+ * Return: void
+ *
+ * Programmer: Neil Fortner
+ * 12/10/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+H5C__assert_flush_dep_nocycle(H5C_cache_entry_t * entry,
+ H5C_cache_entry_t * base_entry)
+{
+ unsigned i; /* Local index variable */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(base_entry);
+
+ /* Make sure the entries are not the same */
+ HDassert(base_entry != entry);
+
+ /* Iterate over entry's parents (if any) */
+ for(i=0; i<entry->flush_dep_nparents; i++)
+ H5C__assert_flush_dep_nocycle(entry->flush_dep_parent[i], base_entry);
+
+ FUNC_LEAVE_NOAPI_VOID
+} /* H5C__assert_flush_dep_nocycle() */
+#endif /* NDEBUG */
+
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 9508b98..f5cb4de 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -45,22 +45,13 @@
/* Package Private Macros */
/**************************/
-/* With the introduction of the fractal heap, it is now possible for
- * entries to be dirtied, resized, and/or moved in the flush callbacks.
- * As a result, on flushes, it may be necessary to make multiple passes
- * through the slist before it is empty. The H5C__MAX_PASSES_ON_FLUSH
- * #define is used to set an upper limit on the number of passes.
- * The current value was obtained via personal communication with
- * Quincey. I have applied a fudge factor of 2.
- *
- * -- JRM
- */
-#define H5C__MAX_PASSES_ON_FLUSH 4
-
/* Cache configuration settings */
#define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */
#define H5C__H5C_T_MAGIC 0x005CAC0E
+/* Initial allocated size of the "flush_dep_parent" array */
+#define H5C_FLUSH_DEP_PARENT_INIT 8
+
/****************************************************************************
*
* We maintain doubly linked lists of instances of H5C_cache_entry_t for a
@@ -2954,6 +2945,29 @@ if ( ( (cache_ptr)->index_size != \
* When we get to using H5C in other places, we may add
* code to write trace file data at the H5C level as well.
*
+ * logging_enabled: Boolean flag indicating whether cache logging is
+ * enabled. Logging is used to record cache operations for use in
+ * debugging and performance analysis. When this flag is set
+ * to TRUE, it means that the log file is open and ready to
+ * receive log entries. It does NOT mean that cache operations
+ * are currently being recorded. That is controlled by the
+ * currently_logging flag (below).
+ *
+ * Since much of the code supporting the parallel metadata
+ * cache is in H5AC, we don't write the trace file from
+ * H5C. Instead, H5AC reads the trace_file_ptr as needed.
+ *
+ * When we get to using H5C in other places, we may add
+ * code to write trace file data at the H5C level as well.
+ *
+ * currently_logging: Boolean flag that indicates if cache operations are
+ * currently being logged. This flag is flipped by the
+ * H5Fstart/stop_mdc_logging functions.
+ *
+ * log_file_ptr: File pointer pointing to the log file. The I/O functions
+ * in stdio.h are used to write to the log file regardless of
+ * the VFD selected.
+ *
* aux_ptr: Pointer to void used to allow wrapper code to associate
* its data with an instance of H5C_t. The H5C cache code
* sets this field to NULL, and otherwise leaves it alone.
@@ -3226,6 +3240,11 @@ if ( ( (cache_ptr)->index_size != \
* to the slist since the last time this field was set to
* zero. Note that this value can be negative.
*
+ * cork_list_ptr: A skip list to track object addresses that are corked.
+ * When an entry is inserted or protected in the cache,
+ * the entry's associated object address (tag field) is
+ * checked against this skip list. If found, the entry
+ * is corked.
*
* When a cache entry is protected, it must be removed from the LRU
* list(s) as it cannot be either flushed or evicted until it is unprotected.
@@ -3812,6 +3831,9 @@ struct H5C_t {
uint32_t magic;
hbool_t flush_in_progress;
FILE * trace_file_ptr;
+ hbool_t logging_enabled;
+ hbool_t currently_logging;
+ FILE * log_file_ptr;
void * aux_ptr;
int32_t max_type_id;
const char * (* type_name_table_ptr);
@@ -3855,6 +3877,8 @@ struct H5C_t {
int64_t slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */
+ H5SL_t * cork_list_ptr; /* list of corked object addresses */
+
/* Fields for tracking protected entries */
int32_t pl_len;
size_t pl_size;
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 4c65bb4..467c5d1 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -41,8 +41,8 @@
/**************************/
/* Cache configuration settings */
-#define H5C__MAX_NUM_TYPE_IDS 28
-#define H5C__PREFIX_LEN 32
+#define H5C__MAX_NUM_TYPE_IDS 29
+#define H5C__PREFIX_LEN 32
/* This sanity checking constant was picked out of the air. Increase
* or decrease it if appropriate. Its purposes is to detect corrupt
@@ -70,6 +70,7 @@
#define H5C__CLASS_SPECULATIVE_LOAD_FLAG ((unsigned)0x1)
#define H5C__CLASS_COMPRESSED_FLAG ((unsigned)0x2)
/* The following flags may only appear in test code */
+/* The H5C__CLASS_SKIP_READS & H5C__CLASS_SKIP_WRITES flags are used in H5Oproxy.c */
#define H5C__CLASS_NO_IO_FLAG ((unsigned)0x4)
#define H5C__CLASS_SKIP_READS ((unsigned)0x8)
#define H5C__CLASS_SKIP_WRITES ((unsigned)0x10)
@@ -97,14 +98,6 @@
#define H5C__DEFAULT_MAX_CACHE_SIZE ((size_t)(4 * 1024 * 1024))
#define H5C__DEFAULT_MIN_CLEAN_SIZE ((size_t)(2 * 1024 * 1024))
-/* Maximum height of flush dependency relationships between entries. This is
- * currently tuned to the extensible array (H5EA) data structure, which only
- * requires 6 levels of dependency (i.e. heights 0-6) (actually, the extensible
- * array needs 4 levels, plus another 2 levels are needed: one for the layer
- * under the extensible array and one for the layer above it).
- */
-#define H5C__NUM_FLUSH_DEP_HEIGHTS 6
-
/* Values for cache entry magic field */
#define H5C__H5C_CACHE_ENTRY_T_MAGIC 0x005CAC0A
#define H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC 0xDeadBeef
@@ -205,12 +198,18 @@
#define H5C__FLUSH_MARKED_ENTRIES_FLAG 0x0080
#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x0100
#define H5C__READ_ONLY_FLAG 0x0200
-#define H5C__FREE_FILE_SPACE_FLAG 0x0800
-#define H5C__TAKE_OWNERSHIP_FLAG 0x1000
-#define H5C__FLUSH_LAST_FLAG 0x2000
-#define H5C__FLUSH_COLLECTIVELY_FLAG 0x4000
+#define H5C__FREE_FILE_SPACE_FLAG 0x0400
+#define H5C__TAKE_OWNERSHIP_FLAG 0x0800
+#define H5C__FLUSH_LAST_FLAG 0x1000
+#define H5C__FLUSH_COLLECTIVELY_FLAG 0x2000
+#define H5C__EVICT_ALLOW_LAST_PINS_FLAG 0x4000
#define H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG 0x8000
+/* Definitions for cache "tag" property */
+#define H5C_TAG_NAME "H5C_tag"
+#define H5C_TAG_SIZE sizeof(H5C_tag_t)
+#define H5C_TAG_DEF {(haddr_t)0, H5C_GLOBALITY_NONE}
+
/* Debugging/sanity checking/statistics settings */
#ifndef NDEBUG
#define H5C_DO_SANITY_CHECKS 1
@@ -227,6 +226,11 @@
#define H5C_DO_EXTREME_SANITY_CHECKS 0
#endif /* NDEBUG */
+/* Cork actions: cork/uncork/get cork status of an object */
+#define H5C__SET_CORK 0x1
+#define H5C__UNCORK 0x2
+#define H5C__GET_CORKED 0x4
+
/* Note: The memory sanity checks aren't going to work until I/O filters are
* changed to call a particular alloc/free routine for their buffers,
* because the H5AC__SERIALIZE_RESIZED_FLAG set by the fractal heap
@@ -268,8 +272,20 @@
/* Typedef for the main structure for the cache (defined in H5Cpkg.h) */
typedef struct H5C_t H5C_t;
+/* Cache entry tag structure */
+typedef struct H5C_tag_t {
+ haddr_t value;
+ int globality;
+} H5C_tag_t;
+
+/* Define enum for cache entry tag 'globality' value */
+typedef enum {
+ H5C_GLOBALITY_NONE=0, /* Non-global tag */
+ H5C_GLOBALITY_MINOR, /* global, not flushed during single object flush */
+ H5C_GLOBALITY_MAJOR /* global, needs to be flushed during single object flush */
+} H5C_tag_globality_t;
-/***************************************************************************
+/*
*
* Struct H5C_class_t
*
@@ -376,32 +392,68 @@ typedef struct H5C_t H5C_t;
* code. When it is set, reads on load will be skipped,
* and an uninitialize buffer will be passed to the
* deserialize function.
+ * This flag is used in H5Oproxy.c because this client does not
+ * exist on disk. See description in that file.
*
* H5C__CLASS_SKIP_WRITES: This flags is intended only for use in test
* code. When it is set, writes of buffers prepared by the
* serialize callback will be skipped.
+ * This flag is used in H5Oproxy.c because this client does not
+ * exist on disk. See description in that file.
*
* GET_LOAD_SIZE: Pointer to the 'get load size' function.
*
- * This function must be able to determine the size of the disk image of
- * a metadata cache entry, given the 'udata' that will be passed to the
- * 'deserialize' callback.
- *
- * Note that if either the H5C__CLASS_SPECULATIVE_LOAD_FLAG or
- * the H5C__CLASS_COMPRESSED_FLAG is set, the disk image size
- * returned by this callback is either a first guess (if the
- * H5C__CLASS_SPECULATIVE_LOAD_FLAG is set) or (if the
- * H5C__CLASS_COMPRESSED_FLAG is set), the exact on disk size
- * of the entry whether it has been run through filters or not.
- * In all other cases, the value returned should be the correct
- * uncompressed size of the entry.
- *
- * The typedef for the deserialize callback is as follows:
- *
- * typedef herr_t (*H5C_get_load_size_func_t)(void *udata_ptr,
- * size_t *image_len_ptr);
- *
- * The parameters of the deserialize callback are as follows:
+ * This function determines the size of a metadata cache entry based
+ * on the parameter "image":
+ * (a) When the piece of metadata has not been read, "image" is null.
+ * This function determines the size based on the information in the
+ * parameter "udata" or an initial speculative guess. The size is
+ * returned in the parameter "image_len_ptr".
+ * (b) When the piece of metadata has been read, "image" is non-null.
+ * This function might deserialize the needed metadata information
+ * to determine the actual size. The size is returned in the
+ * parameter "actual_len_ptr". The calculation of actual size is
+ * needed for checksum verification.
+ *
+ * For an entry with H5C__CLASS_NO_FLAGS_SET:
+ * This function computes either one of the following:
+ * (1) When "image" is null, it returns in "image_len_ptr"
+ * the on disk size of the entry.
+ * (2) When "image" is non-null, it does nothing.
+ * The values stored in "image_len_ptr" and "actual_len_ptr"
+ * should be the same.
+ *
+ * For an entry with H5C__CLASS_SPECULATIVE_LOAD_FLAG:
+ * This function computes either one of the following:
+ * (1) When "image" is null, it returns in "image_len_ptr"
+ * the initial guess of the entry's on disk size.
+ * (2) When "image" is non-null, it returns in "actual_len_ptr"
+ * the actual size of the entry on disk by
+ * deserializing the needed metadata information.
+ *
+ * For an entry with H5C__CLASS_COMPRESSED_FLAG:
+ * This function computes either one of the following:
+ * (1) When "image" is null, it returns in "image_len_ptr"
+ * the entry's on disk size:
+ * --for a filtered entry: the compressed size
+ * --for a non-filtered entry: the uncompressed size
+ * (2) When "image" is non-null, it returns in "actual_len_ptr":
+ * --for a filtered entry: the de-compressed size based on
+ * "udata" information
+ * --for a non-filtered entry: the uncompressed size
+ *
+ * The typedef for the get_load_size callback is as follows:
+ *
+ * typedef herr_t (*H5C_get_load_size_func_t)(const void *image_ptr,
+ * void *udata_ptr,
+ * size_t *image_len_ptr,
+ * size_t *actual_len_ptr,
+ * size_t *compressed_ptr,
+ * size_t *compressed_image_len_ptr);
+ *
+ * The parameters of the get_load_size callback are as follows:
+ *
+ * image_ptr: Pointer to a buffer containing the metadata read in.
*
* udata_ptr: Pointer to user data provided in the protect call, which
* will also be passed through to the deserialize callback.
@@ -412,16 +464,52 @@ typedef struct H5C_t H5C_t;
* This value is used by the cache to determine the size of
* the disk image for the metadata, in order to read the disk
* image from the file.
+ *
+ * actual_len_ptr: Pointer to the location containing the actual length
+ * of the metadata entry on disk.
+ *
+ * compressed_ptr: See description in image_len callback.
+ *
+ * compressed_image_len_ptr: See description in image_len callback.
*
* Processing in the get_load_size function should proceed as follows:
*
- * If successful, the function will place the length of the on disk
- * image associated with supplied user data in *image_len_ptr, and
- * then return SUCCEED.
+ * If successful, the function will place the length in either the
+ * *image_len_ptr or *actual_len_ptr associated with the supplied user data
+ * and then return SUCCEED.
*
* On failure, the function must return FAIL and push error information
* onto the error stack with the error API routines, without modifying
- * the value pointed to by the image_len_ptr.
+ * the value pointed to by image_len_ptr or actual_len_ptr.
+ *
+ *
+ * VERIFY_CHKSUM: Pointer to the verify_chksum function.
+ *
+ * This function verifies the checksum computed for the metadata is
+ * the same as the checksum stored in the metadata.
+ *
+ * It computes the checksum based on the metadata stored in the
+ * parameter "image_ptr" and the actual length of the metadata in the
+ * parameter "len" which is obtained from the "get_load_size" callback.
+ *
+ * For a filtered metadata entry with H5C__CLASS_COMPRESSED_FLAG,
+ * "len" is the decompressed size. This function will decompress
+ * the metadata and compute the checksum for the metadata with
+ * length "len".
+ *
+ * The typedef for the verify_chksum callback is as follows:
+ *
+ * typedef htri_t (*H5C_verify_chksum_func_t)(const void *image_ptr,
+ * size_t len,
+ * void *udata_ptr);
+ *
+ * The parameters of the verify_chksum callback are as follows:
+ *
+ * image_ptr: Pointer to a buffer containing the metadata read in.
+ *
+ * len: The actual length of the metadata.
+ *
+ * udata_ptr: Pointer to user data.
*
*
* DESERIALIZE: Pointer to the deserialize function.
@@ -460,6 +548,12 @@ typedef struct H5C_t H5C_t;
*
* Processing in the deserialize function should proceed as follows:
*
+ * NOTE: With the modification to get_load_size callback (see above
+ * description) to compute the actual length of a metadata entry for
+ * checksum verification, the value in the parameter "len" will be the
+ * actual length of the metadata and a subsequent call to image_len
+ * callback is not needed.
+ *
* If the image contains valid data, and is of the correct length,
* the deserialize function must allocate space for an in core
* representation of that data, load the contents of the image into
@@ -513,6 +607,10 @@ typedef struct H5C_t H5C_t;
*
*
* IMAGE_LEN: Pointer to the image length callback.
+
+ * NOTE: With the modification to the get_load_size callback (see above
+ * description) due to checksum verification, the image_len callback
+ * is mainly used for newly inserted entries and assert verification.
*
* This callback exists primarily to support
* H5C__CLASS_SPECULATIVE_LOAD_FLAG and H5C__CLASS_COMPRESSED_FLAG
@@ -1072,8 +1170,10 @@ typedef enum H5C_notify_action_t {
} H5C_notify_action_t;
/* Cache client callback function pointers */
-typedef herr_t (*H5C_get_load_size_func_t)(const void *udata_ptr,
- size_t *image_len_ptr);
+typedef herr_t (*H5C_get_load_size_func_t)(const void *image_ptr, void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len_ptr,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+typedef htri_t (*H5C_verify_chksum_func_t)(const void *image_ptr, size_t len, void *udata_ptr);
typedef void *(*H5C_deserialize_func_t)(const void *image_ptr,
size_t len, void *udata_ptr, hbool_t *dirty_ptr);
typedef herr_t (*H5C_image_len_func_t)(const void *thing,
@@ -1097,6 +1197,7 @@ typedef struct H5C_class_t {
H5FD_mem_t mem_type;
unsigned flags;
H5C_get_load_size_func_t get_load_size;
+ H5C_verify_chksum_func_t verify_chksum;
H5C_deserialize_func_t deserialize;
H5C_image_len_func_t image_len;
H5C_pre_serialize_func_t pre_serialize;
@@ -1273,6 +1374,9 @@ typedef int H5C_ring_t;
* The name is not particularly descriptive, but is retained
* to avoid changes in existing code.
*
+ * is_corked: Boolean flag indicating whether the cache entry associated
+ * with an object is corked or not corked.
+ *
* is_dirty: Boolean flag indicating whether the contents of the cache
* entry has been modified since the last time it was written
* to disk.
@@ -1451,33 +1555,29 @@ typedef int H5C_ring_t;
*
* Fields supporting the 'flush dependency' feature:
*
- * Entries in the cache may have a 'flush dependency' on another entry in the
+ * Entries in the cache may have a 'flush dependencies' on other entries in the
* cache. A flush dependency requires that all dirty child entries be flushed
* to the file before a dirty parent entry (of those child entries) can be
* flushed to the file. This can be used by cache clients to create data
* structures that allow Single-Writer/Multiple-Reader (SWMR) access for the
* data structure.
*
- * The leaf child entry will have a "height" of 0, with any parent entries
- * having a height of 1 greater than the maximum height of any of their child
- * entries (flush dependencies are allowed to create asymmetric trees of
- * relationships).
+ * flush_dep_parent: Pointer to the array of flush dependency parent entries
+ * for this entry.
*
- * flush_dep_parent: Pointer to the parent entry for an entry in a flush
- * dependency relationship.
+ * flush_dep_nparents: Number of flush dependency parent entries for this
+ * entry, i.e. the number of valid elements in flush_dep_parent.
*
- * child_flush_dep_height_rc: An array of reference counts for child entries,
- * where the number of children of each height is tracked.
+ * flush_dep_parent_nalloc: The number of allocated elements in
+ * flush_dep_parent.
*
- * flush_dep_height: The height of the entry, which is one greater than the
- * maximum height of any of its child entries..
+ * flush_dep_nchildren: Number of flush dependency children for this entry. If
+ * this field is nonzero, then this entry must be pinned and
+ * therefore cannot be evicted.
*
- * pinned_from_client: Whether the entry was pinned by an explicit pin request
- * from a cache client.
- *
- * pinned_from_cache: Whether the entry was pinned implicitly as a
- * request of being a parent entry in a flush dependency
- * relationship.
+ * flush_dep_ndirty_children: Number of flush dependency children that are
+ * either dirty or have a nonzero flush_dep_ndirty_children. If
+ * this field is nonzero, then this entry cannot be flushed.
*
*
* Fields supporting the hash table:
@@ -1599,6 +1699,8 @@ typedef struct H5C_cache_entry_t {
hbool_t image_up_to_date;
const H5C_class_t * type;
haddr_t tag;
+ int globality;
+ hbool_t is_corked;
hbool_t is_dirty;
hbool_t dirtied;
hbool_t is_protected;
@@ -1620,9 +1722,11 @@ typedef struct H5C_cache_entry_t {
H5C_ring_t ring;
/* fields supporting the 'flush dependency' feature: */
- struct H5C_cache_entry_t * flush_dep_parent;
- uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS];
- unsigned flush_dep_height;
+ struct H5C_cache_entry_t ** flush_dep_parent;
+ unsigned flush_dep_nparents;
+ unsigned flush_dep_parent_nalloc;
+ unsigned flush_dep_nchildren;
+ unsigned flush_dep_ndirty_children;
hbool_t pinned_from_client;
hbool_t pinned_from_cache;
@@ -1942,14 +2046,26 @@ H5_DLL H5C_t *H5C_create(size_t max_cache_size, size_t min_clean_size,
int max_type_id, const char *(*type_name_table_ptr),
H5C_write_permitted_func_t check_write_permitted, hbool_t write_permitted,
H5C_log_flush_func_t log_flush, void *aux_ptr);
+H5_DLL herr_t H5C_set_up_logging(H5C_t *cache_ptr, const char log_location[], hbool_t start_immediately);
+H5_DLL herr_t H5C_tear_down_logging(H5C_t *cache_ptr);
+H5_DLL herr_t H5C_start_logging(H5C_t *cache_ptr);
+H5_DLL herr_t H5C_stop_logging(H5C_t *cache_ptr);
+H5_DLL herr_t H5C_get_logging_status(const H5C_t *cache_ptr, /*OUT*/ hbool_t *is_enabled,
+ /*OUT*/ hbool_t *is_currently_logging);
+H5_DLL herr_t H5C_write_log_message(const H5C_t *cache_ptr, const char message[]);
+
H5_DLL void H5C_def_auto_resize_rpt_fcn(H5C_t *cache_ptr, int32_t version,
double hit_rate, enum H5C_resize_status status,
size_t old_max_cache_size, size_t new_max_cache_size,
size_t old_min_clean_size, size_t new_min_clean_size);
H5_DLL herr_t H5C_dest(H5F_t *f, hid_t dxpl_id);
-H5_DLL herr_t H5C_expunge_entry(H5F_t *f, hid_t dxpl_id,
- const H5C_class_t *type, haddr_t addr, unsigned flags);
+H5_DLL herr_t H5C_evict(H5F_t *f, hid_t dxpl_id);
+H5_DLL herr_t H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type, haddr_t addr, unsigned flags);
+
+
H5_DLL herr_t H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags);
+H5_DLL herr_t H5C_flush_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag);
+H5_DLL herr_t H5C_evict_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag);
H5_DLL herr_t H5C_flush_to_min_clean(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t *cache_ptr,
H5C_auto_size_ctl_t *config_ptr);
@@ -1959,7 +2075,7 @@ H5_DLL herr_t H5C_get_cache_size(H5C_t *cache_ptr, size_t *max_size_ptr,
H5_DLL herr_t H5C_get_cache_hit_rate(H5C_t *cache_ptr, double *hit_rate_ptr);
H5_DLL herr_t H5C_get_entry_status(const H5F_t *f, haddr_t addr,
size_t *size_ptr, hbool_t *in_cache_ptr, hbool_t *is_dirty_ptr,
- hbool_t *is_protected_ptr, hbool_t *is_pinned_ptr,
+ hbool_t *is_protected_ptr, hbool_t *is_pinned_ptr, hbool_t *is_corked_ptr,
hbool_t *is_flush_dep_parent_ptr, hbool_t *is_flush_dep_child_ptr);
H5_DLL herr_t H5C_get_evictions_enabled(const H5C_t *cache_ptr, hbool_t *evictions_enabled_ptr);
H5_DLL void * H5C_get_aux_ptr(const H5C_t *cache_ptr);
@@ -1991,7 +2107,8 @@ H5_DLL herr_t H5C_unprotect(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *thing,
H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr,
unsigned int tests);
H5_DLL herr_t H5C_ignore_tags(H5C_t *cache_ptr);
-H5_DLL void H5C_retag_copied_metadata(H5C_t *cache_ptr, haddr_t metadata_tag);
+H5_DLL void H5C_retag_entries(H5C_t * cache_ptr, haddr_t src_tag, haddr_t dest_tag);
+H5_DLL herr_t H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked);
H5_DLL herr_t H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring);
#ifdef H5_HAVE_PARALLEL
diff --git a/src/H5D.c b/src/H5D.c
index 3efae8e..b1643d1 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -924,4 +924,152 @@ H5Dset_extent(hid_t dset_id, const hsize_t size[])
done:
FUNC_LEAVE_API(ret_value)
} /* end H5Dset_extent() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Dflush
+ *
+ * Purpose: Flushes all buffers associated with a dataset.
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Mike McGreevy
+ * May 19, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Dflush(hid_t dset_id)
+{
+ H5D_t *dset; /* Dataset for this operation */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", dset_id);
+
+ /* Check args */
+ if(NULL == (dset = (H5D_t *)H5I_object_verify(dset_id, H5I_DATASET)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
+
+ /* Flush any dataset information still cached in memory */
+ if(H5D__flush_real(dset, H5AC_dxpl_id) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to flush cached dataset info")
+
+ /* Flush object's metadata to file */
+ if(H5O_flush_common(&dset->oloc, dset_id, H5AC_dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to flush dataset and object flush callback")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Dflush */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Drefresh
+ *
+ * Purpose: Refreshes all buffers associated with a dataset.
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Mike McGreevy
+ * July 21, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Drefresh(hid_t dset_id)
+{
+ H5D_t *dset; /* Dataset to refresh */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", dset_id);
+
+ /* Check args */
+ if(NULL == (dset = (H5D_t *)H5I_object_verify(dset_id, H5I_DATASET)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
+
+ /* Call private function to refresh the dataset object */
+ if((H5D__refresh(dset_id, dset, H5AC_dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTLOAD, FAIL, "unable to refresh dataset")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Drefresh() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5Dformat_convert (Internal)
+ *
+ * Purpose: Convert a dataset's chunk indexing type to version 1 B-tree
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Vailin Choi; Feb 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Dformat_convert(hid_t dset_id)
+{
+ H5D_t *dset; /* Dataset to refresh */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", dset_id);
+
+ /* Check args */
+ if(NULL == (dset = (H5D_t *)H5I_object_verify(dset_id, H5I_DATASET)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
+
+ /* Nothing to do if not a chunked dataset */
+ if(dset->shared->layout.type != H5D_CHUNKED)
+ HGOTO_DONE(SUCCEED)
+
+ /* Nothing to do if the chunk indexing type is already version 1 B-tree */
+ if(dset->shared->layout.u.chunk.idx_type == H5D_CHUNK_IDX_BTREE)
+ HGOTO_DONE(SUCCEED)
+
+ /* Call private function to do the conversion */
+ if((H5D__format_convert(dset, H5AC_dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTLOAD, FAIL, "unable to convert chunk indexing type for dataset")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Dformat_convert */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Dget_chunk_index_type (Internal)
+ *
+ * Purpose: Retrieve a dataset's chunk indexing type
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Vailin Choi; Feb 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Dget_chunk_index_type(hid_t did, H5D_chunk_index_t *idx_type)
+{
+ H5D_t *dset; /* Dataset to refresh */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "i*Dk", did, idx_type);
+
+ /* Check args */
+ if(NULL == (dset = (H5D_t *)H5I_object_verify(did, H5I_DATASET)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
+
+ /* Should be a chunked dataset */
+ if(dset->shared->layout.type != H5D_CHUNKED)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dataset is not chunked")
+
+ if(idx_type) /* Get the chunk indexing type */
+ *idx_type = dset->shared->layout.u.chunk.idx_type;
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Dget_chunk_index_type() */
diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c
index 85238da..159acb0 100644
--- a/src/H5Dbtree.c
+++ b/src/H5Dbtree.c
@@ -153,6 +153,7 @@ static herr_t H5D__btree_idx_dest(const H5D_chk_idx_info_t *idx_info);
/* v1 B-tree indexed chunk I/O ops */
const H5D_chunk_ops_t H5D_COPS_BTREE[1] = {{
+ FALSE, /* v1 B-tree indices do not support SWMR access */
H5D__btree_idx_init, /* insert */
H5D__btree_idx_create, /* create */
H5D__btree_idx_is_space_alloc, /* is_space_alloc */
@@ -939,6 +940,7 @@ H5D__btree_idx_create(const H5D_chk_idx_info_t *idx_info)
/* Initialize "user" data for B-tree callbacks, etc. */
udata.layout = idx_info->layout;
udata.storage = idx_info->storage;
+ udata.rdcc = NULL;
/* Create the v1 B-tree for the chunk index */
if(H5B_create(idx_info->f, idx_info->dxpl_id, H5B_BTREE, &udata, &(idx_info->storage->idx_addr)/*out*/) < 0)
diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c
new file mode 100644
index 0000000..5ff174d
--- /dev/null
+++ b/src/H5Dbtree2.c
@@ -0,0 +1,1640 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ *
+ * Purpose: v2 B-tree indexing for chunked datasets with > 1 unlimited dimensions.
+ * Each dataset chunk in the b-tree is identified by its dimensional offset.
+ *
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5Dmodule.h" /* This source code file is part of the H5D module */
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Dpkg.h" /* Datasets */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5MFprivate.h" /* File space management */
+#include "H5VMprivate.h" /* Vector and array functions */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+/* User data for creating callback context */
+typedef struct H5D_bt2_ctx_ud_t {
+ const H5F_t *f; /* Pointer to file info */
+ uint32_t chunk_size; /* Size of chunk (bytes; for filtered object) */
+ unsigned ndims; /* Number of dimensions */
+ uint32_t *dim; /* Size of chunk in elements */
+} H5D_bt2_ctx_ud_t;
+
+/* The callback context */
+typedef struct H5D_bt2_ctx_t {
+ uint32_t chunk_size; /* Size of chunk (bytes; constant for unfiltered object) */
+ size_t sizeof_addr; /* Size of file addresses in the file (bytes) */
+ size_t chunk_size_len; /* Size of chunk sizes in the file (bytes) */
+ unsigned ndims; /* Number of dimensions in chunk */
+ uint32_t *dim; /* Size of chunk in elements */
+} H5D_bt2_ctx_t;
+
+/* User data for the chunk's removal callback routine */
+typedef struct H5D_bt2_remove_ud_t {
+ H5F_t *f; /* File pointer for operation */
+ hid_t dxpl_id; /* DXPL ID for operation */
+} H5D_bt2_remove_ud_t;
+
+/* Callback info for iteration over chunks in v2 B-tree */
+typedef struct H5D_bt2_it_ud_t {
+ H5D_chunk_cb_func_t cb; /* Callback routine for the chunk */
+ void *udata; /* User data for the chunk's callback routine */
+} H5D_bt2_it_ud_t;
+
+/* User data for compare callback */
+typedef struct H5D_bt2_ud_t {
+ H5D_chunk_rec_t rec; /* The record to search for */
+ unsigned ndims; /* Number of dimensions for the chunked dataset */
+} H5D_bt2_ud_t;
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/* Shared v2 B-tree methods for indexing filtered and non-filtered chunked datasets */
+static void *H5D__bt2_crt_context(void *udata);
+static herr_t H5D__bt2_dst_context(void *ctx);
+static herr_t H5D__bt2_store(void *native, const void *udata);
+static herr_t H5D__bt2_compare(const void *rec1, const void *rec2);
+static void *H5D__bt2_crt_dbg_context(H5F_t *f, hid_t dxpl_id, haddr_t obj_addr);
+static herr_t H5D__bt2_dst_dbg_context(void *_u_ctx);
+
+/* v2 B-tree class for indexing non-filtered chunked datasets */
+static herr_t H5D__bt2_unfilt_encode(uint8_t *raw, const void *native, void *ctx);
+static herr_t H5D__bt2_unfilt_decode(const uint8_t *raw, void *native, void *ctx);
+static herr_t H5D__bt2_unfilt_debug(FILE *stream, int indent, int fwidth,
+ const void *record, const void *u_ctx);
+
+/* v2 B-tree class for indexing filtered chunked datasets */
+static herr_t H5D__bt2_filt_encode(uint8_t *raw, const void *native, void *ctx);
+static herr_t H5D__bt2_filt_decode(const uint8_t *raw, void *native, void *ctx);
+static herr_t H5D__bt2_filt_debug(FILE *stream, int indent, int fwidth,
+ const void *record, const void *u_ctx);
+
+/* Helper routine */
+static herr_t H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info);
+
+/* Callback for H5B2_iterate() which is called in H5D__bt2_idx_iterate() */
+static int H5D__bt2_idx_iterate_cb(const void *_record, void *_udata);
+
+/* Callback for H5B2_find() which is called in H5D__bt2_idx_get_addr() */
+static herr_t H5D__bt2_found_cb(const void *nrecord, void *op_data);
+
+/*
+ * Callback for H5B2_remove() and H5B2_delete() which is called
+ * in H5D__bt2_idx_remove() and H5D__bt2_idx_delete().
+ */
+static herr_t H5D__bt2_remove_cb(const void *nrecord, void *_udata);
+
+/* Callback for H5B2_modify() which is called in H5D__bt2_idx_insert() */
+static herr_t H5D__bt2_mod_cb(void *_record, void *_op_data, hbool_t *changed);
+
+/* Chunked layout indexing callbacks for v2 B-tree indexing */
+static herr_t H5D__bt2_idx_init(const H5D_chk_idx_info_t *idx_info,
+ const H5S_t *space, haddr_t dset_ohdr_addr);
+static herr_t H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info);
+static hbool_t H5D__bt2_idx_is_space_alloc(const H5O_storage_chunk_t *storage);
+static herr_t H5D__bt2_idx_insert(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_ud_t *udata, const H5D_t *dset);
+static herr_t H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_ud_t *udata);
+static int H5D__bt2_idx_iterate(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_cb_func_t chunk_cb, void *chunk_udata);
+static herr_t H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_common_ud_t *udata);
+static herr_t H5D__bt2_idx_delete(const H5D_chk_idx_info_t *idx_info);
+static herr_t H5D__bt2_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
+ const H5D_chk_idx_info_t *idx_info_dst);
+static herr_t H5D__bt2_idx_copy_shutdown(H5O_storage_chunk_t *storage_src,
+ H5O_storage_chunk_t *storage_dst, hid_t dxpl_id);
+static herr_t H5D__bt2_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *size);
+static herr_t H5D__bt2_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr);
+static herr_t H5D__bt2_idx_dump(const H5O_storage_chunk_t *storage,
+ FILE *stream);
+static herr_t H5D__bt2_idx_dest(const H5D_chk_idx_info_t *idx_info);
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/* Chunked dataset I/O ops for v2 B-tree indexing */
+const H5D_chunk_ops_t H5D_COPS_BT2[1] = {{
+ TRUE, /* v2 B-tree indices support SWMR access */
+ H5D__bt2_idx_init, /* init */
+ H5D__bt2_idx_create, /* create */
+ H5D__bt2_idx_is_space_alloc, /* is_space_alloc */
+ H5D__bt2_idx_insert, /* insert */
+ H5D__bt2_idx_get_addr, /* get_addr */
+ NULL, /* resize */
+ H5D__bt2_idx_iterate, /* iterate */
+ H5D__bt2_idx_remove, /* remove */
+ H5D__bt2_idx_delete, /* delete */
+ H5D__bt2_idx_copy_setup, /* copy_setup */
+ H5D__bt2_idx_copy_shutdown, /* copy_shutdown */
+ H5D__bt2_idx_size, /* size */
+ H5D__bt2_idx_reset, /* reset */
+ H5D__bt2_idx_dump, /* dump */
+ H5D__bt2_idx_dest /* destroy */
+}};
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/* v2 B-tree class for indexing non-filtered chunked datasets */
+const H5B2_class_t H5D_BT2[1] = {{ /* B-tree class information */
+ H5B2_CDSET_ID, /* Type of B-tree */
+ "H5B2_CDSET_ID", /* Name of B-tree class */
+ sizeof(H5D_chunk_rec_t), /* Size of native record */
+ H5D__bt2_crt_context, /* Create client callback context */
+ H5D__bt2_dst_context, /* Destroy client callback context */
+ H5D__bt2_store, /* Record storage callback */
+ H5D__bt2_compare, /* Record comparison callback */
+ H5D__bt2_unfilt_encode, /* Record encoding callback */
+ H5D__bt2_unfilt_decode, /* Record decoding callback */
+ H5D__bt2_unfilt_debug, /* Record debugging callback */
+ H5D__bt2_crt_dbg_context, /* Create debugging context */
+ H5D__bt2_dst_dbg_context /* Destroy debugging context */
+}};
+
+/* v2 B-tree class for indexing filtered chunked datasets */
+const H5B2_class_t H5D_BT2_FILT[1] = {{ /* B-tree class information */
+ H5B2_CDSET_FILT_ID, /* Type of B-tree */
+ "H5B2_CDSET_FILT_ID", /* Name of B-tree class */
+ sizeof(H5D_chunk_rec_t), /* Size of native record */
+ H5D__bt2_crt_context, /* Create client callback context */
+ H5D__bt2_dst_context, /* Destroy client callback context */
+ H5D__bt2_store, /* Record storage callback */
+ H5D__bt2_compare, /* Record comparison callback */
+ H5D__bt2_filt_encode, /* Record encoding callback */
+ H5D__bt2_filt_decode, /* Record decoding callback */
+ H5D__bt2_filt_debug, /* Record debugging callback */
+ H5D__bt2_crt_dbg_context, /* Create debugging context */
+ H5D__bt2_dst_dbg_context /* Destroy debugging context */
+}};
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+/* Declare a free list to manage the H5D_bt2_ctx_t struct */
+H5FL_DEFINE_STATIC(H5D_bt2_ctx_t);
+/* Declare a free list to manage the H5D_bt2_ctx_ud_t struct */
+H5FL_DEFINE_STATIC(H5D_bt2_ctx_ud_t);
+/* Declare a free list to manage the page elements */
+H5FL_BLK_DEFINE(chunk_dim);
+
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_crt_context
+ *
+ * Purpose: Create client callback context
+ *
+ * Return: Success: non-NULL
+ * Failure: NULL
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5D__bt2_crt_context(void *_udata)
+{
+ H5D_bt2_ctx_ud_t *udata = (H5D_bt2_ctx_ud_t *)_udata; /* User data for building callback context */
+ H5D_bt2_ctx_t *ctx; /* Callback context structure */
+ uint32_t *my_dim = NULL; /* Pointer to copy of chunk dimension size */
+ void *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(udata);
+ HDassert(udata->f);
+ HDassert(udata->ndims > 0 && udata->ndims < H5O_LAYOUT_NDIMS);
+
+ /* Allocate callback context */
+ if(NULL == (ctx = H5FL_MALLOC(H5D_bt2_ctx_t)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate callback context")
+
+ /* Determine the size of addresses and set the chunk size and # of dimensions for the dataset */
+ ctx->sizeof_addr = H5F_SIZEOF_ADDR(udata->f);
+ ctx->chunk_size = udata->chunk_size;
+ ctx->ndims = udata->ndims;
+
+ /* Set up the "local" information for this dataset's chunk dimension sizes */
+ if(NULL == (my_dim = (uint32_t *)H5FL_BLK_MALLOC(chunk_dim, H5O_LAYOUT_NDIMS * sizeof(uint32_t))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate chunk dims")
+ HDmemcpy(my_dim, udata->dim, H5O_LAYOUT_NDIMS * sizeof(uint32_t));
+ ctx->dim = my_dim;
+
+ /*
+ * Compute the size required for encoding the size of a chunk,
+ * allowing for an extra byte, in case the filter makes the chunk larger.
+ */
+ ctx->chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)udata->chunk_size) + 8) / 8);
+ if(ctx->chunk_size_len > 8)
+ ctx->chunk_size_len = 8;
+
+ /* Set return value */
+ ret_value = ctx;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__bt2_crt_context() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_dst_context
+ *
+ * Purpose: Destroy client callback context
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_dst_context(void *_ctx)
+{
+ H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity check */
+ HDassert(ctx);
+
+ /* Free array for chunk dimension sizes */
+ if(ctx->dim)
+ (void)H5FL_BLK_FREE(chunk_dim, ctx->dim);
+ /* Release callback context */
+ ctx = H5FL_FREE(H5D_bt2_ctx_t, ctx);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__bt2_dst_context() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_store
+ *
+ * Purpose: Store native information into record for v2 B-tree
+ * (non-filtered)
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_store(void *record, const void *_udata)
+{
+ const H5D_bt2_ud_t *udata = (const H5D_bt2_ud_t *)_udata; /* User data */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ *(H5D_chunk_rec_t *)record = udata->rec;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__bt2_store() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_compare
+ *
+ * Purpose: Compare two native information records, according to some key
+ * (non-filtered)
+ *
+ * Return: <0 if rec1 < rec2
+ * =0 if rec1 == rec2
+ * >0 if rec1 > rec2
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_compare(const void *_udata, const void *_rec2)
+{
+ const H5D_bt2_ud_t *udata = (const H5D_bt2_ud_t *)_udata; /* User data */
+ const H5D_chunk_rec_t *rec1 = &(udata->rec); /* The search record */
+ const H5D_chunk_rec_t *rec2 = (const H5D_chunk_rec_t *)_rec2; /* The native record */
+ herr_t ret_value = FAIL; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(rec1);
+ HDassert(rec2);
+
+ /* Compare the offsets but ignore the other fields */
+ ret_value = H5VM_vector_cmp_u(udata->ndims, rec1->scaled, rec2->scaled);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__bt2_compare() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_crt_dbg_context
+ *
+ * Purpose: Create user data for debugged callback context
+ *
+ * Return: Success: non-NULL
+ * Failure: NULL
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5D__bt2_crt_dbg_context(H5F_t *f, hid_t dxpl_id, haddr_t obj_addr)
+{
+ H5D_bt2_ctx_ud_t *u_ctx = NULL; /* User data for creating callback context */
+ H5O_loc_t obj_loc; /* Pointer to an object's location */
+ hbool_t obj_opened = FALSE; /* Flag to indicate that the object header was opened */
+ H5O_layout_t layout; /* Layout message */
+ void *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(H5F_addr_defined(obj_addr));
+
+ /* Set up the object header location info */
+ H5O_loc_reset(&obj_loc);
+ obj_loc.file = f;
+ obj_loc.addr = obj_addr;
+
+ /* Open the object header where the layout message resides */
+ if(H5O_open(&obj_loc) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, NULL, "can't open object header")
+ obj_opened = TRUE;
+
+ /* Read the layout message (uses dxpl_id, so it must not be H5_ATTR_UNUSED) */
+ if(NULL == H5O_msg_read(&obj_loc, H5O_LAYOUT_ID, &layout, dxpl_id))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get layout info")
+
+ /* close the object header */
+ if(H5O_close(&obj_loc) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header")
+
+ /* Allocate structure for storing user data to create callback context */
+ if(NULL == (u_ctx = H5FL_MALLOC(H5D_bt2_ctx_ud_t)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate user data context structure ")
+
+ /* Set information for context structure */
+ u_ctx->f = f;
+ u_ctx->chunk_size = layout.u.chunk.size;
+ u_ctx->ndims = layout.u.chunk.ndims - 1; /* NOTE(review): u_ctx->dim is left unset here, yet the *_debug callbacks read u_ctx->dim[u] -- confirm intended */
+
+ /* Set return value */
+ ret_value = u_ctx;
+
+done:
+ /* Cleanup on error */
+ if(ret_value == NULL) {
+ /* Release context structure */
+ if(u_ctx)
+ u_ctx = H5FL_FREE(H5D_bt2_ctx_ud_t, u_ctx);
+
+ /* Close object header */
+ if(obj_opened) {
+ if(H5O_close(&obj_loc) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header")
+ } /* end if */
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__bt2_crt_dbg_context() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_dst_dbg_context
+ *
+ * Purpose: Destroy client callback context
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_dst_dbg_context(void *_u_ctx)
+{
+ H5D_bt2_ctx_ud_t *u_ctx = (H5D_bt2_ctx_ud_t *)_u_ctx; /* User data for creating callback context */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity check */
+ HDassert(u_ctx);
+
+ /* Release user data for creating callback context */
+ u_ctx = H5FL_FREE(H5D_bt2_ctx_ud_t, u_ctx);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__bt2_dst_dbg_context() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_unfilt_encode
+ *
+ * Purpose: Encode native information into raw form for storing on disk
+ * (non-filtered)
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_unfilt_encode(uint8_t *raw, const void *_record, void *_ctx)
+{
+ H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */
+ const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */
+ unsigned u; /* Local index variable */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity check */
+ HDassert(ctx);
+
+ /* Encode the record's fields */
+ H5F_addr_encode_len(ctx->sizeof_addr, &raw, record->chunk_addr);
+ /* (Don't encode the chunk size & filter mask for non-filtered B-tree records) */
+ for(u = 0; u < ctx->ndims; u++)
+ UINT64ENCODE(raw, record->scaled[u]);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__bt2_unfilt_encode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_unfilt_decode
+ *
+ * Purpose: Decode raw disk form of record into native form
+ * (non-filtered)
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_unfilt_decode(const uint8_t *raw, void *_record, void *_ctx)
+{
+ H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */
+ H5D_chunk_rec_t *record = (H5D_chunk_rec_t *)_record; /* The native record */
+ unsigned u; /* Local index variable */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity check */
+ HDassert(ctx);
+
+ /* Decode the record's fields */
+ H5F_addr_decode_len(ctx->sizeof_addr, &raw, &record->chunk_addr);
+ record->nbytes = ctx->chunk_size;
+ record->filter_mask = 0;
+ for(u = 0; u < ctx->ndims; u++)
+ UINT64DECODE(raw, record->scaled[u]);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__bt2_unfilt_decode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_unfilt_debug
+ *
+ * Purpose: Debug native form of record (non-filtered)
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_unfilt_debug(FILE *stream, int indent, int fwidth,
+ const void *_record, const void *_u_ctx)
+{
+ const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */
+ const H5D_bt2_ctx_ud_t *u_ctx = (const H5D_bt2_ctx_ud_t *)_u_ctx; /* User data for creating callback context */
+ unsigned u; /* Local index variable */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(record);
+ HDassert(u_ctx->chunk_size == record->nbytes);
+ HDassert(0 == record->filter_mask);
+
+ HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, "Chunk address:", record->chunk_addr);
+ HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Logical offset:");
+ for(u = 0; u < u_ctx->ndims; u++)
+ HDfprintf(stream, "%s%Hd", u?", ":"", record->scaled[u] * u_ctx->dim[u]);
+ HDfputs("}\n", stream);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__bt2_unfilt_debug() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_filt_encode
+ *
+ * Purpose: Encode native information into raw form for storing on disk
+ * (filtered)
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_filt_encode(uint8_t *raw, const void *_record, void *_ctx)
+{
+ H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */
+ const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */
+ unsigned u; /* Local index variable */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity check */
+ HDassert(ctx);
+ HDassert(record);
+ HDassert(H5F_addr_defined(record->chunk_addr));
+ HDassert(0 != record->nbytes);
+
+ /* Encode the record's fields */
+ H5F_addr_encode_len(ctx->sizeof_addr, &raw, record->chunk_addr);
+ UINT64ENCODE_VAR(raw, record->nbytes, ctx->chunk_size_len);
+ UINT32ENCODE(raw, record->filter_mask);
+ for(u = 0; u < ctx->ndims; u++)
+ UINT64ENCODE(raw, record->scaled[u]);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__bt2_filt_encode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_filt_decode
+ *
+ * Purpose: Decode raw disk form of record into native form
+ * (filtered)
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_filt_decode(const uint8_t *raw, void *_record, void *_ctx)
+{
+ H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */
+ H5D_chunk_rec_t *record = (H5D_chunk_rec_t *)_record; /* The native record */
+ unsigned u; /* Local index variable */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity check */
+ HDassert(ctx);
+ HDassert(record);
+
+ /* Decode the record's fields */
+ H5F_addr_decode_len(ctx->sizeof_addr, &raw, &record->chunk_addr);
+ UINT64DECODE_VAR(raw, record->nbytes, ctx->chunk_size_len);
+ UINT32DECODE(raw, record->filter_mask);
+ for(u = 0; u < ctx->ndims; u++)
+ UINT64DECODE(raw, record->scaled[u]);
+
+ /* Sanity checks */
+ HDassert(H5F_addr_defined(record->chunk_addr));
+ HDassert(0 != record->nbytes);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__bt2_filt_decode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_filt_debug
+ *
+ * Purpose: Debug native form of record (filtered)
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_filt_debug(FILE *stream, int indent, int fwidth,
+ const void *_record, const void *_u_ctx)
+{
+ const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */
+ const H5D_bt2_ctx_ud_t *u_ctx = (const H5D_bt2_ctx_ud_t *)_u_ctx; /* User data for creating callback context */
+ unsigned u; /* Local index variable */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(record);
+ HDassert(H5F_addr_defined(record->chunk_addr));
+ HDassert(0 != record->nbytes);
+
+ HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, "Chunk address:", record->chunk_addr);
+ HDfprintf(stream, "%*s%-*s %u bytes\n", indent, "", fwidth, "Chunk size:", (unsigned)record->nbytes);
+ HDfprintf(stream, "%*s%-*s 0x%08x\n", indent, "", fwidth, "Filter mask:", record->filter_mask);
+
+ HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Logical offset:");
+ for(u = 0; u < u_ctx->ndims; u++)
+ HDfprintf(stream, "%s%Hd", u?", ":"", record->scaled[u] * u_ctx->dim[u]);
+ HDfputs("}\n", stream);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__bt2_filt_debug() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_init
+ *
+ * Purpose: Initialize the indexing information for a dataset.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Wednesday, May 23, 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_init(const H5D_chk_idx_info_t *idx_info,
+ const H5S_t H5_ATTR_UNUSED *space, haddr_t dset_ohdr_addr)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args (idx_info IS used below, so it must not carry H5_ATTR_UNUSED) */
+ HDassert(H5F_addr_defined(dset_ohdr_addr));
+
+ idx_info->storage->u.btree2.dset_ohdr_addr = dset_ohdr_addr;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__bt2_idx_init() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_open()
+ *
+ * Purpose: Opens an existing v2 B-tree.
+ *
+ * Note: This information is passively initialized from each index
+ * operation callback because those abstract chunk index operations
+ * are designed to work with the v2 B-tree chunk indices also,
+ * which don't require an 'open' for the data structure.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info)
+{
+ H5D_bt2_ctx_ud_t u_ctx; /* user data for creating context */
+ H5O_loc_t oloc; /* Temporary object header location for dataset */
+ H5O_proxy_t *oh_proxy = NULL; /* Dataset's object header proxy */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check args */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(H5D_CHUNK_IDX_BT2 == idx_info->layout->idx_type);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(NULL == idx_info->storage->u.btree2.bt2);
+
+ /* Set up the user data */
+ u_ctx.f = idx_info->f;
+ u_ctx.ndims = idx_info->layout->ndims - 1;
+ u_ctx.chunk_size = idx_info->layout->size;
+ u_ctx.dim = idx_info->layout->dim;
+
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) {
+ /* Set up object header location for dataset */
+ H5O_loc_reset(&oloc);
+ oloc.file = idx_info->f;
+ oloc.addr = idx_info->storage->u.btree2.dset_ohdr_addr;
+
+ /* Pin the dataset's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy(&oloc, idx_info->dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header proxy")
+ } /* end if */
+
+ /* Open v2 B-tree for the chunk index */
+ if(NULL == (idx_info->storage->u.btree2.bt2 = H5B2_open(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &u_ctx, oh_proxy)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open v2 B-tree for tracking chunked dataset")
+
+done:
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header proxy")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__bt2_idx_open() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_create
+ *
+ * Purpose: Create the v2 B-tree for tracking dataset chunks
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info)
+{
+    H5B2_create_t bt2_cparam;           /* v2 B-tree creation parameters */
+    H5D_bt2_ctx_ud_t u_ctx;             /* Data for context callback */
+    H5O_loc_t oloc;                     /* Temporary object header location for dataset */
+    H5O_proxy_t *oh_proxy = NULL;       /* Dataset's object header proxy */
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+    HDassert(!H5F_addr_defined(idx_info->storage->idx_addr));
+
+    /* Raw record size: chunk address plus one 64-bit scaled offset per
+     * dataset dimension (the last, elmt-size "dimension" is omitted) */
+    bt2_cparam.rrec_size = H5F_SIZEOF_ADDR(idx_info->f)     /* Address of chunk */
+            + (idx_info->layout->ndims - 1) * 8;            /* # of dimensions x 64-bit chunk offsets */
+
+    /* General parameters */
+    if(idx_info->pline->nused > 0) {
+        unsigned chunk_size_len;        /* Size of encoded chunk size */
+
+        /*
+         * Compute the size required for encoding the size of a chunk,
+         * allowing for an extra byte, in case the filter makes the chunk larger.
+         */
+        chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)idx_info->layout->size) + 8) / 8);
+        if(chunk_size_len > 8)
+            chunk_size_len = 8;
+
+        /* Filtered chunk records also carry the encoded chunk size and a
+         * 4-byte filter mask */
+        bt2_cparam.rrec_size += chunk_size_len + 4;     /* Size of encoded chunk size & filter mask */
+        bt2_cparam.cls = H5D_BT2_FILT;
+    } /* end if */
+    else
+        bt2_cparam.cls = H5D_BT2;
+
+    /* Client-supplied B-tree geometry (node size & split/merge thresholds) */
+    bt2_cparam.node_size = idx_info->layout->u.btree2.cparam.node_size;
+    bt2_cparam.split_percent = idx_info->layout->u.btree2.cparam.split_percent;
+    bt2_cparam.merge_percent = idx_info->layout->u.btree2.cparam.merge_percent;
+
+    /* User data for the v2 B-tree 'create context' callback */
+    u_ctx.f = idx_info->f;
+    u_ctx.ndims = idx_info->layout->ndims - 1;
+    u_ctx.chunk_size = idx_info->layout->size;
+    u_ctx.dim = idx_info->layout->dim;
+
+    /* Check for SWMR writes to the file */
+    if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) {
+        /* Set up object header location for dataset.  Read the address from
+         * the 'btree2' union member, which is where H5D__bt2_idx_init()
+         * stored it (the previous code read 'u.btree', which only worked
+         * because the union members alias). */
+        H5O_loc_reset(&oloc);
+        oloc.file = idx_info->f;
+        oloc.addr = idx_info->storage->u.btree2.dset_ohdr_addr;
+
+        /* Pin the dataset's object header proxy */
+        if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy(&oloc, idx_info->dxpl_id)))
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header proxy")
+    } /* end if */
+
+    /* Create the v2 B-tree for the chunked dataset */
+    if(NULL == (idx_info->storage->u.btree2.bt2 = H5B2_create(idx_info->f, idx_info->dxpl_id, &bt2_cparam, &u_ctx, oh_proxy)))
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create v2 B-tree for tracking chunked dataset")
+
+    /* Retrieve the v2 B-tree's address in the file */
+    if(H5B2_get_addr(idx_info->storage->u.btree2.bt2, &(idx_info->storage->idx_addr)) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get v2 B-tree address for tracking chunked dataset")
+
+done:
+    /* Unpin the object header proxy (only pinned in the SWMR-write case) */
+    if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+        HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header proxy")
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__bt2_idx_create() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_is_space_alloc
+ *
+ * Purpose: Query if space is allocated for index method
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+H5D__bt2_idx_is_space_alloc(const H5O_storage_chunk_t *storage)
+{
+    hbool_t ret_value;          /* Return value */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity check */
+    HDassert(storage);
+
+    /* Index space is allocated iff the index header address is defined */
+    ret_value = (hbool_t)H5F_addr_defined(storage->idx_addr);
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__bt2_idx_is_space_alloc() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_mod_cb
+ *
+ * Purpose: Modify record for dataset chunk when it is found in a v2 B-tree.
+ * This is the callback for H5B2_modify() which is called in
+ * H5D__bt2_idx_insert().
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_mod_cb(void *_record, void *_op_data, hbool_t *changed)
+{
+    H5D_chunk_rec_t *record = (H5D_chunk_rec_t *)_record;               /* Existing record in the tree */
+    const H5D_chunk_rec_t *new_rec = (const H5D_chunk_rec_t *)_op_data; /* Updated record from caller */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Overwrite the existing chunk record with the updated one */
+    *record = *new_rec;
+
+    /* Tell the v2 B-tree that the record was modified, so it gets rewritten */
+    *changed = TRUE;
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__bt2_mod_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_insert
+ *
+ * Purpose: Insert chunk address into the indexing structure.
+ * A non-filtered chunk:
+ * Should not exist
+ * Allocate the chunk and pass chunk address back up
+ * A filtered chunk:
+ * If it was not found, create the chunk and pass chunk address back up
+ * If it was found but its size changed, reallocate the chunk and pass chunk address back up
+ * If it was found but its size was the same, pass chunk address back up
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata,
+ const H5D_t H5_ATTR_UNUSED *dset)
+{
+ H5B2_t *bt2; /* v2 B-tree handle for indexing chunks */
+ H5D_bt2_ud_t bt2_udata; /* User data for v2 B-tree calls */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(udata);
+ HDassert(H5F_addr_defined(udata->chunk_block.offset));
+
+ /* Check if the v2 B-tree is open yet */
+ /* (the index is opened lazily, on first use) */
+ if(NULL == idx_info->storage->u.btree2.bt2)
+ /* Open existing v2 B-tree */
+ if(H5D__bt2_idx_open(idx_info) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree")
+
+ /* Set convenience pointer to v2 B-tree structure */
+ bt2 = idx_info->storage->u.btree2.bt2;
+
+ /* Set up callback info */
+ bt2_udata.ndims = idx_info->layout->ndims - 1;
+ bt2_udata.rec.chunk_addr = udata->chunk_block.offset;
+ if(idx_info->pline->nused > 0) { /* filtered chunk */
+ /* Filtered chunks record their actual (possibly compressed) size,
+ * narrowed to 32 bits with an overflow check, plus the filter mask */
+ H5_CHECKED_ASSIGN(bt2_udata.rec.nbytes, uint32_t, udata->chunk_block.length, hsize_t);
+ bt2_udata.rec.filter_mask = udata->filter_mask;
+ } /* end if */
+ else { /* non-filtered chunk */
+ /* Unfiltered chunks always occupy the fixed chunk size, no filters */
+ bt2_udata.rec.nbytes = idx_info->layout->size;
+ bt2_udata.rec.filter_mask = 0;
+ } /* end else */
+ for(u = 0; u < (idx_info->layout->ndims - 1); u++)
+ bt2_udata.rec.scaled[u] = udata->common.scaled[u];
+
+ if(udata->need_modify) {
+ /* need_modify is set when a filtered chunk was reallocated (its size
+ * changed), so an existing record must be updated in place rather than
+ * a duplicate inserted; only filtered chunks can change size */
+ HDassert(idx_info->pline->nused > 0);
+
+ /* Modify record for v2 B-tree */
+ if(H5B2_modify(bt2, idx_info->dxpl_id, &bt2_udata, H5D__bt2_mod_cb, &bt2_udata.rec) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to modify record in v2 B-tree")
+ } /* end if */
+ else {
+ /* Insert record for object in v2 B-tree */
+ if(H5B2_insert(bt2, idx_info->dxpl_id, &bt2_udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "couldn't insert record in v2 B-tree")
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__bt2_idx_insert() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_found_cb
+ *
+ * Purpose: Retrieve record for dataset chunk when it is found in a v2 B-tree.
+ * This is the callback for H5B2_find() which is called in
+ * H5D__bt2_idx_get_addr() and H5D__bt2_idx_insert().
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_found_cb(const void *nrecord, void *op_data)
+{
+    const H5D_chunk_rec_t *found = (const H5D_chunk_rec_t *)nrecord;    /* Record located in the tree */
+    H5D_chunk_rec_t *out = (H5D_chunk_rec_t *)op_data;                  /* Caller's output buffer */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Copy the matched chunk record out to the caller's buffer */
+    *out = *found;
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__bt2_found_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_get_addr
+ *
+ * Purpose: Get the file address of a chunk if file space has been
+ * assigned. Save the retrieved information in the udata
+ * supplied.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata)
+{
+ H5B2_t *bt2; /* v2 B-tree handle for indexing chunks */
+ H5D_bt2_ud_t bt2_udata; /* User data for v2 B-tree calls */
+ H5D_chunk_rec_t found_rec; /* Record found from searching for object */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->layout->ndims > 0);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(udata);
+
+ /* Check if the v2 B-tree is open yet */
+ /* (the index is opened lazily, on first use) */
+ if(NULL == idx_info->storage->u.btree2.bt2)
+ /* Open existing v2 B-tree */
+ if(H5D__bt2_idx_open(idx_info) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree")
+
+ /* Set convenience pointer to v2 B-tree structure */
+ bt2 = idx_info->storage->u.btree2.bt2;
+
+ /* Clear the found record, so that if no record matches, the chunk comes
+ * back as "not allocated" (undefined address, zero size) */
+ found_rec.chunk_addr = HADDR_UNDEF;
+ found_rec.nbytes = 0;
+ found_rec.filter_mask = 0;
+
+ /* Prepare user data for compare callback */
+ bt2_udata.rec.chunk_addr = HADDR_UNDEF;
+ bt2_udata.ndims = idx_info->layout->ndims - 1;
+
+ /* Set the chunk offset to be searched for */
+ for(u = 0; u < (idx_info->layout->ndims - 1); u++)
+ bt2_udata.rec.scaled[u] = udata->common.scaled[u];
+
+ /* Go get chunk information from v2 B-tree */
+ /* (H5D__bt2_found_cb copies the matching record into found_rec) */
+ if(H5B2_find(bt2, idx_info->dxpl_id, &bt2_udata, H5D__bt2_found_cb, &found_rec) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "can't find object in v2 B-tree")
+
+ /* Set common info for the chunk */
+ udata->chunk_block.offset = found_rec.chunk_addr;
+
+ /* Check for setting other info */
+ if(H5F_addr_defined(udata->chunk_block.offset)) {
+ /* Sanity check */
+ HDassert(0 != found_rec.nbytes);
+
+ /* Set other info for the chunk */
+ if(idx_info->pline->nused > 0) { /* filtered chunk */
+ /* Filtered chunks have a per-chunk size & filter mask in the record */
+ udata->chunk_block.length = found_rec.nbytes;
+ udata->filter_mask = found_rec.filter_mask;
+ } /* end if */
+ else { /* non-filtered chunk */
+ /* Unfiltered chunks always have the fixed chunk size, no filters */
+ udata->chunk_block.length = idx_info->layout->size;
+ udata->filter_mask = 0;
+ } /* end else */
+ } /* end if */
+ else {
+ /* Chunk not found: report no storage for it */
+ udata->chunk_block.length = 0;
+ udata->filter_mask = 0;
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__bt2_idx_get_addr() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_iterate_cb
+ *
+ * Purpose: Translate the B-tree specific chunk record into a generic
+ * form and make the callback to the generic chunk callback
+ * routine.
+ * This is the callback for H5B2_iterate() which is called in
+ * H5D__bt2_idx_iterate().
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__bt2_idx_iterate_cb(const void *_record, void *_udata)
+{
+    H5D_bt2_it_ud_t *udata = (H5D_bt2_it_ud_t *)_udata;   /* User data */
+    int ret_value;                                        /* Return value */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Forward the native chunk record to the generic chunk callback,
+     * passing its result back to the B-tree iterator */
+    ret_value = (udata->cb)((const H5D_chunk_rec_t *)_record, udata->udata);
+    if(ret_value < 0)
+        HERROR(H5E_DATASET, H5E_CALLBACK, "failure in generic chunk iterator callback");
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__bt2_idx_iterate_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_iterate
+ *
+ * Purpose: Iterate over the chunks in an index, making a callback
+ * for each one.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__bt2_idx_iterate(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_cb_func_t chunk_cb, void *chunk_udata)
+{
+ H5B2_t *bt2; /* v2 B-tree handle for indexing chunks */
+ H5D_bt2_it_ud_t udata; /* User data for B-tree iterator callback */
+ int ret_value = FAIL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(chunk_cb);
+ HDassert(chunk_udata);
+
+ /* Check if the v2 B-tree is open yet */
+ /* (the index is opened lazily, on first use) */
+ if(NULL == idx_info->storage->u.btree2.bt2)
+ /* Open existing v2 B-tree */
+ if(H5D__bt2_idx_open(idx_info) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree")
+
+ /* Set convenience pointer to v2 B-tree structure */
+ bt2 = idx_info->storage->u.btree2.bt2;
+
+ /* Prepare user data for iterate callback */
+ udata.cb = chunk_cb;
+ udata.udata = chunk_udata;
+
+ /* Iterate over the records in the v2 B-tree */
+ /* (HERROR rather than HGOTO_ERROR, to preserve the iterator's return
+ * value -- a positive value means the callback requested a short-circuit) */
+ if((ret_value = H5B2_iterate(bt2, idx_info->dxpl_id, H5D__bt2_idx_iterate_cb, &udata)) < 0)
+ HERROR(H5E_DATASET, H5E_BADITER, "unable to iterate over chunk v2 B-tree");
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__bt2_idx_iterate() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_remove_cb()
+ *
+ * Purpose: Free space for 'dataset chunk' object as v2 B-tree
+ * is being deleted or v2 B-tree node is removed.
+ * This is the callback for H5B2_remove() and H5B2_delete(),
+ * which are called in H5D__bt2_idx_remove() and H5D__bt2_idx_delete().
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_remove_cb(const void *_record, void *_udata)
+{
+ const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */
+ H5D_bt2_remove_ud_t *udata = (H5D_bt2_remove_ud_t *)_udata; /* User data for removal callback */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(udata);
+ HDassert(udata->f);
+
+ /* Free the space in the file for the object being removed */
+ /* (sanity-check that the 32-bit record size fits in an hsize_t before
+ * the cast below) */
+ H5_CHECK_OVERFLOW(record->nbytes, uint32_t, hsize_t);
+ if(H5MF_xfree(udata->f, H5FD_MEM_DRAW, udata->dxpl_id, record->chunk_addr, (hsize_t)record->nbytes) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__bt2_remove_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_remove
+ *
+ * Purpose: Remove chunk from index.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata)
+{
+ H5B2_t *bt2; /* v2 B-tree handle for indexing chunks */
+ H5D_bt2_remove_ud_t remove_udata; /* User data for removal callback */
+ H5D_bt2_ud_t bt2_udata; /* User data for v2 B-tree find call */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(udata);
+
+ /* Check if the v2 B-tree is open yet */
+ /* (the index is opened lazily, on first use) */
+ if(NULL == idx_info->storage->u.btree2.bt2)
+ /* Open existing v2 B-tree */
+ if(H5D__bt2_idx_open(idx_info) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree")
+
+ /* Set convenience pointer to v2 B-tree structure */
+ bt2 = idx_info->storage->u.btree2.bt2;
+
+ /* Initialize user data for removal callback */
+ remove_udata.f = idx_info->f;
+ remove_udata.dxpl_id = idx_info->dxpl_id;
+
+ /* Prepare user data for compare callback */
+ bt2_udata.ndims = idx_info->layout->ndims - 1;
+
+ /* Initialize the record to search for */
+ for(u = 0; u < (idx_info->layout->ndims - 1); u++)
+ bt2_udata.rec.scaled[u] = udata->scaled[u];
+
+ /* Remove the record for the "dataset chunk" object from the v2 B-tree */
+ /* (space in the file for the object is freed in the 'remove' callback) */
+ /* (in SWMR-write mode a NULL callback is passed, so the chunk's file
+ * space is NOT freed -- the index record is removed but the raw data
+ * block is left alone) */
+ if(H5B2_remove(bt2, idx_info->dxpl_id, &bt2_udata, (H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) ? NULL : H5D__bt2_remove_cb, &remove_udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "can't remove object from B-tree")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__bt2_idx_remove() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_delete
+ *
+ * Purpose: Delete index and raw data storage for entire dataset
+ * (i.e. all chunks)
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ * Modifications:
+ * Vailin Choi; March 2011
+ * Initialize size of an unfiltered chunk.
+ * This is a fix for the assertion failure in:
+ * [src/H5FSsection.c:968: H5FS_sect_link_size: Assertion `bin < sinfo->nbins' failed.]
+ * which is uncovered by test_unlink_chunked_dataset() in test/unlink.c
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_delete(const H5D_chk_idx_info_t *idx_info)
+{
+    H5D_bt2_remove_ud_t remove_udata;   /* User data for removal callback */
+    H5B2_remove_t remove_op;            /* The removal callback */
+    H5D_bt2_ctx_ud_t u_ctx;             /* Data for context callback */
+    H5O_loc_t oloc;                     /* Temporary object header location for dataset */
+    H5O_proxy_t *oh_proxy = NULL;       /* Dataset's object header proxy */
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+
+    /* Check if the index data structure has been allocated */
+    if(H5F_addr_defined(idx_info->storage->idx_addr)) {
+        /* Set up user data for creating context */
+        u_ctx.f = idx_info->f;
+        u_ctx.ndims = idx_info->layout->ndims - 1;
+        u_ctx.chunk_size = idx_info->layout->size;
+        u_ctx.dim = idx_info->layout->dim;
+
+        /* Initialize user data for removal callback */
+        remove_udata.f = idx_info->f;
+        remove_udata.dxpl_id = idx_info->dxpl_id;
+
+        /* Check for SWMR writes to the file (the original code tested
+         * H5F_INTENT() twice in a row; one check suffices) */
+        if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE) {
+            /* Do not free chunks in SWMR_WRITE mode */
+            remove_op = NULL;
+
+            /* Set up object header location for dataset.  Read the address
+             * from the 'btree2' union member, which is where
+             * H5D__bt2_idx_init() stored it (the previous code read
+             * 'u.btree', which only worked because the union members alias). */
+            H5O_loc_reset(&oloc);
+            oloc.file = idx_info->f;
+            oloc.addr = idx_info->storage->u.btree2.dset_ohdr_addr;
+
+            /* Pin the dataset's object header proxy */
+            if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy(&oloc, idx_info->dxpl_id)))
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header proxy")
+        } /* end if */
+        else
+            remove_op = H5D__bt2_remove_cb;
+
+        /* Delete the v2 B-tree */
+        /* (space in the file for each object is freed in the 'remove' callback) */
+        if(H5B2_delete(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &u_ctx, oh_proxy, remove_op, &remove_udata) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "can't delete v2 B-tree")
+
+        /* Note that the index no longer exists in the file */
+        idx_info->storage->idx_addr = HADDR_UNDEF;
+    } /* end if */
+
+done:
+    /* Unpin the object header proxy (only pinned in the SWMR-write case) */
+    if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+        HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header proxy")
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__bt2_idx_delete() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_copy_setup
+ *
+ * Purpose: Set up any necessary information for copying chunks
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
+    const H5D_chk_idx_info_t *idx_info_dst)
+{
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Source file */
+    HDassert(idx_info_src);
+    HDassert(idx_info_src->f);
+    HDassert(idx_info_src->pline);
+    HDassert(idx_info_src->layout);
+    HDassert(idx_info_src->storage);
+
+    /* Destination file */
+    HDassert(idx_info_dst);
+    HDassert(idx_info_dst->f);
+    HDassert(idx_info_dst->pline);
+    HDassert(idx_info_dst->layout);
+    HDassert(idx_info_dst->storage);
+    HDassert(!H5F_addr_defined(idx_info_dst->storage->idx_addr));
+
+    /* Check if the source v2 B-tree is open yet; open it if not, so the
+     * copy operation can iterate over the source chunks */
+    if(NULL == idx_info_src->storage->u.btree2.bt2)
+        if(H5D__bt2_idx_open(idx_info_src) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree")
+
+    /* Set copied metadata tag */
+    H5_BEGIN_TAG(idx_info_dst->dxpl_id, H5AC__COPIED_TAG, FAIL);
+
+    /* Create v2 B-tree that describes the chunked dataset in the destination file */
+    /* (use HGOTO_ERROR_TAG so the metadata tag is reset on the error path;
+     * plain HGOTO_ERROR would jump to 'done' with the tag still set) */
+    if(H5D__bt2_idx_create(idx_info_dst) < 0)
+        HGOTO_ERROR_TAG(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunked storage")
+    HDassert(H5F_addr_defined(idx_info_dst->storage->idx_addr));
+
+    /* Reset metadata tag */
+    H5_END_TAG(FAIL);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__bt2_idx_copy_setup() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_copy_shutdown
+ *
+ * Purpose: Shutdown any information from copying chunks
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_copy_shutdown(H5O_storage_chunk_t *storage_src,
+    H5O_storage_chunk_t *storage_dst, hid_t dxpl_id)
+{
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    HDassert(storage_src);
+    HDassert(storage_src->u.btree2.bt2);
+    HDassert(storage_dst);
+    HDassert(storage_dst->u.btree2.bt2);
+
+    /* Close v2 B-tree for source file */
+    /* (dxpl_id was wrongly tagged H5_ATTR_UNUSED before -- it is passed to
+     * both H5B2_close() calls below) */
+    if(H5B2_close(storage_src->u.btree2.bt2, dxpl_id) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close v2 B-tree")
+    storage_src->u.btree2.bt2 = NULL;
+
+    /* Close v2 B-tree for destination file */
+    if(H5B2_close(storage_dst->u.btree2.bt2, dxpl_id) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close v2 B-tree")
+    storage_dst->u.btree2.bt2 = NULL;
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__bt2_idx_copy_shutdown() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_size
+ *
+ * Purpose: Retrieve the amount of index storage for chunked dataset
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size)
+{
+    H5B2_t *bt2_cdset = NULL;           /* Pointer to v2 B-tree structure */
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+    HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(index_size);
+
+    /* Open v2 B-tree */
+    /* NOTE(review): this opens unconditionally; H5D__bt2_idx_open() asserts
+     * the index isn't already open, so this routine appears to expect a
+     * closed index on entry -- confirm against callers */
+    if(H5D__bt2_idx_open(idx_info) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree")
+
+    /* Set convenience pointer to v2 B-tree structure */
+    bt2_cdset = idx_info->storage->u.btree2.bt2;
+
+    /* Get v2 B-tree size for indexing chunked dataset */
+    if(H5B2_size(bt2_cdset, idx_info->dxpl_id, index_size) < 0)
+        HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "can't retrieve v2 B-tree storage info for chunked dataset")
+
+done:
+    /* Close v2 B-tree index, if it was opened above.  (Braces added: the
+     * original left the NULL assignment unbraced but indented as if it were
+     * conditional; the pointer is already NULL when bt2_cdset is NULL, so
+     * behavior is unchanged.) */
+    if(bt2_cdset) {
+        if(H5B2_close(bt2_cdset, idx_info->dxpl_id) < 0)
+            HDONE_ERROR(H5E_SYM, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for tracking chunked dataset")
+        idx_info->storage->u.btree2.bt2 = NULL;
+    } /* end if */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__bt2_idx_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_reset
+ *
+ * Purpose: Reset indexing information.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr)
+{
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity check */
+    HDassert(storage);
+
+    /* Forget the open v2 B-tree handle; optionally forget the on-disk
+     * index address as well */
+    storage->u.btree2.bt2 = NULL;
+    if(reset_addr)
+        storage->idx_addr = HADDR_UNDEF;
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__bt2_idx_reset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_dump
+ *
+ * Purpose: Dump indexing information to a stream.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(storage);
+ HDassert(stream);
+
+ /* Print the index header address ("%a" is HDfprintf's haddr_t format) */
+ HDfprintf(stream, "    Address: %a\n", storage->idx_addr);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__bt2_idx_dump() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__bt2_idx_dest
+ *
+ * Purpose: Release indexing information in memory.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_dest(const H5D_chk_idx_info_t *idx_info)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check args */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->storage);
+
+ /* Check if the v2-btree is open */
+ /* (it may never have been opened, since the index is opened lazily) */
+ if(idx_info->storage->u.btree2.bt2) {
+ /* Close v2 B-tree, releasing its in-memory state */
+ if(H5B2_close(idx_info->storage->u.btree2.bt2, idx_info->dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "can't close v2 B-tree")
+ idx_info->storage->u.btree2.bt2 = NULL;
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__bt2_idx_dest() */
+
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index ab621ca..1310a97 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -72,6 +72,15 @@
#define H5D_CHUNK_GET_NODE_INFO(map, node) (map->use_single ? map->single_chunk_info : (H5D_chunk_info_t *)H5SL_item(node))
#define H5D_CHUNK_GET_NEXT_NODE(map, node) (map->use_single ? (H5SL_node_t *)NULL : H5SL_next(node))
+/* Sanity check on chunk index types: commonly used by a lot of routines in this file */
+#define H5D_CHUNK_STORAGE_INDEX_CHK(storage) \
+ HDassert((H5D_CHUNK_IDX_EARRAY == storage->idx_type && H5D_COPS_EARRAY == storage->ops) || \
+ (H5D_CHUNK_IDX_FARRAY == storage->idx_type && H5D_COPS_FARRAY == storage->ops) || \
+ (H5D_CHUNK_IDX_BT2 == storage->idx_type && H5D_COPS_BT2 == storage->ops) || \
+ (H5D_CHUNK_IDX_BTREE == storage->idx_type && H5D_COPS_BTREE == storage->ops) || \
+ (H5D_CHUNK_IDX_SINGLE == storage->idx_type && H5D_COPS_SINGLE == storage->ops) || \
+ (H5D_CHUNK_IDX_NONE == storage->idx_type && H5D_COPS_NONE == storage->ops));
+
/*
* Feature: If this constant is defined then every cache preemption and load
* causes a character to be printed on the standard error stream:
@@ -150,6 +159,8 @@ typedef struct H5D_chunk_it_ud3_t {
/* needed for compressed variable-length data */
const H5O_pline_t *pline; /* Filter pipeline */
+ unsigned dset_ndims; /* Number of dimensions in dataset */
+ const hsize_t *dset_dims; /* Dataset dimensions */
/* needed for copy object pointed by refs */
H5O_copy_t *cpy_info; /* Copy options */
@@ -163,6 +174,13 @@ typedef struct H5D_chunk_it_ud4_t {
uint32_t *chunk_dim; /* Chunk dimensions */
} H5D_chunk_it_ud4_t;
+/* Callback info for iteration to format convert chunks */
+typedef struct H5D_chunk_it_ud5_t {
+ H5D_chk_idx_info_t *new_idx_info; /* Dest. chunk index info object */
+ unsigned dset_ndims; /* Number of dimensions in dataset */
+ hsize_t *dset_dims; /* Dataset dimensions */
+} H5D_chunk_it_ud5_t;
+
/* Callback info for nonexistent readvv operation */
typedef struct H5D_chunk_readvv_ud_t {
unsigned char *rbuf; /* Read buffer to initialize */
@@ -213,9 +231,12 @@ H5D__nonexistent_readvv(const H5D_io_info_t *io_info,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[]);
+/* format convert cb */
+static int H5D__chunk_format_convert_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata);
+
/* Helper routines */
static herr_t H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims,
- const hsize_t *curr_dims);
+ const hsize_t *curr_dims, const hsize_t *max_dims);
static void *H5D__chunk_mem_alloc(size_t size, const H5O_pline_t *pline);
static void *H5D__chunk_mem_xfree(void *chk, const H5O_pline_t *pline);
static void *H5D__chunk_mem_realloc(void *chk, size_t size,
@@ -240,11 +261,14 @@ static herr_t H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id,
const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t reset);
static herr_t H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id,
const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t flush);
+static hbool_t H5D__chunk_is_partial_edge_chunk(const hsize_t *chunk_scaled,
+ unsigned dset_ndims, const hsize_t *dset_dims, const uint32_t *chunk_dims);
static herr_t H5D__chunk_cache_prune(const H5D_t *dset, hid_t dxpl_id,
const H5D_dxpl_cache_t *dxpl_cache, size_t size);
-static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata);
+static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk);
static herr_t H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info,
- const H5F_block_t *old_chunk, H5F_block_t *new_chunk, hbool_t *need_insert);
+ const H5F_block_t *old_chunk, H5F_block_t *new_chunk, hbool_t *need_insert,
+ hbool_t *need_bt2_modify, hsize_t scaled[]);
#ifdef H5_HAVE_PARALLEL
static herr_t H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id,
H5D_chunk_coll_info_t *chunk_info, size_t chunk_size, const void *fill_buf);
@@ -377,7 +401,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters,
/* Create the chunk it if it doesn't exist, or reallocate the chunk
* if its size changed.
*/
- if(H5D__chunk_file_alloc(&idx_info, &old_chunk, &udata.chunk_block, &need_insert) < 0)
+ if(H5D__chunk_file_alloc(&idx_info, &old_chunk, &udata.chunk_block, &need_insert, &udata.need_modify, scaled) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk")
/* Make sure the address of the chunk is returned. */
@@ -404,7 +428,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters,
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
/* Insert the chunk record into the index */
- if(need_insert && layout->storage.u.chunk.ops->insert) {
+ if((need_insert || udata.need_modify) && layout->storage.u.chunk.ops->insert) {
/* Set the chunk's filter mask to the new settings */
udata.filter_mask = filters;
@@ -430,7 +454,8 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, const hsize_t *curr_dims)
+H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims,
+ const hsize_t *curr_dims, const hsize_t *max_dims)
{
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -443,17 +468,21 @@ H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, const hsize
HDassert(curr_dims);
/* Compute the # of chunks in dataset dimensions */
- for(u = 0, layout->nchunks = 1; u < ndims; u++) {
+ for(u = 0, layout->nchunks = 1, layout->max_nchunks = 1; u < ndims; u++) {
/* Round up to the next integer # of chunks, to accomodate partial chunks */
layout->chunks[u] = ((curr_dims[u] + layout->dim[u]) - 1) / layout->dim[u];
+ layout->max_chunks[u] = ((max_dims[u] + layout->dim[u]) - 1) / layout->dim[u];
/* Accumulate the # of chunks */
layout->nchunks *= layout->chunks[u];
+ layout->max_nchunks *= layout->max_chunks[u];
} /* end for */
/* Get the "down" sizes for each dimension */
if(H5VM_array_down(ndims, layout->chunks, layout->down_chunks) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't compute 'down' chunk size value")
+ if(H5VM_array_down(ndims, layout->max_chunks, layout->max_down_chunks) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't compute 'down' chunk size value")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -483,7 +512,7 @@ H5D__chunk_set_info(const H5D_t *dset)
HDassert(dset);
/* Set the base layout information */
- if(H5D__chunk_set_info_real(&dset->shared->layout.u.chunk, dset->shared->ndims, dset->shared->curr_dims) < 0)
+ if(H5D__chunk_set_info_real(&dset->shared->layout.u.chunk, dset->shared->ndims, dset->shared->curr_dims, dset->shared->max_dims) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set layout's chunk info")
/* Call the index's "resize" callback */
@@ -596,6 +625,7 @@ H5D__chunk_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, hid_t dapl_id)
H5D_chk_idx_info_t idx_info; /* Chunked index info */
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Convenience pointer to dataset's chunk cache */
H5P_genplist_t *dapl; /* Data access property list object pointer */
+ H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -603,6 +633,7 @@ H5D__chunk_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, hid_t dapl_id)
/* Sanity check */
HDassert(f);
HDassert(dset);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
if(NULL == (dapl = (H5P_genplist_t *)H5I_object(dapl_id)))
HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for fapl ID")
@@ -686,12 +717,14 @@ done:
hbool_t
H5D__chunk_is_space_alloc(const H5O_storage_t *storage)
{
+ const H5O_storage_chunk_t *sc = &(storage->u.chunk);
hbool_t ret_value = FALSE; /* Return value */
FUNC_ENTER_PACKAGE_NOERR
/* Sanity checks */
HDassert(storage);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
/* Query index layer */
ret_value = (storage->u.chunk.ops->is_space_alloc)(&storage->u.chunk);
@@ -1020,9 +1053,8 @@ H5D__chunk_mem_alloc(size_t size, const H5O_pline_t *pline)
FUNC_ENTER_STATIC_NOERR
HDassert(size);
- HDassert(pline);
- if(pline->nused > 0)
+ if(pline && pline->nused)
ret_value = H5MM_malloc(size);
else
ret_value = H5FL_BLK_MALLOC(chunk, size);
@@ -1050,10 +1082,8 @@ H5D__chunk_mem_xfree(void *chk, const H5O_pline_t *pline)
{
FUNC_ENTER_STATIC_NOERR
- HDassert(pline);
-
if(chk) {
- if(pline->nused > 0)
+ if(pline && pline->nused)
H5MM_xfree(chk);
else
chk = H5FL_BLK_FREE(chunk, chk);
@@ -1343,6 +1373,9 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
HDmemcpy(new_chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims);
new_chunk_info->scaled[fm->f_ndims] = 0;
+ /* Copy the chunk's scaled coordinates */
+ HDmemcpy(new_chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims);
+
/* Insert the new chunk into the skip list */
if(H5SL_insert(fm->sel_chunks, new_chunk_info, &new_chunk_info->index) < 0) {
H5D__free_chunk_info(new_chunk_info, NULL, NULL);
@@ -1600,6 +1633,7 @@ H5D__chunk_file_cb(void H5_ATTR_UNUSED *elem, const H5T_t H5_ATTR_UNUSED *type,
/* Set the chunk's scaled coordinates */
HDmemcpy(chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims);
chunk_info->scaled[fm->f_ndims] = 0;
+ HDmemcpy(chunk_info->scaled, scaled, sizeof(hsize_t) * fm->f_ndims);
/* Insert the new chunk into the skip list */
if(H5SL_insert(fm->sel_chunks,chunk_info,&chunk_info->index) < 0) {
@@ -1729,6 +1763,7 @@ htri_t
H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, hbool_t write_op)
{
const H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */
+ hbool_t no_filters = TRUE;
htri_t ret_value = FAIL; /* Return value */
FUNC_ENTER_PACKAGE
@@ -1736,10 +1771,22 @@ H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, hbool_t write_
HDassert(io_info);
HDassert(dataset);
- /* Must bring the whole chunk in if there are any filters */
- if(dataset->shared->dcpl_cache.pline.nused > 0)
- ret_value = TRUE;
- else {
+ /* Must bring the whole chunk in if there are any filters on the chunk.
+ * Make sure to check if filters are on the dataset but disabled for the
+ * chunk because it is a partial edge chunk. */
+ if(dataset->shared->dcpl_cache.pline.nused > 0) {
+ if(dataset->shared->layout.u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ no_filters = H5D__chunk_is_partial_edge_chunk(
+ io_info->store->chunk.scaled, io_info->dset->shared->ndims,
+ io_info->dset->shared->curr_dims,
+ io_info->dset->shared->layout.u.chunk.dim);
+ } /* end if */
+ else
+ no_filters = FALSE;
+ } /* end if */
+
+ if(no_filters) {
#ifdef H5_HAVE_PARALLEL
/* If MPI based VFD is used and the file is opened for write access, must
* bypass the chunk-cache scheme because other MPI processes could
@@ -1780,7 +1827,9 @@ H5D__chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr, hbool_t write_
#ifdef H5_HAVE_PARALLEL
} /* end else */
#endif /* H5_HAVE_PARALLEL */
- } /* end else */
+ } /* end if */
+ else
+ ret_value = TRUE;
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1885,6 +1934,10 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
void *chunk = NULL; /* Pointer to locked chunk buffer */
htri_t cacheable; /* Whether the chunk is cacheable */
+ /* Set chunk's [scaled] coordinates */
+ io_info->store->chunk.scaled = chunk_info->scaled;
+
+ /* Load the chunk into cache and lock it. */
/* Determine if we should use the chunk cache */
if((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, FALSE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
@@ -1895,11 +1948,8 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
H5_CHECK_OVERFLOW(type_info->src_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size;
- /* Set chunk's [scaled] coordinates */
- io_info->store->chunk.scaled = chunk_info->scaled;
-
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE)))
+ if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -2011,6 +2061,9 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
(!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+ /* Set chunk's [scaled] coordinates */
+ io_info->store->chunk.scaled = chunk_info->scaled;
+
/* Determine if we should use the chunk cache */
if((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
@@ -2029,11 +2082,8 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
fm->fsel_type == H5S_SEL_POINTS)
entire_chunk = FALSE;
- /* Set chunk's [scaled] coordinates */
- io_info->store->chunk.scaled = chunk_info->scaled;
-
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk)))
+ if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -2056,7 +2106,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size;
/* Allocate the chunk */
- if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert) < 0)
+ if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, &udata.need_modify, chunk_info->scaled) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
/* Make sure the address of the chunk is returned. */
@@ -2088,7 +2138,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
} /* end if */
else {
- if(need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert)
+ if((need_insert || udata.need_modify) && io_info->dset->shared->layout.storage.u.chunk.ops->insert)
if((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index")
} /* end else */
@@ -2221,12 +2271,14 @@ H5D__chunk_dest(H5D_t *dset, hid_t dxpl_id)
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Dataset's chunk cache */
H5D_rdcc_ent_t *ent = NULL, *next = NULL; /* Pointer to current & next cache entries */
int nerrors = 0; /* Accumulated count of errors */
+ H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC_TAG(dxpl_id, dset->oloc.addr, FAIL)
/* Sanity checks */
HDassert(dset);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
/* Fill the DXPL cache values for later use */
if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
@@ -2287,6 +2339,7 @@ H5D_chunk_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr)
/* Sanity checks */
HDassert(storage);
HDassert(storage->ops);
+ H5D_CHUNK_STORAGE_INDEX_CHK(storage);
/* Reset index structures */
if((storage->ops->reset)(storage, reset_addr) < 0)
@@ -2351,6 +2404,7 @@ H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_chunk_ud_t *ud
HDmemcpy(last->scaled, udata->common.scaled, sizeof(hsize_t) * udata->common.layout->ndims);
last->addr = udata->chunk_block.offset;
H5_CHECKED_ASSIGN(last->nbytes, uint32_t, udata->chunk_block.length, hsize_t);
+ last->chunk_idx = udata->chunk_idx;
last->filter_mask = udata->filter_mask;
/* Indicate that the cached info is valid */
@@ -2397,6 +2451,7 @@ H5D__chunk_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_chunk_ud_t *uda
/* Retrieve the information from the cache */
udata->chunk_block.offset = last->addr;
udata->chunk_block.length = last->nbytes;
+ udata->chunk_idx = last->chunk_idx;
udata->filter_mask = last->filter_mask;
/* Indicate that the data was found */
@@ -2427,6 +2482,7 @@ herr_t
H5D__chunk_create(const H5D_t *dset /*in,out*/, hid_t dxpl_id)
{
H5D_chk_idx_info_t idx_info; /* Chunked index info */
+ H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -2435,6 +2491,8 @@ H5D__chunk_create(const H5D_t *dset /*in,out*/, hid_t dxpl_id)
HDassert(dset);
HDassert(H5D_CHUNKED == dset->shared->layout.type);
HDassert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
+
#ifndef NDEBUG
{
unsigned u; /* Local index variable */
@@ -2527,12 +2585,14 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
H5D_rdcc_ent_t *ent = NULL; /* Cache entry */
hbool_t found = FALSE; /* In cache? */
unsigned u; /* Counter */
+ H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
HDassert(dset);
HDassert(dset->shared->layout.u.chunk.ndims > 0);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
HDassert(scaled);
HDassert(udata);
@@ -2545,6 +2605,8 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
udata->chunk_block.offset = HADDR_UNDEF;
udata->chunk_block.length = 0;
udata->filter_mask = 0;
+ udata->new_unfilt_chunk = FALSE;
+ udata->need_modify = FALSE;
/* Check for chunk in cache */
if(dset->shared->cache.chunk.nslots > 0) {
@@ -2563,6 +2625,7 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
if(found) {
udata->chunk_block.offset = ent->chunk_block.offset;
udata->chunk_block.length = ent->chunk_block.length;;
+ udata->chunk_idx = ent->chunk_idx;
} /* end if */
else {
/* Invalidate idx_hint, to signal that the chunk is not in cache */
@@ -2614,12 +2677,14 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
{
void *buf = NULL; /* Temporary buffer */
hbool_t point_of_no_return = FALSE;
+ H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC_TAG(dxpl_id, dset->oloc.addr, FAIL)
HDassert(dset);
HDassert(dset->shared);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
HDassert(dxpl_cache);
HDassert(ent);
HDassert(!ent->locked);
@@ -2638,9 +2703,12 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
udata.chunk_block.offset = ent->chunk_block.offset;
udata.chunk_block.length = dset->shared->layout.u.chunk.size;
udata.filter_mask = 0;
+ udata.chunk_idx = ent->chunk_idx;
+ udata.need_modify = FALSE;
/* Should the chunk be filtered before writing it to disk? */
- if(dset->shared->dcpl_cache.pline.nused) {
+ if(dset->shared->dcpl_cache.pline.nused
+ && !(ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS)) {
size_t alloc = udata.chunk_block.length; /* Bytes allocated for BUF */
size_t nbytes; /* Chunk size (in bytes) */
@@ -2679,10 +2747,27 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
/* Indicate that the chunk must be allocated */
must_alloc = TRUE;
} /* end if */
- else if(!H5F_addr_defined(udata.chunk_block.offset))
+ else if(!H5F_addr_defined(udata.chunk_block.offset)) {
/* Indicate that the chunk must be allocated */
must_alloc = TRUE;
+ /* This flag could be set for this chunk, just remove and ignore it
+ */
+ ent->edge_chunk_state &= ~H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ } /* end else */
+ else if(ent->edge_chunk_state & H5D_RDCC_NEWLY_DISABLED_FILTERS) {
+ /* Chunk on disk is still filtered, must insert to allocate correct
+ * size */
+ must_alloc = TRUE;
+
+ /* Set the disable filters field back to the standard disable
+ * filters setting, as it no longer needs to be inserted with every
+ * flush */
+ ent->edge_chunk_state &= ~H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ } /* end else */
+
+ HDassert(!(ent->edge_chunk_state & H5D_RDCC_NEWLY_DISABLED_FILTERS));
+
/* Check if the chunk needs to be allocated (it also could exist already
* and the chunk alloc operation could resize it)
*/
@@ -2697,7 +2782,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
/* Create the chunk it if it doesn't exist, or reallocate the chunk
* if its size changed.
*/
- if(H5D__chunk_file_alloc(&idx_info, &(ent->chunk_block), &udata.chunk_block, &need_insert) < 0)
+ if(H5D__chunk_file_alloc(&idx_info, &(ent->chunk_block), &udata.chunk_block, &need_insert, &udata.need_modify, ent->scaled) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
/* Update the chunk entry's info, in case it was allocated or relocated */
@@ -2712,7 +2797,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
/* Insert the chunk record into the index */
- if(need_insert && dset->shared->layout.storage.u.chunk.ops->insert)
+ if((need_insert || udata.need_modify) && dset->shared->layout.storage.u.chunk.ops->insert)
if((dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index")
@@ -2732,7 +2817,9 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
if(buf == ent->chunk)
buf = NULL;
if(ent->chunk != NULL)
- ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk,
+ ((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
+ : &(dset->shared->dcpl_cache.pline)));
} /* end if */
done:
@@ -2748,7 +2835,9 @@ done:
*/
if(ret_value < 0 && point_of_no_return)
if(ent->chunk)
- ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk,
+ ((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
+ : &(dset->shared->dcpl_cache.pline)));
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5D__chunk_flush_entry() */
@@ -2790,7 +2879,9 @@ H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
else {
/* Don't flush, just free chunk */
if(ent->chunk != NULL)
- ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk,
+ ((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
+ : &(dset->shared->dcpl_cache.pline)));
} /* end else */
/* Unlink from list */
@@ -2804,8 +2895,19 @@ H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
rdcc->tail = ent->prev;
ent->prev = ent->next = NULL;
- /* Only clear hash table slot if chunk was not marked as deleted already */
- if(!ent->deleted)
+ /* Unlink from temporary list */
+ if(ent->tmp_prev) {
+ HDassert(rdcc->tmp_head->tmp_next);
+ ent->tmp_prev->tmp_next = ent->tmp_next;
+ if(ent->tmp_next) {
+ ent->tmp_next->tmp_prev = ent->tmp_prev;
+ ent->tmp_next = NULL;
+ } /* end if */
+ ent->tmp_prev = NULL;
+ } /* end if */
+ else
+ /* Only clear hash table slot if the chunk was not on the temporary list
+ */
rdcc->slot[ent->idx] = NULL;
/* Remove from cache */
@@ -2958,10 +3060,11 @@ done:
*/
void *
H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
- hbool_t relax)
+ hbool_t relax, hbool_t prev_unfilt_chunk)
{
const H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info - always equal to the pline passed to H5D__chunk_mem_alloc */
+ const H5O_pline_t *old_pline = pline; /* Old pipeline, i.e. pipeline used to read the chunk */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
@@ -2969,6 +3072,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/
H5D_rdcc_ent_t *ent; /*cache entry */
size_t chunk_size; /*size of a chunk */
+ hbool_t disable_filters = FALSE; /* Whether to disable filters (when adding to cache) */
void *chunk = NULL; /*the file chunk */
void *ret_value = NULL; /* Return value */
@@ -2980,6 +3084,8 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
HDassert(udata);
HDassert(dset);
HDassert(TRUE == H5P_isa_class(io_info->dxpl_id, H5P_DATASET_XFER));
+ HDassert(!(udata->new_unfilt_chunk && prev_unfilt_chunk));
+ HDassert(!rdcc->tmp_head);
/* Get the chunk's size */
HDassert(layout->u.chunk.size > 0);
@@ -2999,7 +3105,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
unsigned u; /*counters */
/* Make sure this is the right chunk */
- for(u = 0; u < layout->u.chunk.ndims; u++)
+ for(u = 0; u < layout->u.chunk.ndims - 1; u++)
HDassert(io_info->store->chunk.scaled[u] == ent->scaled[u]);
}
#endif /* NDEBUG */
@@ -3009,6 +3115,66 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
*/
rdcc->stats.nhits++;
+ /* Make adjustments if the edge chunk status changed recently */
+ if(pline->nused) {
+ /* If the chunk recently became an unfiltered partial edge chunk
+ * while in cache, we must make some changes to the entry */
+ if(udata->new_unfilt_chunk) {
+ /* If this flag is set then partial chunk filters must be
+ * disabled, and the chunk must not have previously been a
+ * partial chunk (with disabled filters) */
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+ HDassert(!(ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS));
+ HDassert(old_pline->nused);
+
+ /* Disable filters. Set pline to NULL instead of just the
+ * default pipeline to make a quick failure more likely if the
+ * code is changed in an inappropriate/incomplete way. */
+ pline = NULL;
+
+ /* Reallocate the chunk so H5D__chunk_mem_xfree doesn't get confused
+ */
+ if(NULL == (chunk = H5D__chunk_mem_alloc(chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ HDmemcpy(chunk, ent->chunk, chunk_size);
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, old_pline);
+ ent->chunk = (uint8_t *)chunk;
+ chunk = NULL;
+
+ /* Mark the chunk as having filters disabled as well as "newly
+ * disabled" so it is inserted on flush */
+ ent->edge_chunk_state |= H5D_RDCC_DISABLE_FILTERS;
+ ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ } /* end if */
+ else if(prev_unfilt_chunk) {
+ /* If this flag is set then partial chunk filters must be
+ * disabled, and the chunk must have previously been a partial
+ * chunk (with disabled filters) */
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+ HDassert((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS));
+ HDassert(pline->nused);
+
+ /* Mark the old pipeline as having been disabled */
+ old_pline = NULL;
+
+ /* Reallocate the chunk so H5D__chunk_mem_xfree doesn't get confused
+ */
+ if(NULL == (chunk = H5D__chunk_mem_alloc(chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ HDmemcpy(chunk, ent->chunk, chunk_size);
+
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, old_pline);
+ ent->chunk = (uint8_t *)chunk;
+ chunk = NULL;
+
+ /* Mark the chunk as having filters enabled */
+ ent->edge_chunk_state &= ~(H5D_RDCC_DISABLE_FILTERS
+ | H5D_RDCC_NEWLY_DISABLED_FILTERS);
+ } /* end else */
+ } /* end if */
+
/*
* If the chunk is not at the beginning of the cache; move it backward
* by one slot. This is how we implement the LRU preemption
@@ -3037,6 +3203,41 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
chunk_addr = udata->chunk_block.offset;
chunk_alloc = udata->chunk_block.length;
+ /* Check if we should disable filters on this chunk */
+ if(pline->nused) {
+ if(udata->new_unfilt_chunk) {
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ /* Disable the filters for writing */
+ disable_filters = TRUE;
+ pline = NULL;
+ } /* end if */
+ else if(prev_unfilt_chunk) {
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ /* Mark the filters as having been previously disabled (for the
+ * chunk as currently on disk) - disable the filters for reading
+ */
+ old_pline = NULL;
+ } /* end if */
+ else if(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ /* Check if this is an edge chunk */
+ if(H5D__chunk_is_partial_edge_chunk(
+ io_info->store->chunk.scaled, io_info->dset->shared->ndims,
+ io_info->dset->shared->curr_dims, layout->u.chunk.dim)) {
+ /* Disable the filters for both writing and reading */
+ disable_filters = TRUE;
+ old_pline = NULL;
+ pline = NULL;
+ } /* end if */
+ } /* end if */
+ } /* end if */
+ else
+ HDassert(!udata->new_unfilt_chunk && !prev_unfilt_chunk);
+
if(relax) {
/*
* Not in the cache, but we're about to overwrite the whole thing
@@ -3066,16 +3267,32 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
/* Chunk size on disk isn't [likely] the same size as the final chunk
* size in memory, so allocate memory big enough. */
- if(NULL == (chunk = H5D__chunk_mem_alloc(my_chunk_alloc, pline)))
+ if(NULL == (chunk = H5D__chunk_mem_alloc(my_chunk_alloc, (udata->new_unfilt_chunk ? old_pline : pline))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, my_chunk_alloc, io_info->dxpl_id, chunk) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
- if(pline->nused)
- if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), io_info->dxpl_cache->err_detect,
- io_info->dxpl_cache->filter_cb, &my_chunk_alloc, &buf_alloc, &chunk) < 0)
+ if(old_pline && old_pline->nused) {
+ if(H5Z_pipeline(old_pline, H5Z_FLAG_REVERSE,
+ &(udata->filter_mask),
+ io_info->dxpl_cache->err_detect,
+ io_info->dxpl_cache->filter_cb,
+ &my_chunk_alloc, &buf_alloc, &chunk) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed")
+ /* Reallocate chunk if necessary */
+ if(udata->new_unfilt_chunk) {
+ void *tmp_chunk = chunk;
+
+ if(NULL == (chunk = H5D__chunk_mem_alloc(my_chunk_alloc, pline))) {
+ (void)H5D__chunk_mem_xfree(tmp_chunk, old_pline);
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
+ } /* end if */
+ HDmemcpy(chunk, tmp_chunk, chunk_size);
+ (void)H5D__chunk_mem_xfree(tmp_chunk, old_pline);
+ } /* end if */
+ } /* end if */
+
/* Increment # of cache misses */
rdcc->stats.nmisses++;
} /* end if */
@@ -3144,9 +3361,17 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
if(NULL == (ent = H5FL_CALLOC(H5D_rdcc_ent_t)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate raw data chunk entry")
+ ent->locked = 0;
+ ent->edge_chunk_state = disable_filters ? H5D_RDCC_DISABLE_FILTERS : 0;
+ if(udata->new_unfilt_chunk)
+ ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ ent->dirty = FALSE;
+ ent->deleted = FALSE;
+
/* Initialize the new entry */
ent->chunk_block.offset = chunk_addr;
ent->chunk_block.length = chunk_alloc;
+ ent->chunk_idx = udata->chunk_idx;
HDmemcpy(ent->scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims);
H5_CHECKED_ASSIGN(ent->rd_count, uint32_t, chunk_size, size_t);
H5_CHECKED_ASSIGN(ent->wr_count, uint32_t, chunk_size, size_t);
@@ -3167,6 +3392,9 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
} /* end if */
else
rdcc->head = rdcc->tail = ent;
+ ent->tmp_next = NULL;
+ ent->tmp_prev = NULL;
+
} /* end if */
else
/* We did not add the chunk to cache */
@@ -3249,13 +3477,36 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
* Note: we have to copy the layout and filter messages so we
* don't discard the `const' qualifier.
*/
+ hbool_t is_unfiltered_edge_chunk = FALSE; /* Whether the chunk is an unfiltered edge chunk */
+
+ /* Check if we should disable filters on this chunk */
+ if(udata->new_unfilt_chunk) {
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ is_unfiltered_edge_chunk = TRUE;
+ } /* end if */
+ else if(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ /* Check if the chunk is an edge chunk, and disable filters if so */
+ is_unfiltered_edge_chunk = H5D__chunk_is_partial_edge_chunk(
+ io_info->store->chunk.scaled, io_info->dset->shared->ndims,
+ io_info->dset->shared->curr_dims, layout->u.chunk.dim);
+ } /* end if */
+
if(dirty) {
H5D_rdcc_ent_t fake_ent; /* "fake" chunk cache entry */
HDmemset(&fake_ent, 0, sizeof(fake_ent));
fake_ent.dirty = TRUE;
+ if(is_unfiltered_edge_chunk)
+ fake_ent.edge_chunk_state = H5D_RDCC_DISABLE_FILTERS;
+ if(udata->new_unfilt_chunk)
+ fake_ent.edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
+ HDmemcpy(fake_ent.scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims);
HDmemcpy(fake_ent.scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims);
HDassert(layout->u.chunk.size > 0);
+ fake_ent.chunk_idx = udata->chunk_idx;
fake_ent.chunk_block.offset = udata->chunk_block.offset;
fake_ent.chunk_block.length = udata->chunk_block.length;
fake_ent.chunk = (uint8_t *)chunk;
@@ -3265,7 +3516,8 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
} /* end if */
else {
if(chunk)
- chunk = H5D__chunk_mem_xfree(chunk, &(io_info->dset->shared->dcpl_cache.pline));
+ chunk = H5D__chunk_mem_xfree(chunk, (is_unfiltered_edge_chunk ? NULL
+ : &(io_info->dset->shared->dcpl_cache.pline)));
} /* end else */
} /* end if */
else {
@@ -3344,12 +3596,14 @@ H5D__chunk_allocated(H5D_t *dset, hid_t dxpl_id, hsize_t *nbytes)
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
hsize_t chunk_bytes = 0; /* Number of bytes allocated for chunks */
+ H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
HDassert(dset);
HDassert(dset->shared);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
/* Fill the DXPL cache values for later use */
if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
@@ -3409,9 +3663,12 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
unsigned filter_mask = 0; /* Filter mask for chunks that have them */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
+ const H5O_pline_t def_pline = H5O_CRT_PIPELINE_DEF; /* Default pipeline */
const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_value_t fill_status; /* The fill value status */
hbool_t should_fill = FALSE; /* Whether fill values should be written */
+ void *unfilt_fill_buf = NULL; /* Unfiltered fill value buffer */
+ void **fill_buf = NULL; /* Pointer to the fill buffer to use for a chunk */
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
#ifdef H5_HAVE_PARALLEL
@@ -3426,6 +3683,11 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
unsigned op_dim; /* Current operating dimension */
H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */
hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */
+ hbool_t has_unfilt_edge_chunks = FALSE; /* Whether there are partial edge chunks with disabled filters */
+ hbool_t unfilt_edge_chunk_dim[H5O_LAYOUT_NDIMS]; /* Whether there are unfiltered edge chunks at the edge of each dimension */
+ hsize_t edge_chunk_scaled[H5O_LAYOUT_NDIMS]; /* Offset of the unfiltered edge chunks at the edge of each dimension */
+ unsigned nunfilt_edge_chunk_dims = 0; /* Number of dimensions on an edge */
+ const H5O_storage_chunk_t *sc = &(layout->storage.u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE_TAG(dxpl_id, dset->oloc.addr, FAIL)
@@ -3433,6 +3695,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Check args */
HDassert(dset && H5D_CHUNKED == layout->type);
HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
HDassert(TRUE == H5P_isa_class(dxpl_id, H5P_DATASET_XFER));
/* Retrieve the dataset dimensions */
@@ -3467,6 +3730,29 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+ /* Calculate the minimum and maximum chunk offsets in each dimension, and
+ * determine if there are any unfiltered partial edge chunks. Note that we
+ * assume here that all elements of space_dim are > 0. This is checked at
+ * the top of this function. */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ min_unalloc[op_dim] = (old_dim[op_dim] + chunk_dim[op_dim] - 1) / chunk_dim[op_dim];
+ max_unalloc[op_dim] = (space_dim[op_dim] - 1) / chunk_dim[op_dim];
+
+ /* Calculate if there are unfiltered edge chunks at the edge of this
+ * dimension. Note the edge_chunk_scaled is uninitialized for
+ * dimensions where unfilt_edge_chunk_dim is FALSE. Also */
+ if((layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ && pline->nused > 0
+ && space_dim[op_dim] % chunk_dim[op_dim] != 0) {
+ has_unfilt_edge_chunks = TRUE;
+ unfilt_edge_chunk_dim[op_dim] = TRUE;
+ edge_chunk_scaled[op_dim] = max_unalloc[op_dim];
+ } /* end if */
+ else
+ unfilt_edge_chunk_dim[op_dim] = FALSE;
+ } /* end for */
+
/* Get original chunk size */
H5_CHECKED_ASSIGN(orig_chunk_size, size_t, layout->u.chunk.size, uint32_t);
@@ -3498,6 +3784,11 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info")
fb_info_init = TRUE;
+ /* Initialize the fill_buf pointer to the buffer in fb_info. If edge
+ * chunk filters are disabled, we will switch the buffer as appropriate
+ * for each chunk. */
+ fill_buf = &fb_info.fill_buf;
+
/* Check if there are filters which need to be applied to the chunk */
/* (only do this in advance when the chunk info can be re-used (i.e.
* it doesn't contain any non-default VL datatype fill values)
@@ -3505,6 +3796,14 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
if(!fb_info.has_vlen_fill_type && pline->nused > 0) {
size_t buf_size = orig_chunk_size;
+ /* If the dataset has disabled partial chunk filters, create a copy
+ * of the unfiltered fill_buf to use for partial chunks */
+ if(has_unfilt_edge_chunks) {
+ if(NULL == (unfilt_fill_buf = H5D__chunk_mem_alloc(orig_chunk_size, &def_pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for raw data chunk")
+ HDmemcpy(unfilt_fill_buf, fb_info.fill_buf, orig_chunk_size);
+ } /* end if */
+
/* Push the chunk through the filters */
if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &orig_chunk_size, &buf_size, &fb_info.fill_buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed")
@@ -3523,14 +3822,6 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
idx_info.layout = &dset->shared->layout.u.chunk;
idx_info.storage = &dset->shared->layout.storage.u.chunk;
- /* Calculate the minimum and maximum chunk offsets in each dimension. Note
- * that we assume here that all elements of space_dim are > 0. This is
- * checked at the top of this function. */
- for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) {
- min_unalloc[op_dim] = (old_dim[op_dim] + chunk_dim[op_dim] - 1) / chunk_dim[op_dim];
- max_unalloc[op_dim] = (space_dim[op_dim] - 1) / chunk_dim[op_dim];
- } /* end for */
-
/* Loop over all chunks */
/* The algorithm is:
* For each dimension:
@@ -3552,8 +3843,10 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
* Note that min_unalloc & max_unalloc are in scaled coordinates.
*
*/
+ chunk_size = orig_chunk_size;
for(op_dim = 0; op_dim < space_ndims; op_dim++) {
H5D_chunk_ud_t udata; /* User data for querying chunk info */
+ unsigned u; /* Local index variable */
int i; /* Local index variable */
/* Check if allocation along this dimension is really necessary */
@@ -3564,34 +3857,55 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
HDmemset(scaled, 0, (space_ndims * sizeof(scaled[0])));
scaled[op_dim] = min_unalloc[op_dim];
+ if(has_unfilt_edge_chunks) {
+ /* Initialize nunfilt_edge_chunk_dims */
+ nunfilt_edge_chunk_dims = 0;
+ for(u = 0; u < space_ndims; u++)
+ if(unfilt_edge_chunk_dim[u] && scaled[u]
+ == edge_chunk_scaled[u])
+ nunfilt_edge_chunk_dims++;
+
+ /* Initialize chunk_size and fill_buf */
+ if(should_fill && !fb_info.has_vlen_fill_type) {
+ HDassert(fb_info_init);
+ HDassert(unfilt_fill_buf);
+ if(nunfilt_edge_chunk_dims) {
+ fill_buf = &unfilt_fill_buf;
+ chunk_size = layout->u.chunk.size;
+ } /* end if */
+ else {
+ fill_buf = &fb_info.fill_buf;
+ chunk_size = orig_chunk_size;
+ } /* end else */
+ } /* end if */
+ } /* end if */
+
carry = FALSE;
} /* end else */
while(!carry) {
+ /* None of the chunks should be allocated */
hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
/* Reset size of chunk in bytes, in case filtered size changes */
- chunk_size = orig_chunk_size;
+ /* chunk_size = orig_chunk_size; */
+ /* Look up this chunk */
+ if(H5D__chunk_lookup(dset, dxpl_id, scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
#ifndef NDEBUG
- /* None of the chunks should be allocated */
- {
- /* Look up this chunk */
- if(H5D__chunk_lookup(dset, dxpl_id, scaled, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
-
- HDassert(!H5F_addr_defined(udata.chunk_block.offset));
- } /* end block */
+ if(H5D_CHUNK_IDX_NONE != layout->storage.u.chunk.idx_type)
+ HDassert(!H5F_addr_defined(udata.chunk_block.offset));
/* Make sure the chunk is really in the dataset and outside the
* original dimensions */
{
- unsigned u; /* Local index variable */
+ unsigned v; /* Local index variable */
hbool_t outside_orig = FALSE;
- for(u = 0; u < space_ndims; u++) {
- HDassert((scaled[u] * chunk_dim[u]) < space_dim[u]);
- if((scaled[u] * chunk_dim[u]) >= old_dim[u])
+ for(v = 0; v < space_ndims; v++) {
+ HDassert((scaled[v] * chunk_dim[v]) < space_dim[v]);
+ if((scaled[v] * chunk_dim[v]) >= old_dim[v])
outside_orig = TRUE;
} /* end for */
HDassert(outside_orig);
@@ -3602,6 +3916,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
if(fb_info_init && fb_info.has_vlen_fill_type) {
/* Sanity check */
HDassert(should_fill);
+ HDassert(!unfilt_fill_buf);
#ifdef H5_HAVE_PARALLEL
HDassert(!using_mpi); /* Can't write VL datatypes in parallel currently */
#endif
@@ -3620,7 +3935,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer")
/* Check if there are filters which need to be applied to the chunk */
- if(pline->nused > 0) {
+ if(!nunfilt_edge_chunk_dims) {
size_t nbytes = orig_chunk_size;
/* Push the chunk through the filters */
@@ -3636,18 +3951,24 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Keep the number of bytes the chunk turned in to */
chunk_size = nbytes;
} /* end if */
+ else
+ chunk_size = layout->u.chunk.size;
+
+ HDassert(*fill_buf == fb_info.fill_buf);
} /* end if */
/* Initialize the chunk information */
udata.common.layout = &layout->u.chunk;
udata.common.storage = &layout->storage.u.chunk;
udata.common.scaled = scaled;
+ udata.common.rdcc = &dset->shared->cache.chunk;
udata.chunk_block.offset = HADDR_UNDEF;
H5_CHECKED_ASSIGN(udata.chunk_block.length, uint32_t, chunk_size, size_t);
udata.filter_mask = filter_mask;
+ udata.need_modify = FALSE;
/* Allocate the chunk (with all processes) */
- if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert) < 0)
+ if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, &udata.need_modify, scaled) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
HDassert(H5F_addr_defined(udata.chunk_block.offset));
@@ -3676,7 +3997,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
} /* end if */
else {
#endif /* H5_HAVE_PARALLEL */
- if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, chunk_size, dxpl_id, fb_info.fill_buf) < 0)
+ if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, chunk_size, dxpl_id, *fill_buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
#ifdef H5_HAVE_PARALLEL
} /* end else */
@@ -3684,10 +4005,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
} /* end if */
/* Insert the chunk record into the index */
- /* (Note that this isn't safe, from a SWMR perspective, unlike
- * serial operation. -QAK
- */
- if(need_insert && ops->insert)
+ if((need_insert || udata.need_modify) && ops->insert)
if((ops->insert)(&idx_info, &udata, dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index")
@@ -3700,8 +4018,31 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
scaled[i] = min_unalloc[i];
else
scaled[i] = 0;
- } /* end if */
+
+ /* Check if we just left the edge in this dimension */
+ if(unfilt_edge_chunk_dim[i]
+ && edge_chunk_scaled[i] == max_unalloc[i]
+ && scaled[i] < edge_chunk_scaled[i]) {
+ nunfilt_edge_chunk_dims--;
+ if(should_fill && nunfilt_edge_chunk_dims == 0 && !fb_info.has_vlen_fill_type) {
+ HDassert(!H5D__chunk_is_partial_edge_chunk(scaled, space_ndims, space_dim, chunk_dim));
+ fill_buf = &fb_info.fill_buf;
+ chunk_size = orig_chunk_size;
+ } /* end if */
+ } /* end if */
+ } /* end if */
else {
+ /* Check if we just entered the edge in this dimension */
+ if(unfilt_edge_chunk_dim[i] && scaled[i] == edge_chunk_scaled[i]) {
+ HDassert(edge_chunk_scaled[i] == max_unalloc[i]);
+ nunfilt_edge_chunk_dims++;
+ if(should_fill && nunfilt_edge_chunk_dims == 1 && !fb_info.has_vlen_fill_type) {
+ HDassert(H5D__chunk_is_partial_edge_chunk(scaled, space_ndims, space_dim, chunk_dim));
+ fill_buf = &unfilt_fill_buf;
+ chunk_size = layout->u.chunk.size;
+ } /* end if */
+ } /* end if */
+
carry = FALSE;
break;
} /* end else */
@@ -3732,6 +4073,9 @@ done:
if(fb_info_init && H5D__fill_term(&fb_info) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info")
+ /* Free the unfiltered fill value buffer */
+ unfilt_fill_buf = H5D__chunk_mem_xfree(unfilt_fill_buf, &def_pline);
+
#ifdef H5_HAVE_PARALLEL
if(using_mpi && chunk_info.addr)
HDfree(chunk_info.addr);
@@ -3740,6 +4084,185 @@ done:
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5D__chunk_allocate() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_update_old_edge_chunks
+ *
+ * Purpose: Update all chunks which were previously partial edge
+ * chunks and are now complete. Determines exactly which
+ * chunks need to be updated and locks each into cache using
+ * the 'prev_unfilt_chunk' flag, then unlocks it, causing
+ * filters to be applied as necessary.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * April 14, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id, hsize_t old_dim[])
+{
+ hsize_t old_edge_chunk_sc[H5O_LAYOUT_NDIMS]; /* Offset of first previously incomplete chunk in each dimension */
+ hsize_t max_edge_chunk_sc[H5O_LAYOUT_NDIMS]; /* largest offset of chunks that might need to be modified in each dimension */
+ hbool_t new_full_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of chunks in this dimension needs to be modified */
+ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
+ const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
+ hsize_t chunk_sc[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
+ const uint32_t *chunk_dim = layout->u.chunk.dim; /* Convenience pointer to chunk dimensions */
+ unsigned space_ndims; /* Dataset's space rank */
+ const hsize_t *space_dim; /* Dataset's dataspace dimensions */
+ unsigned op_dim; /* Current operationg dimension */
+ H5D_io_info_t chk_io_info; /* Chunked I/O info object */
+ H5D_chunk_ud_t chk_udata; /* User data for locking chunk */
+ H5D_storage_t chk_store; /* Chunk storage information */
+ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
+ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
+ void *chunk; /* The file chunk */
+ hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */
+ const H5O_storage_chunk_t *sc = &(layout->storage.u.chunk);
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Check args */
+ HDassert(dset && H5D_CHUNKED == layout->type);
+ HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
+ HDassert(TRUE == H5P_isa_class(dxpl_id, H5P_DATASET_XFER));
+ HDassert(pline->nused > 0);
+ HDassert(layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ /* Retrieve the dataset dimensions */
+ space_dim = dset->shared->curr_dims;
+ space_ndims = dset->shared->ndims;
+
+ /* The last dimension in chunk_offset is always 0 */
+ chunk_sc[space_ndims] = (hsize_t)0;
+
+ /* Check if any current dimensions are smaller than the chunk size, or if
+ * any old dimensions are 0. If so we do not have to do anything. */
+ for(op_dim=0; op_dim<space_ndims; op_dim++)
+ if((space_dim[op_dim] < chunk_dim[op_dim]) || old_dim[op_dim] == 0) {
+ /* Reset any cached chunk info for this dataset */
+ H5D__chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last);
+ HGOTO_DONE(SUCCEED)
+ } /* end if */
+
+ /*
+ * Initialize structures needed to lock chunks into cache
+ */
+ /* Fill the DXPL cache values for later use */
+ if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+
+ /* Set up chunked I/O info object, for operations on chunks (in callback).
+ * Note that we only need to set chunk_offset once, as the array's address
+ * will never change. */
+ chk_store.chunk.scaled = chunk_sc;
+ H5D_BUILD_IO_INFO_RD(&chk_io_info, dset, dxpl_cache, dxpl_id, &chk_store, NULL);
+
+ /*
+ * Determine the edges of the dataset which need to be modified
+ */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ /* Start off with this dimension marked as not needing to be modified */
+ new_full_dim[op_dim] = FALSE;
+
+ /* Calulate offset of first previously incomplete chunk in this
+ * dimension */
+ old_edge_chunk_sc[op_dim] = (old_dim[op_dim] / chunk_dim[op_dim]);
+
+ /* Calculate the largest offset of chunks that might need to be
+ * modified in this dimension */
+ max_edge_chunk_sc[op_dim] = MIN((old_dim[op_dim] - 1) / chunk_dim[op_dim],
+ MAX((space_dim[op_dim] / chunk_dim[op_dim]), 1) - 1);
+
+ /* Check for old_dim aligned with chunk boundary in this dimension, if
+ * so we do not need to modify chunks along the edge in this dimension
+ */
+ if(old_dim[op_dim] % chunk_dim[op_dim] == 0)
+ continue;
+
+ /* Check if the dataspace expanded enough to cause the old edge chunks
+ * in this dimension to become full */
+ if((space_dim[op_dim]/chunk_dim[op_dim]) >= (old_edge_chunk_sc[op_dim] + 1))
+ new_full_dim[op_dim] = TRUE;
+ } /* end for */
+
+ /* Main loop: fix old edge chunks */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ /* Check if allocation along this dimension is really necessary */
+ if(!new_full_dim[op_dim])
+ continue;
+ else {
+ HDassert(max_edge_chunk_sc[op_dim] == old_edge_chunk_sc[op_dim]);
+
+ /* Reset the chunk offset indices */
+ HDmemset(chunk_sc, 0, (space_ndims * sizeof(chunk_sc[0])));
+ chunk_sc[op_dim] = old_edge_chunk_sc[op_dim];
+
+ carry = FALSE;
+ } /* end if */
+
+ while(!carry) {
+ int i; /* Local index variable */
+
+ /* Make sure the chunk is really a former edge chunk */
+ HDassert(H5D__chunk_is_partial_edge_chunk(chunk_sc, space_ndims, old_dim, chunk_dim)
+ && !H5D__chunk_is_partial_edge_chunk(chunk_sc, space_ndims, space_dim, chunk_dim));
+
+ /* Lookup the chunk */
+ if(H5D__chunk_lookup(dset, dxpl_id, chunk_sc, &chk_udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+
+ /* If this chunk does not exist in cache or on disk, no need to do
+ * anything */
+ if(H5F_addr_defined(chk_udata.chunk_block.offset)
+ || (UINT_MAX != chk_udata.idx_hint)) {
+ /* Lock the chunk into cache. H5D__chunk_lock will take care of
+ * updating the chunk to no longer be an edge chunk. */
+ if(NULL == (chunk = (void *)H5D__chunk_lock(&chk_io_info, &chk_udata, FALSE, TRUE)))
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
+
+ /* Unlock the chunk */
+ if(H5D__chunk_unlock(&chk_io_info, &chk_udata, TRUE, chunk, (uint32_t)0) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk")
+ } /* end if */
+
+ /* Increment indices */
+ carry = TRUE;
+ for(i = ((int)space_ndims - 1); i >= 0; --i) {
+ if((unsigned)i != op_dim) {
+ ++chunk_sc[i];
+ if(chunk_sc[i] > (hsize_t) max_edge_chunk_sc[i])
+ chunk_sc[i] = 0;
+ else {
+ carry = FALSE;
+ break;
+ } /* end else */
+ } /* end if */
+ } /* end for */
+ } /* end while(!carry) */
+
+ /* Adjust max_edge_chunk_sc so we don't modify the same chunk twice.
+ * Also check if this dimension started from 0 (and hence modified all
+ * of the old edge chunks. */
+ if(old_edge_chunk_sc[op_dim] == 0)
+ break;
+ else
+ --max_edge_chunk_sc[op_dim];
+ } /* end for(op_dim=0...) */
+
+ /* Reset any cached chunk info for this dataset */
+ H5D__chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__chunk_update_old_edge_chunks() */
+
#ifdef H5_HAVE_PARALLEL
/*-------------------------------------------------------------------------
@@ -3910,7 +4433,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
+H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk)
{
const H5D_io_info_t *io_info = udata->io_info; /* Local pointer to I/O info */
const H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
@@ -3937,6 +4460,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
/* Get the info for the chunk in the file */
if(H5D__chunk_lookup(dset, io_info->dxpl_id, scaled, &chk_udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ chk_udata.new_unfilt_chunk = new_unfilt_chunk;
/* If this chunk does not exist in cache or on disk, no need to do anything */
if(!H5F_addr_defined(chk_udata.chunk_block.offset) && UINT_MAX == chk_udata.idx_hint)
@@ -3968,7 +4492,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSELECT, FAIL, "unable to select hyperslab")
/* Lock the chunk into the cache, to get a pointer to the chunk buffer */
- if(NULL == (chunk = (void *)H5D__chunk_lock(io_info, &chk_udata, FALSE)))
+ if(NULL == (chunk = (void *)H5D__chunk_lock(io_info, &chk_udata, FALSE, FALSE)))
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
@@ -4122,6 +4646,8 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
hsize_t max_mod_chunk_sc[H5O_LAYOUT_NDIMS]; /* Scaled offset of last chunk to modify in each dimension */
hssize_t max_fill_chunk_sc[H5O_LAYOUT_NDIMS]; /* Scaled offset of last chunk that might be filled in each dimension */
hbool_t fill_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension needs to be filled */
+ hsize_t min_partial_chunk_sc[H5O_LAYOUT_NDIMS]; /* Offset of first partial (or empty) chunk in each dimension */
+ hbool_t new_unfilt_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension are newly unfiltered */
H5D_chk_idx_info_t idx_info; /* Chunked index info */
H5D_io_info_t chk_io_info; /* Chunked I/O info object */
H5D_storage_t chk_store; /* Chunk storage information */
@@ -4141,7 +4667,10 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Scaled offset of current chunk */
hsize_t hyper_start[H5O_LAYOUT_NDIMS]; /* Starting location of hyperslab */
uint32_t elmts_per_chunk; /* Elements in chunk */
+ hbool_t disable_edge_filters = FALSE; /* Whether to disable filters on partial edge chunks */
+ hbool_t new_unfilt_chunk = FALSE; /* Whether the chunk is newly unfiltered */
unsigned u; /* Local index variable */
+ const H5O_storage_chunk_t *sc = &(layout->storage.u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -4149,6 +4678,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
/* Check args */
HDassert(dset && H5D_CHUNKED == layout->type);
HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
HDassert(dxpl_cache);
/* Fill the DXPL cache values for later use */
@@ -4220,6 +4750,12 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
/* Initialize user data for removal */
idx_udata.layout = &layout->u.chunk;
idx_udata.storage = &layout->storage.u.chunk;
+ idx_udata.rdcc = rdcc;
+
+ /* Determine if partial edge chunk filters are disabled */
+ disable_edge_filters = (layout->u.chunk.flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ && (idx_info.pline->nused > 0);
/*
* Determine the chunks which need to be filled or removed
@@ -4247,13 +4783,32 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
min_mod_chunk_sc[op_dim] = space_dim[op_dim] / chunk_dim[op_dim];
/* Determine if we need to fill chunks in this dimension */
- if((hssize_t)min_mod_chunk_sc[op_dim] == max_fill_chunk_sc[op_dim])
+ if((hssize_t)min_mod_chunk_sc[op_dim] == max_fill_chunk_sc[op_dim]) {
fill_dim[op_dim] = TRUE;
- else
+
+ /* If necessary, check if chunks in this dimension that need to
+ * be filled are new partial edge chunks */
+ if(disable_edge_filters && old_dim[op_dim]
+ >= (min_mod_chunk_sc[op_dim] + 1))
+ new_unfilt_dim[op_dim] = TRUE;
+ else
+ new_unfilt_dim[op_dim] = FALSE;
+ } /* end if */
+ else {
fill_dim[op_dim] = FALSE;
+ new_unfilt_dim[op_dim] = FALSE;
+ } /* end else */
} /* end if */
- else
+ else {
fill_dim[op_dim] = FALSE;
+ new_unfilt_dim[op_dim] = FALSE;
+ } /* end else */
+
+ /* If necessary, calculate the smallest offset of non-previously full
+ * chunks in this dimension, so we know these chunks were previously
+ * unfiltered */
+ if(disable_edge_filters)
+ min_partial_chunk_sc[op_dim] = old_dim[op_dim] / chunk_dim[op_dim];
} /* end for */
/* Main loop: fill or remove chunks */
@@ -4287,12 +4842,35 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
while(!carry) {
int i; /* Local index variable */
+ udata.common.scaled = scaled;
+
if(0 == ndims_outside_fill) {
HDassert(fill_dim[op_dim]);
HDassert(scaled[op_dim] == min_mod_chunk_sc[op_dim]);
+ /* Make sure this is an edge chunk */
+ HDassert(H5D__chunk_is_partial_edge_chunk(scaled,
+ space_ndims, space_dim, layout->u.chunk.dim));
+
+ /* Determine if the chunk just became an unfiltered chunk */
+ if(new_unfilt_dim[op_dim]) {
+ new_unfilt_chunk = TRUE;
+ for(u = 0; u < space_ndims; u++)
+ if(scaled[u] == min_partial_chunk_sc[u]) {
+ new_unfilt_chunk = FALSE;
+ break;
+ } /* end if */
+ } /* end if */
+
+ /* Make sure that, if we think this is a new unfiltered chunk,
+ * it was previously not an edge chunk */
+ HDassert(!new_unfilt_dim[op_dim] || (!new_unfilt_chunk !=
+ !H5D__chunk_is_partial_edge_chunk(scaled,
+ space_ndims, old_dim, layout->u.chunk.dim)));
+ HDassert(!new_unfilt_chunk || new_unfilt_dim[op_dim]);
+
/* Fill the unused parts of the chunk */
- if(H5D__chunk_prune_fill(&udata) < 0)
+ if(H5D__chunk_prune_fill(&udata, new_unfilt_chunk) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write fill value")
} /* end if */
else {
@@ -4325,7 +4903,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
/* Remove the chunk from disk, if present */
if(H5F_addr_defined(chk_udata.chunk_block.offset)) {
/* Update the offset in idx_udata */
- idx_udata.scaled = scaled;
+ idx_udata.scaled = udata.common.scaled;
/* Remove the chunk from disk */
if((layout->storage.u.chunk.ops->remove)(&idx_info, &idx_udata) < 0)
@@ -4447,12 +5025,14 @@ H5D__chunk_addrmap(const H5D_io_info_t *io_info, haddr_t chunk_addr[])
H5D_chk_idx_info_t idx_info; /* Chunked index info */
const H5D_t *dset = io_info->dset; /* Local pointer to dataset info */
H5D_chunk_it_ud2_t udata; /* User data for iteration callback */
+ H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
HDassert(dset);
HDassert(dset->shared);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
HDassert(chunk_addr);
/* Set up user data for B-tree callback */
@@ -4500,6 +5080,7 @@ H5D__chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_storage_t *storage)
H5O_pline_t pline; /* I/O pipeline message */
hbool_t pline_read = FALSE; /* Whether the I/O pipeline message was read from the file */
htri_t exists; /* Flag if header message of interest exists */
+ H5O_storage_chunk_t *sc = &(storage->u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -4508,6 +5089,7 @@ H5D__chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_storage_t *storage)
HDassert(f);
HDassert(oh);
HDassert(storage);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
/* Check for I/O pipeline message */
if((exists = H5O_msg_exists_oh(oh, H5O_PLINE_ID)) < 0)
@@ -4574,6 +5156,8 @@ H5D__chunk_update_cache(H5D_t *dset, hid_t dxpl_id)
{
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
H5D_rdcc_ent_t *ent, *next; /*cache entry */
+ H5D_rdcc_ent_t tmp_head; /* Sentinel entry for temporary entry list */
+ H5D_rdcc_ent_t *tmp_tail; /* Tail pointer for temporary entry list */
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
herr_t ret_value = SUCCEED; /* Return value */
@@ -4591,6 +5175,11 @@ H5D__chunk_update_cache(H5D_t *dset, hid_t dxpl_id)
if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+ /* Add temporary entry list to rdcc */
+ (void)HDmemset(&tmp_head, 0, sizeof(tmp_head));
+ rdcc->tmp_head = &tmp_head;
+ tmp_tail = &tmp_head;
+
/* Recompute the index for each cached chunk that is in a dataset */
for(ent = rdcc->head; ent; ent = next) {
unsigned old_idx; /* Previous index number */
@@ -4611,37 +5200,58 @@ H5D__chunk_update_cache(H5D_t *dset, hid_t dxpl_id)
HDassert(old_ent->locked == FALSE);
HDassert(old_ent->deleted == FALSE);
- /* Mark the old entry as deleted, but do not evict (yet).
- * Make sure we do not make any calls to the index
+ /* Insert the old entry into the temporary list, but do not
+ * evict (yet). Make sure we do not make any calls to the index
* until all chunks have updated indices! */
- old_ent->deleted = TRUE;
+ HDassert(!old_ent->tmp_next);
+ HDassert(!old_ent->tmp_prev);
+ tmp_tail->tmp_next = old_ent;
+ old_ent->tmp_prev = tmp_tail;
+ tmp_tail = old_ent;
} /* end if */
/* Insert this chunk into correct location in hash table */
rdcc->slot[ent->idx] = ent;
- /* If this chunk was previously marked as deleted and therefore
- * not in the hash table, reset the deleted flag.
+ /* If this chunk was previously on the temporary list and therefore
+ * not in the hash table, remove it from the temporary list.
* Otherwise clear the old hash table slot. */
- if(ent->deleted)
- ent->deleted = FALSE;
+ if(ent->tmp_prev) {
+ HDassert(tmp_head.tmp_next);
+ HDassert(tmp_tail != &tmp_head);
+ ent->tmp_prev->tmp_next = ent->tmp_next;
+ if(ent->tmp_next) {
+ ent->tmp_next->tmp_prev = ent->tmp_prev;
+ ent->tmp_next = NULL;
+ } /* end if */
+ else {
+ HDassert(tmp_tail == ent);
+ tmp_tail = ent->tmp_prev;
+ } /* end else */
+ ent->tmp_prev = NULL;
+ } /* end if */
else
rdcc->slot[old_idx] = NULL;
} /* end if */
} /* end for */
- /* Evict chunks that are still marked as deleted */
- for(ent = rdcc->head; ent; ent = next) {
- /* Get the pointer to the next cache entry */
- next = ent->next;
+ /* tmp_tail is no longer needed, and will be invalidated by
+ * H5D_chunk_cache_evict anyways. */
+ tmp_tail = NULL;
+
+ /* Evict chunks that are still on the temporary list */
+ while(tmp_head.tmp_next) {
+ ent = tmp_head.tmp_next;
/* Remove the old entry from the cache */
- if(ent->deleted)
- if(H5D__chunk_cache_evict(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks")
- } /* end for */
+ if(H5D__chunk_cache_evict(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks")
+ } /* end while */
done:
+ /* Remove temporary list from rdcc */
+ rdcc->tmp_head = NULL;
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_update_cache() */
@@ -4698,8 +5308,16 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
/* Check for filtered chunks */
if((is_vlen || fix_ref) && pline && pline->nused) {
- must_filter = TRUE;
- cb_struct.func = NULL; /* no callback function when failed */
+ /* Check if we should disable filters on this chunk */
+ if(udata->common.layout->flags
+ & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
+ /* Check if the chunk is an edge chunk, and disable filters if so */
+ if(!H5D__chunk_is_partial_edge_chunk(chunk_rec->scaled, udata->dset_ndims,
+ udata->dset_dims, udata->common.layout->dim))
+ must_filter = TRUE;
+ } /* end if */
+ else
+ must_filter = TRUE;
} /* end if */
/* Resize the buf if it is too small to hold the data */
@@ -4732,6 +5350,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
if(must_filter) {
unsigned filter_mask = chunk_rec->filter_mask;
+ cb_struct.func = NULL; /* no callback function when failed */
if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &filter_mask, H5Z_NO_EDC, cb_struct, &nbytes, &buf_size, &buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "data pipeline read failed")
} /* end if */
@@ -4806,8 +5425,11 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
udata->buf_size = buf_size;
} /* end if */
+ udata_dst.chunk_idx = H5VM_array_offset_pre(udata_dst.common.layout->ndims - 1,
+ udata_dst.common.layout->down_chunks, udata_dst.common.scaled);
+
/* Allocate chunk in the file */
- if(H5D__chunk_file_alloc(udata->idx_info_dst, NULL, &udata_dst.chunk_block, &need_insert) < 0)
+ if(H5D__chunk_file_alloc(udata->idx_info_dst, NULL, &udata_dst.chunk_block, &need_insert, &udata_dst.need_modify, udata_dst.common.scaled) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
/* Write chunk data to destination file */
@@ -4819,7 +5441,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
H5_BEGIN_TAG(udata->idx_info_dst->dxpl_id, H5AC__COPIED_TAG, H5_ITER_ERROR);
/* Insert chunk record into index */
- if(need_insert && udata->idx_info_dst->storage->ops->insert)
+ if((need_insert || udata_dst.need_modify) && udata->idx_info_dst->storage->ops->insert)
if((udata->idx_info_dst->storage->ops->insert)(udata->idx_info_dst, &udata_dst, NULL) < 0)
HGOTO_ERROR_TAG(H5E_DATASET, H5E_CANTINSERT, H5_ITER_ERROR, "unable to insert chunk addr into index")
@@ -4853,6 +5475,9 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
H5D_chunk_it_ud3_t udata; /* User data for iteration callback */
H5D_chk_idx_info_t idx_info_dst; /* Dest. chunked index info */
H5D_chk_idx_info_t idx_info_src; /* Source chunked index info */
+ int sndims; /* Rank of dataspace */
+ hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /* Curr. size of dataset dimensions */
+ hsize_t max_dims[H5O_LAYOUT_NDIMS]; /* Max. size of dataset dimensions */
H5O_pline_t _pline; /* Temporary pipeline info */
const H5O_pline_t *pline; /* Pointer to pipeline info to use */
H5T_path_t *tpath_src_mem = NULL, *tpath_mem_dst = NULL; /* Datatype conversion paths */
@@ -4876,9 +5501,11 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
/* Check args */
HDassert(f_src);
HDassert(storage_src);
+ H5D_CHUNK_STORAGE_INDEX_CHK(storage_src);
HDassert(layout_src);
HDassert(f_dst);
HDassert(storage_dst);
+ H5D_CHUNK_STORAGE_INDEX_CHK(storage_dst);
HDassert(ds_extent_src);
HDassert(dt_src);
@@ -4896,17 +5523,15 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
/* Initialize layout information */
{
- hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /* Curr. size of dataset dimensions */
- int sndims; /* Rank of dataspace */
unsigned ndims; /* Rank of dataspace */
/* Get the dim info for dataset */
- if((sndims = H5S_extent_get_dims(ds_extent_src, curr_dims, NULL)) < 0)
+ if((sndims = H5S_extent_get_dims(ds_extent_src, curr_dims, max_dims)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace dimensions")
H5_CHECKED_ASSIGN(ndims, unsigned, sndims, int);
/* Set the source layout chunk information */
- if(H5D__chunk_set_info_real(layout_src, ndims, curr_dims) < 0)
+ if(H5D__chunk_set_info_real(layout_src, ndims, curr_dims, max_dims) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set layout's chunk info")
} /* end block */
@@ -5053,6 +5678,8 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
udata.buf_space = buf_space;
udata.nelmts = nelmts;
udata.pline = pline;
+ udata.dset_ndims = (unsigned)sndims;
+ udata.dset_dims = curr_dims;
udata.cpy_info = cpy_info;
/* Iterate over chunks to copy data */
@@ -5108,6 +5735,7 @@ H5D__chunk_bh_info(const H5O_loc_t *loc, hid_t dxpl_id, H5O_t *oh, H5O_layout_t
H5D_chk_idx_info_t idx_info; /* Chunked index info */
H5S_t *space = NULL; /* Dataset's dataspace */
H5O_pline_t pline; /* I/O pipeline message */
+ H5O_storage_chunk_t *sc = &(layout->storage.u.chunk);
htri_t exists; /* Flag if header message of interest exists */
hbool_t idx_info_init = FALSE; /* Whether the chunk index info has been initialized */
hbool_t pline_read = FALSE; /* Whether the I/O pipeline message was read */
@@ -5120,6 +5748,7 @@ H5D__chunk_bh_info(const H5O_loc_t *loc, hid_t dxpl_id, H5O_t *oh, H5O_layout_t
HDassert(loc->file);
HDassert(H5F_addr_defined(loc->addr));
HDassert(layout);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
HDassert(index_size);
/* Check for I/O pipeline message */
@@ -5229,12 +5858,14 @@ H5D__chunk_dump_index_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
herr_t
H5D__chunk_dump_index(H5D_t *dset, hid_t dxpl_id, FILE *stream)
{
+ H5O_storage_chunk_t *sc = &(dset->shared->layout.storage.u.chunk);
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
/* Sanity check */
HDassert(dset);
+ H5D_CHUNK_STORAGE_INDEX_CHK(sc);
/* Only display info if stream is defined */
if(stream) {
@@ -5437,6 +6068,45 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_is_partial_edge_chunk
+ *
+ * Purpose: Checks to see if the chunk is a partial edge chunk.
+ * Either dset or (dset_dims and dset_ndims) must be
+ * provided.
+ *
+ * Return: TRUE if the chunk is a partial edge chunk / FALSE otherwise
+ *
+ * Programmer: Neil Fortner
+ * 19 Nov 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+H5D__chunk_is_partial_edge_chunk(const hsize_t scaled[], unsigned dset_ndims,
+ const hsize_t *dset_dims, const uint32_t *chunk_dims)
+{
+ unsigned u; /* Local index variable */
+ hbool_t ret_value = FALSE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(scaled);
+ HDassert(dset_ndims > 0);
+ HDassert(dset_dims);
+ HDassert(chunk_dims);
+
+ /* check if this is a partial edge chunk */
+ for(u = 0; u < dset_ndims; u++)
+ if(((scaled[u] + 1) * chunk_dims[u]) > dset_dims[u])
+ HGOTO_DONE(TRUE);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__chunk_is_partial_edge_chunk() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5D__chunk_file_alloc()
*
* Purpose: Chunk allocation:
@@ -5452,7 +6122,7 @@ done:
*/
static herr_t
H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old_chunk,
- H5F_block_t *new_chunk, hbool_t *need_insert)
+ H5F_block_t *new_chunk, hbool_t *need_insert, hbool_t *need_modify, hsize_t scaled[])
{
hbool_t alloc_chunk = FALSE; /* Whether to allocate chunk */
herr_t ret_value = SUCCEED; /* Return value */
@@ -5465,12 +6135,17 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old
HDassert(idx_info->pline);
HDassert(idx_info->layout);
HDassert(idx_info->storage);
- HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
HDassert(new_chunk);
HDassert(need_insert);
+ HDassert(need_modify);
+
+ *need_modify = FALSE; /* this is mainly for V2-btree */
+ *need_insert = FALSE;
/* Check for filters on chunks */
if(idx_info->pline->nused > 0) {
+
+ HDassert(idx_info->storage->idx_type != H5D_CHUNK_IDX_NONE);
/* Sanity/error checking block */
{
unsigned allow_chunk_size_len; /* Allowed size of encoded chunk size */
@@ -5500,9 +6175,16 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old
/* Check for chunk being same size */
if(new_chunk->length != old_chunk->length) {
/* Release previous chunk */
- if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, old_chunk->offset, old_chunk->length) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+ /* Only free the old location if not doing SWMR writes - otherwise
+ * we must keep the old chunk around in case a reader has an
+ * outdated version of the b-tree node
+ */
+ if(!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) {
+ if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, old_chunk->offset, old_chunk->length) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+ }
alloc_chunk = TRUE;
+ *need_modify = TRUE;
} /* end if */
else {
/* Don't need to reallocate chunk, but send its address back up */
@@ -5524,13 +6206,35 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old
/* Actually allocate space for the chunk in the file */
if(alloc_chunk) {
switch(idx_info->storage->idx_type) {
+ case H5D_CHUNK_IDX_NONE:
+ {
+ H5D_chunk_ud_t udata;
+
+ udata.common.scaled = scaled;
+ if((idx_info->storage->ops->get_addr)(idx_info, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query chunk address")
+ new_chunk->offset = udata.chunk_block.offset;
+ HDassert(new_chunk->length == udata.chunk_block.length);
+ break;
+ }
+
+ case H5D_CHUNK_IDX_EARRAY:
+ case H5D_CHUNK_IDX_FARRAY:
+ case H5D_CHUNK_IDX_BT2:
case H5D_CHUNK_IDX_BTREE:
+ case H5D_CHUNK_IDX_SINGLE:
HDassert(new_chunk->length > 0);
H5_CHECK_OVERFLOW(new_chunk->length, /*From: */uint32_t, /*To: */hsize_t);
new_chunk->offset = H5MF_alloc(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, (hsize_t)new_chunk->length);
if(!H5F_addr_defined(new_chunk->offset))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "file allocation failed")
- *need_insert = TRUE;
+ if(idx_info->storage->idx_type == H5D_CHUNK_IDX_BT2) {
+ /* This can be done together with other index types when Quincy checks into H5B2_modify() */
+ if(!(*need_modify))
+ *need_insert = TRUE;
+ } else
+ *need_insert = TRUE;
+
break;
case H5D_CHUNK_IDX_NTYPES:
@@ -5546,3 +6250,134 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D__chunk_file_alloc() */
+/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_format_convert_cb
+ *
+ * Purpose: Callback routine to insert chunk address into v1 B-tree
+ * chunk index.
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Feb 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__chunk_format_convert_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
+{
+ H5D_chunk_it_ud5_t *udata = (H5D_chunk_it_ud5_t *)_udata; /* User data */
+ H5D_chk_idx_info_t *new_idx_info; /* The new chunk index information */
+ H5D_chunk_ud_t insert_udata; /* Chunk information to be inserted */
+ haddr_t chunk_addr; /* Chunk address */
+ size_t nbytes; /* Chunk size */
+ void *buf = NULL; /* Pointer to buffer of chunk data */
+ int ret_value = H5_ITER_CONT; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ new_idx_info = udata->new_idx_info;
+ H5_CHECKED_ASSIGN(nbytes, size_t, chunk_rec->nbytes, uint32_t);
+ chunk_addr = chunk_rec->chunk_addr;
+
+ if(new_idx_info->pline->nused &&
+ (new_idx_info->layout->flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) &&
+ (H5D__chunk_is_partial_edge_chunk(chunk_rec->scaled, udata->dset_ndims, udata->dset_dims,
+ new_idx_info->layout->dim)) ) {
+ /* This is a partial non-filtered edge chunk */
+ /* Convert the chunk to a filtered edge chunk for v1 B-tree chunk index */
+
+ unsigned filter_mask = chunk_rec->filter_mask;
+ H5Z_cb_t cb_struct; /* Filter failure callback struct */
+ size_t read_size = nbytes; /* Bytes to read */
+
+ HDassert(read_size == new_idx_info->layout->size);
+
+ cb_struct.func = NULL; /* no callback function when failed */
+
+ /* Allocate buffer for chunk data */
+ if(NULL == (buf = H5MM_malloc(read_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed for raw data chunk")
+
+ /* Read the non-filtered edge chunk */
+ if(H5F_block_read(new_idx_info->f, H5FD_MEM_DRAW, chunk_addr, read_size, new_idx_info->dxpl_id, buf) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, H5_ITER_ERROR, "unable to read raw data chunk")
+
+ /* Pass the chunk through the pipeline */
+ if(H5Z_pipeline(new_idx_info->pline, 0, &filter_mask, H5Z_NO_EDC, cb_struct, &nbytes,
+ &read_size, &buf) < 0)
+ HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "output pipeline failed")
+
+#if H5_SIZEOF_SIZE_T > 4
+ /* Check for the chunk expanding too much to encode in a 32-bit value */
+ if(nbytes > ((size_t)0xffffffff))
+ HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, H5_ITER_ERROR, "chunk too large for 32-bit length")
+#endif /* H5_SIZEOF_SIZE_T > 4 */
+
+ /* Allocate space for the filtered chunk */
+ if((chunk_addr = H5MF_alloc(new_idx_info->f, H5FD_MEM_DRAW, new_idx_info->dxpl_id, (hsize_t)nbytes)) == HADDR_UNDEF)
+ HGOTO_ERROR(H5E_DATASET, H5E_NOSPACE, H5_ITER_ERROR, "file allocation failed for filtered chunk")
+ HDassert(H5F_addr_defined(chunk_addr));
+
+ /* Write the filtered chunk to disk */
+ if(H5F_block_write(new_idx_info->f, H5FD_MEM_DRAW, chunk_addr, nbytes,
+ new_idx_info->dxpl_id, buf) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, H5_ITER_ERROR, "unable to write raw data to file")
+ }
+
+ /* Set up chunk information for insertion to chunk index */
+ insert_udata.chunk_block.offset = chunk_addr;
+ insert_udata.chunk_block.length = nbytes;
+ insert_udata.filter_mask = chunk_rec->filter_mask;
+ insert_udata.common.scaled = chunk_rec->scaled;
+ insert_udata.common.layout = new_idx_info->layout;
+ insert_udata.common.storage = new_idx_info->storage;
+
+ /* Insert chunk into the v1 B-tree chunk index */
+ if((new_idx_info->storage->ops->insert)(new_idx_info, &insert_udata, NULL) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, H5_ITER_ERROR, "unable to insert chunk addr into index")
+
+done:
+ if(buf)
+ H5MM_xfree(buf);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__chunk_format_convert_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_format_convert
+ *
+ * Purpose: Iterate over the chunks for the current chunk index and insert
+ * the chunk addresses into v1 B-tree chunk index via callback.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Feb 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__chunk_format_convert(H5D_t *dset, H5D_chk_idx_info_t *idx_info, H5D_chk_idx_info_t *new_idx_info)
+{
+ H5D_chunk_it_ud5_t udata; /* User data */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Check args */
+ HDassert(dset);
+
+ /* Set up user data */
+ udata.new_idx_info = new_idx_info;
+ udata.dset_ndims = dset->shared->ndims;
+ udata.dset_dims = dset->shared->curr_dims;
+
+ /* Iterate over the chunks in the current index and insert the chunk addresses into version 1 B-tree index */
+ if((dset->shared->layout.storage.u.chunk.ops->iterate)(idx_info, H5D__chunk_format_convert_cb, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk index to chunk info")
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__chunk_format_convert() */
diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c
index ebe75bd..e2dc36f 100644
--- a/src/H5Dcompact.c
+++ b/src/H5Dcompact.c
@@ -442,10 +442,6 @@ H5D__compact_copy(H5F_t *f_src, H5O_storage_compact_t *storage_src, H5F_t *f_dst
HDassert(storage_dst);
HDassert(dt_src);
- /* Allocate space for destination data */
- if(NULL == (storage_dst->buf = H5MM_malloc(storage_src->size)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate memory for compact dataset")
-
/* Create datatype ID for src datatype, so it gets freed */
if((tid_src = H5I_register(H5I_DATATYPE, dt_src, FALSE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "unable to register source file datatype")
diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c
index b5b9b0d..6276af1 100644
--- a/src/H5Dcontig.c
+++ b/src/H5Dcontig.c
@@ -370,8 +370,9 @@ H5D__contig_delete(H5F_t *f, hid_t dxpl_id, const H5O_storage_t *storage)
HDassert(storage);
/* Free the file space for the chunk */
- if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, storage->u.contig.addr, storage->u.contig.size) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free contiguous storage space")
+ if(H5F_addr_defined(storage->u.contig.addr))
+ if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, storage->u.contig.addr, storage->u.contig.size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free contiguous storage space")
done:
FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5Dearray.c b/src/H5Dearray.c
new file mode 100644
index 0000000..35de599
--- /dev/null
+++ b/src/H5Dearray.c
@@ -0,0 +1,1866 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: Quincey Koziol <koziol@hdfgroup.org>
+ * Tuesday, January 27, 2009
+ *
+ * Purpose: Extensible array indexed (chunked) I/O functions. The chunks
+ * are given a single-dimensional index which is used as the
+ * offset in an extensible array that maps a chunk coordinate to
+ * a disk address.
+ *
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5Dmodule.h" /* This source code file is part of the H5D module */
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Dpkg.h" /* Datasets */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5EAprivate.h" /* Extensible arrays */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5MFprivate.h" /* File space management */
+#include "H5VMprivate.h" /* Vector functions */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+/* Value to fill unset array elements with */
+#define H5D_EARRAY_FILL HADDR_UNDEF
+#define H5D_EARRAY_FILT_FILL {HADDR_UNDEF, 0, 0}
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/* Extensible array create/open user data */
+typedef struct H5D_earray_ctx_ud_t {
+ const H5F_t *f; /* Pointer to file info */
+ uint32_t chunk_size; /* Size of chunk (bytes) */
+} H5D_earray_ctx_ud_t;
+
+/* Extensible array callback context */
+typedef struct H5D_earray_ctx_t {
+ size_t file_addr_len; /* Size of addresses in the file (bytes) */
+ size_t chunk_size_len; /* Size of chunk sizes in the file (bytes) */
+} H5D_earray_ctx_t;
+
+/* User data for chunk callbacks */
+typedef struct H5D_earray_ud_t {
+ H5F_t *f; /* File pointer for operation */
+ hid_t dxpl_id; /* DXPL ID for operation */
+} H5D_earray_ud_t;
+
+/* Extensible Array callback info for iteration over chunks */
+typedef struct H5D_earray_it_ud_t {
+ H5D_chunk_common_ud_t common; /* Common info for Extensible Array user data (must be first) */
+ H5D_chunk_rec_t chunk_rec; /* Generic chunk record for callback */
+ hbool_t filtered; /* Whether the chunks are filtered */
+ H5D_chunk_cb_func_t cb; /* Chunk callback routine */
+ void *udata; /* User data for chunk callback routine */
+} H5D_earray_it_ud_t;
+
+/* Native extensible array element for chunks w/filters */
+typedef struct H5D_earray_filt_elmt_t {
+ haddr_t addr; /* Address of chunk */
+ uint32_t nbytes; /* Size of chunk (in file) */
+ uint32_t filter_mask; /* Excluded filters for chunk */
+} H5D_earray_filt_elmt_t;
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+/* Extensible array iterator callbacks */
+static int H5D__earray_idx_iterate_cb(hsize_t idx, const void *_elmt, void *_udata);
+static int H5D__earray_idx_delete_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata);
+
+/* Extensible array class callbacks for chunks w/o filters */
+static void *H5D__earray_crt_context(void *udata);
+static herr_t H5D__earray_dst_context(void *ctx);
+static herr_t H5D__earray_fill(void *nat_blk, size_t nelmts);
+static herr_t H5D__earray_encode(void *raw, const void *elmt, size_t nelmts,
+ void *ctx);
+static herr_t H5D__earray_decode(const void *raw, void *elmt, size_t nelmts,
+ void *ctx);
+static herr_t H5D__earray_debug(FILE *stream, int indent, int fwidth,
+ hsize_t idx, const void *elmt);
+static void *H5D__earray_crt_dbg_context(H5F_t *f, hid_t dxpl_id, haddr_t obj_addr);
+static herr_t H5D__earray_dst_dbg_context(void *dbg_ctx);
+
+/* Extensible array class callbacks for chunks w/filters */
+/* (some shared with callbacks for chunks w/o filters) */
+static herr_t H5D__earray_filt_fill(void *nat_blk, size_t nelmts);
+static herr_t H5D__earray_filt_encode(void *raw, const void *elmt, size_t nelmts,
+ void *ctx);
+static herr_t H5D__earray_filt_decode(const void *raw, void *elmt, size_t nelmts,
+ void *ctx);
+static herr_t H5D__earray_filt_debug(FILE *stream, int indent, int fwidth,
+ hsize_t idx, const void *elmt);
+
+/* Chunked layout indexing callbacks */
+static herr_t H5D__earray_idx_init(const H5D_chk_idx_info_t *idx_info,
+ const H5S_t *space, haddr_t dset_ohdr_addr);
+static herr_t H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info);
+static hbool_t H5D__earray_idx_is_space_alloc(const H5O_storage_chunk_t *storage);
+static herr_t H5D__earray_idx_insert(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_ud_t *udata, const H5D_t *dset);
+static herr_t H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_ud_t *udata);
+static herr_t H5D__earray_idx_resize(H5O_layout_chunk_t *layout);
+static int H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_cb_func_t chunk_cb, void *chunk_udata);
+static herr_t H5D__earray_idx_remove(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_common_ud_t *udata);
+static herr_t H5D__earray_idx_delete(const H5D_chk_idx_info_t *idx_info);
+static herr_t H5D__earray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
+ const H5D_chk_idx_info_t *idx_info_dst);
+static herr_t H5D__earray_idx_copy_shutdown(H5O_storage_chunk_t *storage_src,
+ H5O_storage_chunk_t *storage_dst, hid_t dxpl_id);
+static herr_t H5D__earray_idx_size(const H5D_chk_idx_info_t *idx_info,
+ hsize_t *size);
+static herr_t H5D__earray_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr);
+static herr_t H5D__earray_idx_dump(const H5O_storage_chunk_t *storage,
+ FILE *stream);
+static herr_t H5D__earray_idx_dest(const H5D_chk_idx_info_t *idx_info);
+
+/* Generic extensible array routines */
+static herr_t H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info);
+static herr_t H5D__earray_idx_depend(const H5D_chk_idx_info_t *idx_info);
+static herr_t H5D__earray_idx_undepend(const H5D_chk_idx_info_t *idx_info);
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/* Extensible array indexed chunk I/O ops */
+const H5D_chunk_ops_t H5D_COPS_EARRAY[1] = {{
+ TRUE, /* Extensible array indices support SWMR access */
+ H5D__earray_idx_init, /* init */
+ H5D__earray_idx_create, /* create */
+ H5D__earray_idx_is_space_alloc, /* is_space_alloc */
+ H5D__earray_idx_insert, /* insert */
+ H5D__earray_idx_get_addr, /* get_addr */
+ H5D__earray_idx_resize, /* resize */
+ H5D__earray_idx_iterate, /* iterate */
+ H5D__earray_idx_remove, /* remove */
+ H5D__earray_idx_delete, /* delete */
+ H5D__earray_idx_copy_setup, /* copy_setup */
+ H5D__earray_idx_copy_shutdown, /* copy_shutdown */
+ H5D__earray_idx_size, /* size */
+ H5D__earray_idx_reset, /* reset */
+ H5D__earray_idx_dump, /* dump */
+ H5D__earray_idx_dest /* destroy */
+}};
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+/* Extensible array class callbacks for dataset chunks w/o filters */
+const H5EA_class_t H5EA_CLS_CHUNK[1]={{
+ H5EA_CLS_CHUNK_ID, /* Type of extensible array */
+ "Chunk w/o filters", /* Name of extensible array class */
+ sizeof(haddr_t), /* Size of native element */
+ H5D__earray_crt_context, /* Create context */
+ H5D__earray_dst_context, /* Destroy context */
+ H5D__earray_fill, /* Fill block of missing elements callback */
+ H5D__earray_encode, /* Element encoding callback */
+ H5D__earray_decode, /* Element decoding callback */
+ H5D__earray_debug, /* Element debugging callback */
+ H5D__earray_crt_dbg_context, /* Create debugging context */
+ H5D__earray_dst_dbg_context /* Destroy debugging context */
+}};
+
+/* Extensible array class callbacks for dataset chunks w/filters */
+const H5EA_class_t H5EA_CLS_FILT_CHUNK[1]={{
+ H5EA_CLS_FILT_CHUNK_ID, /* Type of extensible array */
+ "Chunk w/filters", /* Name of extensible array class */
+ sizeof(H5D_earray_filt_elmt_t), /* Size of native element */
+ H5D__earray_crt_context, /* Create context */
+ H5D__earray_dst_context, /* Destroy context */
+ H5D__earray_filt_fill, /* Fill block of missing elements callback */
+ H5D__earray_filt_encode, /* Element encoding callback */
+ H5D__earray_filt_decode, /* Element decoding callback */
+ H5D__earray_filt_debug, /* Element debugging callback */
+ H5D__earray_crt_dbg_context, /* Create debugging context */
+ H5D__earray_dst_dbg_context /* Destroy debugging context */
+}};
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+/* Declare a free list to manage the H5D_earray_ctx_t struct */
+/* Declare a free list to manage the H5D_earray_ctx_ud_t struct */
+H5FL_DEFINE_STATIC(H5D_earray_ctx_t);
+H5FL_DEFINE_STATIC(H5D_earray_ctx_ud_t);
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_crt_context
+ *
+ * Purpose: Create context for callbacks
+ *
+ * Return: Success: non-NULL
+ * Failure: NULL
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, January 29, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5D__earray_crt_context(void *_udata)
+{
+ H5D_earray_ctx_t *ctx; /* Extensible array callback context */
+ H5D_earray_ctx_ud_t *udata = (H5D_earray_ctx_ud_t *)_udata; /* User data for extensible array context */
+ void *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(udata);
+ HDassert(udata->f);
+ HDassert(udata->chunk_size > 0);
+
+ /* Allocate new context structure */
+ if(NULL == (ctx = H5FL_MALLOC(H5D_earray_ctx_t)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate extensible array client callback context")
+
+ /* Initialize the context */
+ ctx->file_addr_len = H5F_SIZEOF_ADDR(udata->f);
+
+ /* Compute the size required for encoding the size of a chunk, allowing
+ * for an extra byte, in case the filter makes the chunk larger.
+ */
+ ctx->chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)udata->chunk_size) + 8) / 8);
+ if(ctx->chunk_size_len > 8)
+ ctx->chunk_size_len = 8;
+
+ /* Set return value */
+ ret_value = ctx;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_crt_context() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_dst_context
+ *
+ * Purpose: Destroy context for callbacks
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, January 29, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_dst_context(void *_ctx)
+{
+ H5D_earray_ctx_t *ctx = (H5D_earray_ctx_t *)_ctx; /* Extensible array callback context */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(ctx);
+
+ /* Release context structure */
+ ctx = H5FL_FREE(H5D_earray_ctx_t, ctx);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__earray_dst_context() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_fill
+ *
+ * Purpose: Fill "missing elements" in block of elements
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, January 27, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_fill(void *nat_blk, size_t nelmts)
+{
+ haddr_t fill_val = H5D_EARRAY_FILL; /* Value to fill elements with */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(nat_blk);
+ HDassert(nelmts);
+
+ H5VM_array_fill(nat_blk, &fill_val, H5EA_CLS_CHUNK->nat_elmt_size, nelmts);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__earray_fill() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_encode
+ *
+ * Purpose: Encode an element from "native" to "raw" form
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, January 27, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_encode(void *raw, const void *_elmt, size_t nelmts, void *_ctx)
+{
+ H5D_earray_ctx_t *ctx = (H5D_earray_ctx_t *)_ctx; /* Extensible array callback context */
+ const haddr_t *elmt = (const haddr_t *)_elmt; /* Convenience pointer to native elements */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(raw);
+ HDassert(elmt);
+ HDassert(nelmts);
+ HDassert(ctx);
+
+ /* Encode native elements into raw elements */
+ while(nelmts) {
+ /* Encode element */
+ /* (advances 'raw' pointer) */
+ H5F_addr_encode_len(ctx->file_addr_len, (uint8_t **)&raw, *elmt);
+
+ /* Advance native element pointer */
+ elmt++;
+
+ /* Decrement # of elements to encode */
+ nelmts--;
+ } /* end while */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__earray_encode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_decode
+ *
+ * Purpose: Decode an element from "raw" to "native" form
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, January 29, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_decode(const void *_raw, void *_elmt, size_t nelmts, void *_ctx)
+{
+ H5D_earray_ctx_t *ctx = (H5D_earray_ctx_t *)_ctx; /* Extensible array callback context */
+ haddr_t *elmt = (haddr_t *)_elmt; /* Convenience pointer to native elements */
+ const uint8_t *raw = (const uint8_t *)_raw; /* Convenience pointer to raw elements */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(raw);
+ HDassert(elmt);
+ HDassert(nelmts);
+
+ /* Decode raw elements into native elements */
+ while(nelmts) {
+ /* Decode element */
+ /* (advances 'raw' pointer) */
+ H5F_addr_decode_len(ctx->file_addr_len, &raw, elmt);
+
+ /* Advance native element pointer */
+ elmt++;
+
+ /* Decrement # of elements to decode */
+ nelmts--;
+ } /* end while */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__earray_decode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_debug
+ *
+ * Purpose: Display an element for debugging
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, January 29, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_debug(FILE *stream, int indent, int fwidth, hsize_t idx,
+ const void *elmt)
+{
+ char temp_str[128]; /* Temporary string, for formatting */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(stream);
+ HDassert(elmt);
+
+ /* Print element */
+ sprintf(temp_str, "Element #%llu:", (unsigned long long)idx);
+ HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, temp_str,
+ *(const haddr_t *)elmt);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__earray_debug() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_filt_fill
+ *
+ * Purpose: Fill "missing elements" in block of elements
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, January 31, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_filt_fill(void *nat_blk, size_t nelmts)
+{
+ H5D_earray_filt_elmt_t fill_val = H5D_EARRAY_FILT_FILL; /* Value to fill elements with */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(nat_blk);
+ HDassert(nelmts);
+ HDassert(sizeof(fill_val) == H5EA_CLS_FILT_CHUNK->nat_elmt_size);
+
+ H5VM_array_fill(nat_blk, &fill_val, H5EA_CLS_FILT_CHUNK->nat_elmt_size, nelmts);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__earray_filt_fill() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_filt_encode
+ *
+ * Purpose: Encode an element from "native" to "raw" form
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, January 31, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_filt_encode(void *_raw, const void *_elmt, size_t nelmts, void *_ctx)
+{
+ H5D_earray_ctx_t *ctx = (H5D_earray_ctx_t *)_ctx; /* Extensible array callback context */
+ uint8_t *raw = (uint8_t *)_raw; /* Convenience pointer to raw elements */
+ const H5D_earray_filt_elmt_t *elmt = (const H5D_earray_filt_elmt_t *)_elmt; /* Convenience pointer to native elements */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(raw);
+ HDassert(elmt);
+ HDassert(nelmts);
+ HDassert(ctx);
+
+ /* Encode native elements into raw elements */
+ while(nelmts) {
+ /* Encode element */
+ /* (advances 'raw' pointer) */
+ H5F_addr_encode_len(ctx->file_addr_len, &raw, elmt->addr);
+ UINT64ENCODE_VAR(raw, elmt->nbytes, ctx->chunk_size_len);
+ UINT32ENCODE(raw, elmt->filter_mask);
+
+ /* Advance native element pointer */
+ elmt++;
+
+ /* Decrement # of elements to encode */
+ nelmts--;
+ } /* end while */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__earray_filt_encode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_filt_decode
+ *
+ * Purpose: Decode an element from "raw" to "native" form
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, January 31, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_filt_decode(const void *_raw, void *_elmt, size_t nelmts, void *_ctx)
+{
+ H5D_earray_ctx_t *ctx = (H5D_earray_ctx_t *)_ctx; /* Extensible array callback context */
+ H5D_earray_filt_elmt_t *elmt = (H5D_earray_filt_elmt_t *)_elmt; /* Convenience pointer to native elements */
+ const uint8_t *raw = (const uint8_t *)_raw; /* Convenience pointer to raw elements */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(raw);
+ HDassert(elmt);
+ HDassert(nelmts);
+
+ /* Decode raw elements into native elements */
+ while(nelmts) {
+ /* Decode element */
+ /* (advances 'raw' pointer) */
+ H5F_addr_decode_len(ctx->file_addr_len, &raw, &elmt->addr);
+ UINT64DECODE_VAR(raw, elmt->nbytes, ctx->chunk_size_len);
+ UINT32DECODE(raw, elmt->filter_mask);
+
+ /* Advance native element pointer */
+ elmt++;
+
+ /* Decrement # of elements to decode */
+ nelmts--;
+ } /* end while */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__earray_filt_decode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_filt_debug
+ *
+ * Purpose: Display an element for debugging
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, January 31, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_filt_debug(FILE *stream, int indent, int fwidth, hsize_t idx,
+ const void *_elmt)
+{
+ const H5D_earray_filt_elmt_t *elmt = (const H5D_earray_filt_elmt_t *)_elmt; /* Convenience pointer to native elements */
+ char temp_str[128]; /* Temporary string, for formatting */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(stream);
+ HDassert(elmt);
+
+ /* Print element */
+ sprintf(temp_str, "Element #%llu:", (unsigned long long)idx);
+ HDfprintf(stream, "%*s%-*s {%a, %u, %0x}\n", indent, "", fwidth, temp_str,
+ elmt->addr, elmt->nbytes, elmt->filter_mask);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__earray_filt_debug() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_crt_dbg_context
+ *
+ * Purpose: Create context for debugging callback
+ * (get the layout message in the specified object header)
+ *
+ * Return: Success: non-NULL
+ * Failure: NULL
+ *
+ * Programmer: Vailin Choi; July 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5D__earray_crt_dbg_context(H5F_t *f, hid_t dxpl_id, haddr_t obj_addr)
+{
+ H5D_earray_ctx_ud_t *dbg_ctx = NULL; /* Context for fixed array callback */
+ H5O_loc_t obj_loc; /* Pointer to an object's location */
+ hbool_t obj_opened = FALSE; /* Flag to indicate that the object header was opened */
+ H5O_layout_t layout; /* Layout message */
+ void *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(H5F_addr_defined(obj_addr));
+
+ /* Allocate context for debugging callback */
+ if(NULL == (dbg_ctx = H5FL_MALLOC(H5D_earray_ctx_ud_t)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate extensible array client callback context")
+
+ /* Set up the object header location info */
+ H5O_loc_reset(&obj_loc);
+ obj_loc.file = f;
+ obj_loc.addr = obj_addr;
+
+ /* Open the object header where the layout message resides */
+ if(H5O_open(&obj_loc) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, NULL, "can't open object header")
+ obj_opened = TRUE;
+
+ /* Read the layout message */
+ if(NULL == H5O_msg_read(&obj_loc, H5O_LAYOUT_ID, &layout, dxpl_id))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get layout info")
+
+ /* close the object header */
+ if(H5O_close(&obj_loc) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header")
+
+ /* Create user data */
+ dbg_ctx->f = f;
+ dbg_ctx->chunk_size = layout.u.chunk.size;
+
+ /* Set return value */
+ ret_value = dbg_ctx;
+
+done:
+ /* Cleanup on error */
+ if(ret_value == NULL) {
+ /* Release context structure */
+ if(dbg_ctx)
+ dbg_ctx = H5FL_FREE(H5D_earray_ctx_ud_t, dbg_ctx);
+
+ /* Close object header */
+ if(obj_opened) {
+ if(H5O_close(&obj_loc) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header")
+ } /* end if */
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_crt_dbg_context() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_dst_dbg_context
+ *
+ * Purpose: Destroy context for debugging callback
+ * (free the layout message from the specified object header)
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; July 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_dst_dbg_context(void *_dbg_ctx)
+{
+ H5D_earray_ctx_ud_t *dbg_ctx = (H5D_earray_ctx_ud_t *)_dbg_ctx; /* Context for extensible array callback */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(dbg_ctx);
+
+ /* Release context structure */
+ dbg_ctx = H5FL_FREE(H5D_earray_ctx_ud_t, dbg_ctx);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__earray_dst_dbg_context() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_depend
+ *
+ * Purpose: Create flush dependency between extensible array and dataset's
+ * object header.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, June 2, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_depend(const H5D_chk_idx_info_t *idx_info)
+{
+ H5O_loc_t oloc; /* Temporary object header location for dataset */
+ H5O_proxy_t *oh_proxy = NULL; /* Dataset's object header proxy */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check args */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->idx_type);
+ HDassert(idx_info->storage);
+ HDassert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(idx_info->storage->u.earray.ea);
+
+ /* Set up object header location for dataset */
+ H5O_loc_reset(&oloc);
+ oloc.file = idx_info->f;
+ oloc.addr = idx_info->storage->u.earray.dset_ohdr_addr;
+
+ /* Pin the dataset's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy(&oloc, idx_info->dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header proxy")
+
+ /* Make the extensible array a child flush dependency of the dataset's object header */
+ if(H5EA_depend((H5AC_info_t *)oh_proxy, idx_info->storage->u.earray.ea) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header")
+
+done:
+ /* Unpin the dataset's object header proxy */
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header proxy")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_depend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_undepend
+ *
+ * Purpose: Remove flush dependency between extensible array and dataset's
+ * object header.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, June 2, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_undepend(const H5D_chk_idx_info_t *idx_info)
+{
+ H5O_loc_t oloc; /* Temporary object header location for dataset */
+ H5O_proxy_t *oh_proxy = NULL; /* Dataset's object header proxy */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check args */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->idx_type);
+ HDassert(idx_info->storage);
+ HDassert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(idx_info->storage->u.earray.ea);
+
+ /* Set up object header location for dataset */
+ H5O_loc_reset(&oloc);
+ oloc.file = idx_info->f;
+ oloc.addr = idx_info->storage->u.earray.dset_ohdr_addr;
+
+ /* Pin the dataset's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy(&oloc, idx_info->dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header proxy")
+
+ /* Remove the extensible array as a child flush dependency of the dataset's object header */
+ if(H5EA_undepend((H5AC_info_t *)oh_proxy, idx_info->storage->u.earray.ea) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTUNDEPEND, FAIL, "unable to remove flush dependency on object header")
+
+done:
+ /* Unpin the dataset's object header proxy */
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header proxy")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_undepend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_open
+ *
+ * Purpose: Opens an existing extensible array.
+ *
+ * Note: This information is passively initialized from each index
+ * operation callback because those abstract chunk index operations
+ * are designed to work with the v1 B-tree chunk indices also,
+ * which don't require an 'open' for the data structure.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, January 29, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info)
+{
+ H5D_earray_ctx_ud_t udata; /* User data for extensible array open call */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check args */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->idx_type);
+ HDassert(idx_info->storage);
+ HDassert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(NULL == idx_info->storage->u.earray.ea);
+
+ /* Set up the user data */
+ udata.f = idx_info->f;
+ udata.chunk_size = idx_info->layout->size;
+
+ /* Open the extensible array for the chunk index */
+ if(NULL == (idx_info->storage->u.earray.ea = H5EA_open(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &udata)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open extensible array")
+
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)
+ if(H5D__earray_idx_depend(idx_info) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_open() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_init
+ *
+ * Purpose: Initialize the indexing information for a dataset.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Wednesday, May 27, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space,
+ haddr_t dset_ohdr_addr)
+{
+ hsize_t max_dims[H5O_LAYOUT_NDIMS]; /* Max. size of dataset dimensions */
+ int unlim_dim; /* Rank of the dataset's unlimited dimension */
+ int sndims; /* Rank of dataspace */
+ unsigned ndims; /* Rank of dataspace */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check args */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(space);
+ HDassert(H5F_addr_defined(dset_ohdr_addr));
+
+ /* Get the dim info for dataset */
+ if((sndims = H5S_get_simple_extent_dims(space, NULL, max_dims)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace dimensions")
+ H5_CHECKED_ASSIGN(ndims, unsigned, sndims, int);
+
+ /* Find the rank of the unlimited dimension */
+ unlim_dim = (-1);
+ for(u = 0; u < ndims; u++) {
+ /* Check for unlimited dimension */
+ if(H5S_UNLIMITED == max_dims[u]) {
+ /* Check if we've already found an unlimited dimension */
+ if(unlim_dim >= 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_ALREADYINIT, FAIL, "already found unlimited dimension")
+
+ /* Set the unlimited dimension */
+ unlim_dim = (int)u;
+ } /* end if */
+ } /* end for */
+
+ /* Check if we didn't find an unlimited dimension */
+ if(unlim_dim < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_UNINITIALIZED, FAIL, "didn't find unlimited dimension")
+
+ /* Set the unlimited dimension for the layout's future use */
+ idx_info->layout->u.earray.unlim_dim = (unsigned)unlim_dim;
+
+ /* Store the dataset's object header address for later */
+ idx_info->storage->u.earray.dset_ohdr_addr = dset_ohdr_addr;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_init() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_create
+ *
+ * Purpose: Creates a new indexed-storage extensible array and initializes
+ * the layout struct with information about the storage. The
+ * struct should be immediately written to the object header.
+ *
+ * This function must be called before passing LAYOUT to any of
+ * the other indexed storage functions!
+ *
+ * Return: Non-negative on success (with the LAYOUT argument initialized
+ * and ready to write to an object header). Negative on failure.
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, January 27, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info)
+{
+ H5EA_create_t cparam; /* Extensible array creation parameters */
+ H5D_earray_ctx_ud_t udata; /* User data for extensible array create call */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check args */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(!H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(NULL == idx_info->storage->u.earray.ea);
+
+ /* General parameters */
+ if(idx_info->pline->nused > 0) {
+ unsigned chunk_size_len; /* Size of encoded chunk size */
+
+ /* Compute the size required for encoding the size of a chunk, allowing
+ * for an extra byte, in case the filter makes the chunk larger.
+ */
+ chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)idx_info->layout->size) + 8) / 8);
+ if(chunk_size_len > 8)
+ chunk_size_len = 8;
+
+ cparam.cls = H5EA_CLS_FILT_CHUNK;
+ cparam.raw_elmt_size = (uint8_t)(H5F_SIZEOF_ADDR(idx_info->f) + chunk_size_len + 4);
+ } /* end if */
+ else {
+ cparam.cls = H5EA_CLS_CHUNK;
+ cparam.raw_elmt_size = (uint8_t)H5F_SIZEOF_ADDR(idx_info->f);
+ } /* end else */
+ cparam.max_nelmts_bits = idx_info->layout->u.earray.cparam.max_nelmts_bits;
+ HDassert(cparam.max_nelmts_bits > 0);
+ cparam.idx_blk_elmts = idx_info->layout->u.earray.cparam.idx_blk_elmts;
+ HDassert(cparam.idx_blk_elmts > 0);
+ cparam.sup_blk_min_data_ptrs = idx_info->layout->u.earray.cparam.sup_blk_min_data_ptrs;
+ HDassert(cparam.sup_blk_min_data_ptrs > 0);
+ cparam.data_blk_min_elmts = idx_info->layout->u.earray.cparam.data_blk_min_elmts;
+ HDassert(cparam.data_blk_min_elmts > 0);
+ cparam.max_dblk_page_nelmts_bits = idx_info->layout->u.earray.cparam.max_dblk_page_nelmts_bits;
+ HDassert(cparam.max_dblk_page_nelmts_bits > 0);
+
+ /* Set up the user data */
+ udata.f = idx_info->f;
+ udata.chunk_size = idx_info->layout->size;
+
+ /* Create the extensible array for the chunk index */
+ if(NULL == (idx_info->storage->u.earray.ea = H5EA_create(idx_info->f, idx_info->dxpl_id, &cparam, &udata)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create extensible array")
+
+ /* Get the address of the extensible array in file */
+ if(H5EA_get_addr(idx_info->storage->u.earray.ea, &(idx_info->storage->idx_addr)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query extensible array address")
+
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)
+ if(H5D__earray_idx_depend(idx_info) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_create() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_is_space_alloc
+ *
+ * Purpose: Query if space is allocated for index method
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, January 29, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+H5D__earray_idx_is_space_alloc(const H5O_storage_chunk_t *storage)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(storage);
+
+ FUNC_LEAVE_NOAPI((hbool_t)H5F_addr_defined(storage->idx_addr))
+} /* end H5D__earray_idx_is_space_alloc() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_insert
+ *
+ * Purpose: Insert chunk address into the indexing structure.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; May 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata,
+ const H5D_t H5_ATTR_UNUSED *dset)
+{
+ H5EA_t *ea; /* Pointer to extensible array structure */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(udata);
+
+ /* Check if the extensible array is open yet */
+ if(NULL == idx_info->storage->u.earray.ea)
+ /* Open the extensible array in file */
+ if(H5D__earray_idx_open(idx_info) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array")
+
+ /* Set convenience pointer to extensible array structure */
+ ea = idx_info->storage->u.earray.ea;
+
+ if(!H5F_addr_defined(udata->chunk_block.offset))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "The chunk should have allocated already")
+ if(udata->chunk_idx != (udata->chunk_idx & 0xffffffff)) /* negative value */
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "chunk index must be less than 2^32")
+
+ /* Check for filters on chunks */
+ if(idx_info->pline->nused > 0) {
+ H5D_earray_filt_elmt_t elmt; /* Extensible array element */
+
+ elmt.addr = udata->chunk_block.offset;
+ H5_CHECKED_ASSIGN(elmt.nbytes, uint32_t, udata->chunk_block.length, hsize_t);
+ elmt.filter_mask = udata->filter_mask;
+
+ /* Set the info for the chunk */
+ if(H5EA_set(ea, idx_info->dxpl_id, udata->chunk_idx, &elmt) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set chunk info")
+ } /* end if */
+ else {
+ /* Set the address for the chunk */
+ if(H5EA_set(ea, idx_info->dxpl_id, udata->chunk_idx, &udata->chunk_block.offset) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set chunk address")
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__earray_idx_insert() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_get_addr
+ *
+ * Purpose: Get the file address of a chunk if file space has been
+ * assigned. Save the retrieved information in the udata
+ * supplied.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, January 29, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata)
+{
+ H5EA_t *ea; /* Pointer to extensible array structure */
+ hsize_t idx; /* Array index of chunk */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(udata);
+
+ /* Check if the extensible array is open yet */
+ if(NULL == idx_info->storage->u.earray.ea)
+ /* Open the extensible array in file */
+ if(H5D__earray_idx_open(idx_info) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array")
+
+ /* Set convenience pointer to extensible array structure */
+ ea = idx_info->storage->u.earray.ea;
+
+ /* Check for unlimited dim. not being the slowest-changing dim. */
+ if(idx_info->layout->u.earray.unlim_dim > 0) {
+ hsize_t swizzled_coords[H5O_LAYOUT_NDIMS]; /* swizzled chunk coordinates */
+ unsigned ndims = (idx_info->layout->ndims - 1); /* Number of dimensions */
+ unsigned u;
+
+ /* Compute coordinate offset from scaled offset */
+ for(u = 0; u < ndims; u++)
+ swizzled_coords[u] = udata->common.scaled[u] * idx_info->layout->dim[u];
+
+ H5VM_swizzle_coords(hsize_t, swizzled_coords, idx_info->layout->u.earray.unlim_dim);
+
+ /* Calculate the index of this chunk */
+ idx = H5VM_chunk_index(ndims, swizzled_coords, idx_info->layout->u.earray.swizzled_dim, idx_info->layout->u.earray.swizzled_down_chunks);
+ } /* end if */
+ else {
+ /* Calculate the index of this chunk */
+ idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->down_chunks, udata->common.scaled);
+ } /* end else */
+
+ udata->chunk_idx = idx;
+
+ /* Check for filters on chunks */
+ if(idx_info->pline->nused > 0) {
+ H5D_earray_filt_elmt_t elmt; /* Extensible array element */
+
+ /* Get the information for the chunk */
+ if(H5EA_get(ea, idx_info->dxpl_id, idx, &elmt) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info")
+
+ /* Set the info for the chunk */
+ udata->chunk_block.offset = elmt.addr;
+ udata->chunk_block.length = elmt.nbytes;
+ udata->filter_mask = elmt.filter_mask;
+ } /* end if */
+ else {
+ /* Get the address for the chunk */
+ if(H5EA_get(ea, idx_info->dxpl_id, idx, &udata->chunk_block.offset) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address")
+
+ /* Update the other (constant) information for the chunk */
+ udata->chunk_block.length = idx_info->layout->size;
+ udata->filter_mask = 0;
+ } /* end else */
+
+ if(!H5F_addr_defined(udata->chunk_block.offset))
+ udata->chunk_block.length = 0;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__earray_idx_get_addr() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_resize
+ *
+ * Purpose: Calculate/setup the swizzled down chunk array, used for chunk
+ * index calculations.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, July 23, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_resize(H5O_layout_chunk_t *layout)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check args */
+ HDassert(layout);
+
+ /* "Swizzle" constant dimensions for this dataset */
+ if(layout->u.earray.unlim_dim > 0) {
+ hsize_t swizzled_chunks[H5O_LAYOUT_NDIMS]; /* Swizzled form of # of chunks in each dimension */
+
+ /* Get the swizzled chunk dimensions */
+ HDmemcpy(layout->u.earray.swizzled_dim, layout->dim, (layout->ndims - 1) * sizeof(layout->dim[0]));
+ H5VM_swizzle_coords(uint32_t, layout->u.earray.swizzled_dim, layout->u.earray.unlim_dim);
+
+ /* Get the swizzled number of chunks in each dimension */
+ HDmemcpy(swizzled_chunks, layout->chunks, (layout->ndims - 1) * sizeof(swizzled_chunks[0]));
+ H5VM_swizzle_coords(hsize_t, swizzled_chunks, layout->u.earray.unlim_dim);
+
+ /* Get the swizzled "down" sizes for each dimension */
+ if(H5VM_array_down((layout->ndims - 1), swizzled_chunks, layout->u.earray.swizzled_down_chunks) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't compute swizzled 'down' chunk size value")
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_resize() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_iterate_cb
+ *
+ * Purpose: Callback routine for extensible array element iteration.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Feb 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__earray_idx_iterate_cb(hsize_t H5_ATTR_UNUSED idx, const void *_elmt, void *_udata)
+{
+ H5D_earray_it_ud_t *udata = (H5D_earray_it_ud_t *)_udata; /* User data */
+ unsigned ndims; /* Rank of chunk */
+ int curr_dim; /* Current dimension */
+ int ret_value = H5_ITER_CONT; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Compose generic chunk record for callback */
+ if(udata->filtered) {
+ const H5D_earray_filt_elmt_t *filt_elmt = (const H5D_earray_filt_elmt_t *)_elmt;
+
+ udata->chunk_rec.chunk_addr = filt_elmt->addr;
+ udata->chunk_rec.nbytes = filt_elmt->nbytes;
+ udata->chunk_rec.filter_mask = filt_elmt->filter_mask;
+ } /* end if */
+ else
+ udata->chunk_rec.chunk_addr = *(const haddr_t *)_elmt;
+
+ /* Make "generic chunk" callback */
+ if(H5F_addr_defined(udata->chunk_rec.chunk_addr))
+ if((ret_value = (udata->cb)(&udata->chunk_rec, udata->udata)) < 0)
+ HERROR(H5E_DATASET, H5E_CALLBACK, "failure in generic chunk iterator callback");
+
+ /* Update coordinates of chunk in dataset */
+ ndims = udata->common.layout->ndims - 1;
+ HDassert(ndims > 0);
+ curr_dim = (int)(ndims - 1);
+ while(curr_dim >= 0) {
+ /* Increment coordinate in current dimension */
+ udata->chunk_rec.scaled[curr_dim]++;
+
+ /* Check if we went off the end of the current dimension */
+ if(udata->chunk_rec.scaled[curr_dim] >= udata->common.layout->chunks[curr_dim]) {
+ /* Reset coordinate & move to next faster dimension */
+ udata->chunk_rec.scaled[curr_dim] = 0;
+ curr_dim--;
+ } /* end if */
+ else
+ break;
+ } /* end while */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__earray_idx_iterate_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_iterate
+ *
+ * Purpose: Iterate over the chunks in an index, making a callback
+ * for each one.
+ *
+ * Note: This implementation is slow, particularly for sparse
+ * extensible arrays, replace it with call to H5EA_iterate()
+ * when that's available.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, January 29, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_cb_func_t chunk_cb, void *chunk_udata)
+{
+ H5EA_t *ea; /* Pointer to extensible array structure */
+ H5EA_stat_t ea_stat; /* Extensible array statistics */
+ int ret_value = H5_ITER_CONT; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+ HDassert(chunk_cb);
+ HDassert(chunk_udata);
+
+ /* Check if the extensible array is open yet */
+ if(NULL == idx_info->storage->u.earray.ea)
+ /* Open the extensible array in file */
+ if(H5D__earray_idx_open(idx_info) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array")
+
+ /* Set convenience pointer to extensible array structure */
+ ea = idx_info->storage->u.earray.ea;
+
+ /* Get the extensible array statistics */
+ if(H5EA_get_stats(ea, &ea_stat) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query extensible array statistics")
+
+ if(ea_stat.stored.max_idx_set > 0) {
+ H5D_earray_it_ud_t udata; /* User data for iteration callback */
+
+ /* Initialize userdata */
+ HDmemset(&udata, 0, sizeof udata);
+ udata.common.layout = idx_info->layout;
+ udata.common.storage = idx_info->storage;
+ udata.common.rdcc = NULL;
+ HDmemset(&udata.chunk_rec, 0, sizeof(udata.chunk_rec));
+ udata.filtered = (idx_info->pline->nused > 0);
+ if(!udata.filtered) {
+ udata.chunk_rec.nbytes = idx_info->layout->size;
+ udata.chunk_rec.filter_mask = 0;
+ } /* end if */
+ udata.cb = chunk_cb;
+ udata.udata = chunk_udata;
+
+ /* Iterate over the extensible array elements */
+ if((ret_value = H5EA_iterate(ea, idx_info->dxpl_id, H5D__earray_idx_iterate_cb, &udata)) < 0)
+ HERROR(H5E_DATASET, H5E_BADITER, "unable to iterate over fixed array chunk index");
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_iterate() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_remove
+ *
+ * Purpose: Remove chunk from index.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, January 29, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata)
+{
+    H5EA_t *ea;                 /* Pointer to extensible array structure */
+    hsize_t idx;                /* Array index of chunk */
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+    HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(udata);
+
+    /* Check if the extensible array is open yet */
+    if(NULL == idx_info->storage->u.earray.ea)
+        /* Open the extensible array in file */
+        if(H5D__earray_idx_open(idx_info) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array")
+
+    /* Set convenience pointer to extensible array structure */
+    ea = idx_info->storage->u.earray.ea;
+
+    /* Check for unlimited dim. not being the slowest-changing dim. */
+    if(idx_info->layout->u.earray.unlim_dim > 0) {
+        /* The index addresses chunks with the unlimited dimension swizzled to
+         * the slowest-changing position, so this chunk's coordinates must be
+         * swizzled the same way before its linear index is computed.
+         */
+        hsize_t swizzled_coords[H5O_LAYOUT_NDIMS];  /* swizzled chunk coordinates */
+        unsigned ndims = (idx_info->layout->ndims - 1); /* Number of dimensions */
+        unsigned u;                                 /* Local index variable */
+
+        /* Compute coordinate offset from scaled offset */
+        for(u = 0; u < ndims; u++)
+            swizzled_coords[u] = udata->scaled[u] * idx_info->layout->dim[u];
+
+        H5VM_swizzle_coords(hsize_t, swizzled_coords, idx_info->layout->u.earray.unlim_dim);
+
+        /* Calculate the index of this chunk */
+        idx = H5VM_chunk_index(ndims, swizzled_coords, idx_info->layout->u.earray.swizzled_dim, idx_info->layout->u.earray.swizzled_down_chunks);
+    } /* end if */
+    else {
+        /* Unlimited dimension already slowest-changing: use the scaled chunk
+         * coordinates directly.
+         */
+        /* Calculate the index of this chunk */
+        idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->down_chunks, udata->scaled);
+    } /* end else */
+
+    /* Check for filters on chunks */
+    if(idx_info->pline->nused > 0) {
+        H5D_earray_filt_elmt_t elmt; /* Extensible array element */
+
+        /* Get the info about the chunk for the index */
+        if(H5EA_get(ea, idx_info->dxpl_id, idx, &elmt) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info")
+
+        /* Remove raw data chunk from file if not doing SWMR writes */
+        /* NOTE(review): under SWMR writes the chunk's file space is deliberately
+         * not freed -- presumably so concurrent readers never see recycled
+         * space; confirm against the SWMR design documentation.
+         */
+        HDassert(H5F_addr_defined(elmt.addr));
+        if(!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) {
+            H5_CHECK_OVERFLOW(elmt.nbytes, /*From: */uint32_t, /*To: */hsize_t);
+            if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, elmt.addr, (hsize_t)elmt.nbytes) < 0)
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+        } /* end if */
+
+        /* Reset the info about the chunk for the index */
+        elmt.addr = HADDR_UNDEF;
+        elmt.nbytes = 0;
+        elmt.filter_mask = 0;
+        if(H5EA_set(ea, idx_info->dxpl_id, idx, &elmt) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to reset chunk info")
+    } /* end if */
+    else {
+        haddr_t addr = HADDR_UNDEF; /* Chunk address */
+
+        /* Get the address of the chunk for the index */
+        if(H5EA_get(ea, idx_info->dxpl_id, idx, &addr) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address")
+
+        /* Remove raw data chunk from file if not doing SWMR writes */
+        HDassert(H5F_addr_defined(addr));
+        if(!(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)) {
+            /* Unfiltered chunks all occupy the layout's fixed chunk size */
+            H5_CHECK_OVERFLOW(idx_info->layout->size, /*From: */uint32_t, /*To: */hsize_t);
+            if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, addr, (hsize_t)idx_info->layout->size) < 0)
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+        } /* end if */
+
+        /* Reset the address of the chunk for the index */
+        addr = HADDR_UNDEF;
+        if(H5EA_set(ea, idx_info->dxpl_id, idx, &addr) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to reset chunk address")
+    } /* end else */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__earray_idx_remove() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_delete_cb
+ *
+ * Purpose: Delete space for chunk in file
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, January 31, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__earray_idx_delete_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
+{
+    H5D_earray_ud_t *del_udata = (H5D_earray_ud_t *)_udata; /* File & DXPL info for the deletion */
+    int ret_value = H5_ITER_CONT;       /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(del_udata);
+    HDassert(del_udata->f);
+    HDassert(chunk_rec);
+    HDassert(H5F_addr_defined(chunk_rec->chunk_addr));
+    HDassert(chunk_rec->nbytes > 0);
+
+    /* Release the chunk's raw data file space (size widened to hsize_t) */
+    H5_CHECK_OVERFLOW(chunk_rec->nbytes, /*From: */uint32_t, /*To: */hsize_t);
+    if(H5MF_xfree(del_udata->f, H5FD_MEM_DRAW, del_udata->dxpl_id, chunk_rec->chunk_addr, (hsize_t)chunk_rec->nbytes) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, H5_ITER_ERROR, "unable to free chunk")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_delete_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_delete
+ *
+ * Purpose: Delete index and raw data storage for entire dataset
+ * (i.e. all chunks)
+ *
+ * Note: This implementation is slow, particularly for sparse
+ * extensible arrays, replace it with call to H5EA_iterate()
+ * when that's available.
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, January 29, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_delete(const H5D_chk_idx_info_t *idx_info)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+
+    /* Check if the index data structure has been allocated */
+    if(H5F_addr_defined(idx_info->storage->idx_addr)) {
+        H5D_earray_ud_t udata; /* User data for callback */
+        H5D_earray_ctx_ud_t ctx_udata; /* User data for extensible array open call */
+
+        /* Initialize user data for callback */
+        udata.f = idx_info->f;
+        udata.dxpl_id = idx_info->dxpl_id;
+
+        /* Iterate over the chunk addresses in the extensible array, deleting each chunk */
+        /* (H5D__earray_idx_iterate() opens the extensible array if it isn't open yet) */
+        if(H5D__earray_idx_iterate(idx_info, H5D__earray_idx_delete_cb, &udata) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk addresses")
+
+        /* Close extensible array */
+        /* (must be closed before H5EA_delete() below releases its file space) */
+        if(H5EA_close(idx_info->storage->u.earray.ea, idx_info->dxpl_id) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array")
+        idx_info->storage->u.earray.ea = NULL;
+
+        /* Set up the context user data */
+        ctx_udata.f = idx_info->f;
+        ctx_udata.chunk_size = idx_info->layout->size;
+
+        /* Delete extensible array */
+        if(H5EA_delete(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &ctx_udata) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to delete chunk extensible array")
+        idx_info->storage->idx_addr = HADDR_UNDEF;
+    } /* end if */
+    else
+        /* No index on disk, so there must be no open array in memory either */
+        HDassert(NULL == idx_info->storage->u.earray.ea);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_delete() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_copy_setup
+ *
+ * Purpose: Set up any necessary information for copying chunks
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, January 31, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
+    const H5D_chk_idx_info_t *idx_info_dst)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    HDassert(idx_info_src);
+    HDassert(idx_info_src->f);
+    HDassert(idx_info_src->pline);
+    HDassert(idx_info_src->layout);
+    HDassert(idx_info_src->storage);
+    HDassert(idx_info_dst);
+    HDassert(idx_info_dst->f);
+    HDassert(idx_info_dst->pline);
+    HDassert(idx_info_dst->layout);
+    HDassert(idx_info_dst->storage);
+    HDassert(!H5F_addr_defined(idx_info_dst->storage->idx_addr));
+
+    /* Check if the source extensible array is open yet */
+    if(NULL == idx_info_src->storage->u.earray.ea)
+        /* Open the extensible array in file */
+        if(H5D__earray_idx_open(idx_info_src) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array")
+
+    /* Set copied metadata tag (H5AC__COPIED_TAG) for the destination's cache entries */
+    H5_BEGIN_TAG(idx_info_dst->dxpl_id, H5AC__COPIED_TAG, FAIL);
+
+    /* Create the extensible array that describes chunked storage in the dest. file */
+    /* NOTE(review): if this HGOTO_ERROR fires, control jumps to 'done' without
+     * reaching H5_END_TAG below -- confirm the tag macros tolerate that, or
+     * whether a tagged-error variant is needed here.
+     */
+    if(H5D__earray_idx_create(idx_info_dst) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunked storage")
+    HDassert(H5F_addr_defined(idx_info_dst->storage->idx_addr));
+
+    /* Reset metadata tag */
+    H5_END_TAG(FAIL);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_copy_setup() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_copy_shutdown
+ *
+ * Purpose: Shutdown any information from copying chunks
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, January 31, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_copy_shutdown(H5O_storage_chunk_t *storage_src,
+    H5O_storage_chunk_t *storage_dst, hid_t dxpl_id)
+{
+    H5O_storage_chunk_t *storages[2];   /* Source & destination storage, in close order */
+    unsigned u;                         /* Local index variable */
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    HDassert(storage_src);
+    HDassert(storage_src->u.earray.ea);
+    HDassert(storage_dst);
+    HDassert(storage_dst->u.earray.ea);
+
+    /* Close both extensible arrays: source first, then destination */
+    storages[0] = storage_src;
+    storages[1] = storage_dst;
+    for(u = 0; u < 2; u++) {
+        if(H5EA_close(storages[u]->u.earray.ea, dxpl_id) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array")
+        storages[u]->u.earray.ea = NULL;
+    } /* end for */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_copy_shutdown() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_size
+ *
+ * Purpose: Retrieve the amount of index storage for chunked dataset
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, January 31, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size)
+{
+    H5EA_t *ea;                 /* Pointer to extensible array structure */
+    H5EA_stat_t ea_stat;        /* Extensible array statistics */
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+    HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(index_size);
+
+    /* Open the extensible array in file */
+    if(H5D__earray_idx_open(idx_info) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array")
+
+    /* Set convenience pointer to extensible array structure */
+    ea = idx_info->storage->u.earray.ea;
+
+    /* Get the extensible array statistics */
+    if(H5EA_get_stats(ea, &ea_stat) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query extensible array statistics")
+
+    /* Sum the index's metadata: header, index block, super blocks & data blocks */
+    *index_size = ea_stat.computed.hdr_size + ea_stat.computed.index_blk_size
+            + ea_stat.stored.super_blk_size + ea_stat.stored.data_blk_size;
+
+done:
+    /* Close the extensible array on both success & error paths.  Use
+     * HDONE_ERROR (not HGOTO_ERROR) here: HGOTO_ERROR's "goto done" from
+     * within the done block would jump back into this cleanup and could loop
+     * forever on a persistent close failure.
+     */
+    if(idx_info->storage->u.earray.ea) {
+        if(H5EA_close(idx_info->storage->u.earray.ea, idx_info->dxpl_id) < 0)
+            HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array")
+        idx_info->storage->u.earray.ea = NULL;
+    } /* end if */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_reset
+ *
+ * Purpose: Reset indexing information.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, January 31, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr)
+{
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Check args */
+    HDassert(storage);
+
+    /* Drop the in-memory extensible array pointer */
+    storage->u.earray.ea = NULL;
+
+    /* Optionally forget the on-disk addresses as well */
+    if(reset_addr) {
+        storage->idx_addr = HADDR_UNDEF;
+        storage->u.earray.dset_ohdr_addr = HADDR_UNDEF;
+    } /* end if */
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__earray_idx_reset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_dump
+ *
+ * Purpose: Dump indexing information to a stream.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, January 31, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream)
+{
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Check args */
+    HDassert(storage);
+    HDassert(stream);
+
+    /* Print the index's header address ("%a" is HDfprintf's conversion for
+     * file addresses -- TODO confirm against H5private's HDfprintf docs)
+     */
+    HDfprintf(stream, "    Address: %a\n", storage->idx_addr);
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__earray_idx_dump() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__earray_idx_dest
+ *
+ * Purpose: Release indexing information in memory.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, January 31, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__earray_idx_dest(const H5D_chk_idx_info_t *idx_info)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->storage);
+
+    /* Nothing to release unless the extensible array is currently open */
+    if(NULL != idx_info->storage->u.earray.ea) {
+        /* Close it & clear the stale pointer */
+        if(H5EA_close(idx_info->storage->u.earray.ea, idx_info->dxpl_id) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array")
+        idx_info->storage->u.earray.ea = NULL;
+    } /* end if */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__earray_idx_dest() */
+
diff --git a/src/H5Dfarray.c b/src/H5Dfarray.c
new file mode 100644
index 0000000..a1dc23b
--- /dev/null
+++ b/src/H5Dfarray.c
@@ -0,0 +1,1757 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: Vailin Choi <vchoi@hdfgroup.org>
+ * Thursday, April 30, 2009
+ *
+ * Purpose: Fixed array indexed (chunked) I/O functions.
+ * The chunk coordinate is mapped as an index into an array of
+ * disk addresses for the chunks.
+ *
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5Dmodule.h" /* This source code file is part of the H5D module */
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Dpkg.h" /* Datasets */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5FAprivate.h" /* Fixed arrays */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5MFprivate.h" /* File space management */
+#include "H5VMprivate.h" /* Vector functions */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+/* Value to fill unset array elements with */
+#define H5D_FARRAY_FILL HADDR_UNDEF
+#define H5D_FARRAY_FILT_FILL {HADDR_UNDEF, 0, 0}
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/* Fixed array create/open user data */
+typedef struct H5D_farray_ctx_ud_t {
+    const H5F_t *f; /* Pointer to file info */
+    uint32_t chunk_size; /* Size of chunk (bytes) */
+} H5D_farray_ctx_ud_t;
+
+/* Fixed array callback context */
+/* (derived from H5D_farray_ctx_ud_t in H5D__farray_crt_context()) */
+typedef struct H5D_farray_ctx_t {
+    size_t file_addr_len; /* Size of addresses in the file (bytes) */
+    size_t chunk_size_len; /* Size of chunk sizes in the file (bytes) */
+} H5D_farray_ctx_t;
+
+/* User data for chunk deletion callback */
+typedef struct H5D_farray_del_ud_t {
+    H5F_t *f; /* File pointer for operation */
+    hid_t dxpl_id; /* DXPL ID for operation */
+    hbool_t filtered; /* Whether the chunks are filtered */
+    uint32_t unfilt_size; /* Size of unfiltered chunk in bytes */
+} H5D_farray_del_ud_t;
+
+/* Fixed Array callback info for iteration over chunks */
+typedef struct H5D_farray_it_ud_t {
+    H5D_chunk_common_ud_t common; /* Common info for Fixed Array user data (must be first) */
+    H5D_chunk_rec_t chunk_rec; /* Generic chunk record for callback */
+    hbool_t filtered; /* Whether the chunks are filtered */
+    H5D_chunk_cb_func_t cb; /* Chunk callback routine */
+    void *udata; /* User data for chunk callback routine */
+} H5D_farray_it_ud_t;
+
+/* Native fixed array element for chunks w/filters */
+/* (field order matches the H5D_FARRAY_FILT_FILL initializer above) */
+typedef struct H5D_farray_filt_elmt_t {
+    haddr_t addr; /* Address of chunk */
+    uint32_t nbytes; /* Size of chunk (in file) */
+    uint32_t filter_mask; /* Excluded filters for chunk */
+} H5D_farray_filt_elmt_t;
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/* Fixed Array iterator callbacks */
+static int H5D__farray_idx_iterate_cb(hsize_t idx, const void *_elmt, void *_udata);
+static int H5D__farray_idx_delete_cb(hsize_t idx, const void *_elmt, void *_udata);
+
+/* Fixed array class callbacks for chunks w/o filters */
+static void *H5D__farray_crt_context(void *udata);
+static herr_t H5D__farray_dst_context(void *ctx);
+static herr_t H5D__farray_fill(void *nat_blk, size_t nelmts);
+static herr_t H5D__farray_encode(void *raw, const void *elmt, size_t nelmts,
+ void *ctx);
+static herr_t H5D__farray_decode(const void *raw, void *elmt, size_t nelmts,
+ void *ctx);
+static herr_t H5D__farray_debug(FILE *stream, int indent, int fwidth,
+ hsize_t idx, const void *elmt);
+static void *H5D__farray_crt_dbg_context(H5F_t *f, hid_t dxpl_id,
+ haddr_t obj_addr);
+static herr_t H5D__farray_dst_dbg_context(void *dbg_ctx);
+
+/* Fixed array class callbacks for chunks w/filters */
+/* (some shared with callbacks for chunks w/o filters) */
+static herr_t H5D__farray_filt_fill(void *nat_blk, size_t nelmts);
+static herr_t H5D__farray_filt_encode(void *raw, const void *elmt, size_t nelmts,
+ void *ctx);
+static herr_t H5D__farray_filt_decode(const void *raw, void *elmt, size_t nelmts,
+ void *ctx);
+static herr_t H5D__farray_filt_debug(FILE *stream, int indent, int fwidth,
+ hsize_t idx, const void *elmt);
+
+/* Chunked layout indexing callbacks */
+static herr_t H5D__farray_idx_init(const H5D_chk_idx_info_t *idx_info,
+ const H5S_t *space, haddr_t dset_ohdr_addr);
+static herr_t H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info);
+static hbool_t H5D__farray_idx_is_space_alloc(const H5O_storage_chunk_t *storage);
+static herr_t H5D__farray_idx_insert(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_ud_t *udata, const H5D_t *dset);
+static herr_t H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_ud_t *udata);
+static int H5D__farray_idx_iterate(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_cb_func_t chunk_cb, void *chunk_udata);
+static herr_t H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_common_ud_t *udata);
+static herr_t H5D__farray_idx_delete(const H5D_chk_idx_info_t *idx_info);
+static herr_t H5D__farray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
+ const H5D_chk_idx_info_t *idx_info_dst);
+static herr_t H5D__farray_idx_copy_shutdown(H5O_storage_chunk_t *storage_src,
+ H5O_storage_chunk_t *storage_dst, hid_t dxpl_id);
+static herr_t H5D__farray_idx_size(const H5D_chk_idx_info_t *idx_info,
+ hsize_t *size);
+static herr_t H5D__farray_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr);
+static herr_t H5D__farray_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream);
+static herr_t H5D__farray_idx_dest(const H5D_chk_idx_info_t *idx_info);
+
+/* Generic extensible array routines */
+static herr_t H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info);
+static herr_t H5D__farray_idx_depend(const H5D_chk_idx_info_t *idx_info);
+static herr_t H5D__farray_idx_undepend(const H5D_chk_idx_info_t *idx_info);
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/* Fixed array indexed chunk I/O ops */
+/* (one callback per slot of H5D_chunk_ops_t; the 'resize' slot is NULL,
+ *  i.e. this index provides no resize operation)
+ */
+const H5D_chunk_ops_t H5D_COPS_FARRAY[1] = {{
+    TRUE, /* Fixed array indices support SWMR access */
+    H5D__farray_idx_init, /* init */
+    H5D__farray_idx_create, /* create */
+    H5D__farray_idx_is_space_alloc, /* is_space_alloc */
+    H5D__farray_idx_insert, /* insert */
+    H5D__farray_idx_get_addr, /* get_addr */
+    NULL, /* resize */
+    H5D__farray_idx_iterate, /* iterate */
+    H5D__farray_idx_remove, /* remove */
+    H5D__farray_idx_delete, /* delete */
+    H5D__farray_idx_copy_setup, /* copy_setup */
+    H5D__farray_idx_copy_shutdown, /* copy_shutdown */
+    H5D__farray_idx_size, /* size */
+    H5D__farray_idx_reset, /* reset */
+    H5D__farray_idx_dump, /* dump */
+    H5D__farray_idx_dest /* destroy */
+}};
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+/* Fixed array class callbacks for dataset chunks w/o filters */
+/* (unfiltered elements are bare chunk addresses, so the native element is a
+ *  single haddr_t)
+ */
+const H5FA_class_t H5FA_CLS_CHUNK[1]={{
+    H5FA_CLS_CHUNK_ID, /* Type of fixed array */
+    "Chunk w/o filters", /* Name of fixed array class */
+    sizeof(haddr_t), /* Size of native element */
+    H5D__farray_crt_context, /* Create context */
+    H5D__farray_dst_context, /* Destroy context */
+    H5D__farray_fill, /* Fill block of missing elements callback */
+    H5D__farray_encode, /* Element encoding callback */
+    H5D__farray_decode, /* Element decoding callback */
+    H5D__farray_debug, /* Element debugging callback */
+    H5D__farray_crt_dbg_context, /* Create debugging context */
+    H5D__farray_dst_dbg_context /* Destroy debugging context */
+}};
+
+/* Fixed array class callbacks for dataset chunks w/filters */
+/* (shares the context & debugging-context callbacks with the unfiltered
+ *  class; only the fill/encode/decode/debug callbacks differ, since filtered
+ *  elements also carry a size & filter mask)
+ */
+const H5FA_class_t H5FA_CLS_FILT_CHUNK[1]={{
+    H5FA_CLS_FILT_CHUNK_ID, /* Type of fixed array */
+    "Chunk w/filters", /* Name of fixed array class */
+    sizeof(H5D_farray_filt_elmt_t), /* Size of native element */
+    H5D__farray_crt_context, /* Create context */
+    H5D__farray_dst_context, /* Destroy context */
+    H5D__farray_filt_fill, /* Fill block of missing elements callback */
+    H5D__farray_filt_encode, /* Element encoding callback */
+    H5D__farray_filt_decode, /* Element decoding callback */
+    H5D__farray_filt_debug, /* Element debugging callback */
+    H5D__farray_crt_dbg_context, /* Create debugging context */
+    H5D__farray_dst_dbg_context /* Destroy debugging context */
+}};
+
+/* Declare a free list to manage the H5D_farray_ctx_t struct */
+H5FL_DEFINE_STATIC(H5D_farray_ctx_t);
+
+/* Declare a free list to manage the H5D_farray_ctx_ud_t struct */
+H5FL_DEFINE_STATIC(H5D_farray_ctx_ud_t);
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_crt_context
+ *
+ * Purpose: Create context for callbacks
+ *
+ * Return: Success: non-NULL
+ * Failure: NULL
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5D__farray_crt_context(void *_udata)
+{
+    H5D_farray_ctx_ud_t *udata = (H5D_farray_ctx_ud_t *)_udata; /* User data for fixed array context */
+    H5D_farray_ctx_t *new_ctx = NULL;   /* New fixed array callback context */
+    size_t enc_size;                    /* # of bytes needed to encode a chunk size */
+    void *ret_value = NULL;             /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(udata);
+    HDassert(udata->f);
+    HDassert(udata->chunk_size > 0);
+
+    /* Bytes needed to encode a chunk's size: one extra byte allows for a
+     * filter that expands the chunk, capped at 8 bytes (a full uint64_t).
+     */
+    enc_size = 1 + ((H5VM_log2_gen((uint64_t)udata->chunk_size) + 8) / 8);
+    if(enc_size > 8)
+        enc_size = 8;
+
+    /* Allocate & initialize the context */
+    if(NULL == (new_ctx = H5FL_MALLOC(H5D_farray_ctx_t)))
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate fixed array client callback context")
+    new_ctx->file_addr_len = H5F_SIZEOF_ADDR(udata->f);
+    new_ctx->chunk_size_len = enc_size;
+
+    /* Success */
+    ret_value = new_ctx;
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_crt_context() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_dst_context
+ *
+ * Purpose: Destroy context for callbacks
+ *
+ * Return: Success: non-NULL
+ * Failure: NULL
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_dst_context(void *_ctx)
+{
+    H5D_farray_ctx_t *ctx = (H5D_farray_ctx_t *)_ctx;   /* Fixed array callback context */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* The context must have been created by H5D__farray_crt_context() */
+    HDassert(ctx);
+
+    /* Return the context structure to its free list */
+    ctx = H5FL_FREE(H5D_farray_ctx_t, ctx);
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_dst_context() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_fill
+ *
+ * Purpose: Fill "missing elements" in block of elements
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_fill(void *nat_blk, size_t nelmts)
+{
+    haddr_t fill_val = H5D_FARRAY_FILL; /* Address value marking an unset element */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity checks */
+    HDassert(nat_blk);
+    HDassert(nelmts);
+
+    /* Replicate the fill address across the whole block of native elements */
+    H5VM_array_fill(nat_blk, &fill_val, H5FA_CLS_CHUNK->nat_elmt_size, nelmts);
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_fill() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_encode
+ *
+ * Purpose: Encode an element from "native" to "raw" form
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_encode(void *raw, const void *_elmt, size_t nelmts, void *_ctx)
+{
+    H5D_farray_ctx_t *ctx = (H5D_farray_ctx_t *)_ctx;   /* Fixed array callback context */
+    const haddr_t *elmt = (const haddr_t *)_elmt;       /* Native elements to encode */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity checks */
+    HDassert(raw);
+    HDassert(elmt);
+    HDassert(nelmts);
+    HDassert(ctx);
+
+    /* Encode each native element into the raw buffer.  H5F_addr_encode_len()
+     * advances 'raw' past the bytes it writes, so only the element pointer
+     * and count need stepping here.
+     */
+    for(; nelmts > 0; nelmts--, elmt++)
+        H5F_addr_encode_len(ctx->file_addr_len, (uint8_t **)&raw, *elmt);
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_encode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_decode
+ *
+ * Purpose: Decode an element from "raw" to "native" form
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_decode(const void *_raw, void *_elmt, size_t nelmts, void *_ctx)
+{
+    H5D_farray_ctx_t *ctx = (H5D_farray_ctx_t *)_ctx;   /* Fixed array callback context */
+    haddr_t *elmt = (haddr_t *)_elmt;           /* Convenience pointer to native elements */
+    const uint8_t *raw = (const uint8_t *)_raw; /* Convenience pointer to raw elements */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity checks */
+    HDassert(raw);
+    HDassert(elmt);
+    HDassert(nelmts);
+    HDassert(ctx);      /* 'ctx' is dereferenced below; assert it, matching H5D__farray_encode() */
+
+    /* Decode raw elements into native elements */
+    while(nelmts) {
+        /* Decode element */
+        /* (advances 'raw' pointer) */
+        H5F_addr_decode_len(ctx->file_addr_len, &raw, elmt);
+
+        /* Advance native element pointer */
+        elmt++;
+
+        /* Decrement # of elements to decode */
+        nelmts--;
+    } /* end while */
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_decode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_debug
+ *
+ * Purpose: Display an element for debugging
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_debug(FILE *stream, int indent, int fwidth, hsize_t idx,
+    const void *elmt)
+{
+    char temp_str[128]; /* Temporary string, for formatting */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity checks */
+    HDassert(stream);
+    HDassert(elmt);
+
+    /* Print element.  Use HDsnprintf (not bare sprintf) to bound the write to
+     * temp_str and to match the library's HD-wrapped call convention.
+     */
+    HDsnprintf(temp_str, sizeof(temp_str), "Element #%llu:", (unsigned long long)idx);
+    HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, temp_str,
+        *(const haddr_t *)elmt);
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_debug() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_crt_dbg_context
+ *
+ * Purpose: Create context for debugging callback
+ * (get the layout message in the specified object header)
+ *
+ * Return: Success: non-NULL
+ * Failure: NULL
+ *
+ * Programmer: Vailin Choi
+ * 5th August, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5D__farray_crt_dbg_context(H5F_t *f, hid_t dxpl_id, haddr_t obj_addr)
+{
+    H5D_farray_ctx_ud_t *dbg_ctx = NULL; /* Context for fixed array callback */
+    H5O_loc_t obj_loc; /* Pointer to an object's location */
+    hbool_t obj_opened = FALSE; /* Flag to indicate that the object header was opened */
+    H5O_layout_t layout; /* Layout message */
+    void *ret_value = NULL; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(f);
+    HDassert(H5F_addr_defined(obj_addr));
+
+    /* Allocate context for debugging callback */
+    if(NULL == (dbg_ctx = H5FL_MALLOC(H5D_farray_ctx_ud_t)))
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate fixed array client callback context")
+
+    /* Set up the object header location info */
+    H5O_loc_reset(&obj_loc);
+    obj_loc.file = f;
+    obj_loc.addr = obj_addr;
+
+    /* Open the object header where the layout message resides */
+    if(H5O_open(&obj_loc) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, NULL, "can't open object header")
+    obj_opened = TRUE;
+
+    /* Read the layout message */
+    if(NULL == H5O_msg_read(&obj_loc, H5O_LAYOUT_ID, &layout, dxpl_id))
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get layout info")
+
+    /* close the object header */
+    /* NOTE(review): obj_opened stays TRUE here, so if this close fails the
+     * error path below will call H5O_close() on the same location again --
+     * confirm whether a failed H5O_close() leaves the header open or whether
+     * obj_opened should be cleared first.
+     */
+    if(H5O_close(&obj_loc) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header")
+
+    /* Create user data (only the chunk size from the layout message is kept) */
+    dbg_ctx->f = f;
+    dbg_ctx->chunk_size = layout.u.chunk.size;
+
+    /* Set return value */
+    ret_value = dbg_ctx;
+
+done:
+    /* Cleanup on error */
+    if(ret_value == NULL) {
+        /* Release context structure */
+        if(dbg_ctx)
+            dbg_ctx = H5FL_FREE(H5D_farray_ctx_ud_t, dbg_ctx);
+
+        /* Close object header */
+        if(obj_opened) {
+            if(H5O_close(&obj_loc) < 0)
+                HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header")
+        } /* end if */
+    } /* end if */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_crt_dbg_context() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_dst_dbg_context
+ *
+ * Purpose: Destroy context for debugging callback
+ * (free the layout message from the specified object header)
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * 24th September, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_dst_dbg_context(void *_dbg_ctx)
+{
+    H5D_farray_ctx_ud_t *dbg_ctx = (H5D_farray_ctx_ud_t *)_dbg_ctx; /* Context for fixed array callback */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity checks */
+    HDassert(dbg_ctx);
+
+    /* Release context structure back to its free list
+     * (allocated by H5D__farray_crt_dbg_context) */
+    dbg_ctx = H5FL_FREE(H5D_farray_ctx_ud_t, dbg_ctx);
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_dst_dbg_context() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_filt_fill
+ *
+ * Purpose: Fill "missing elements" in block of elements
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_filt_fill(void *nat_blk, size_t nelmts)
+{
+    H5D_farray_filt_elmt_t fill_val = H5D_FARRAY_FILT_FILL;     /* Value to fill elements with */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity checks */
+    HDassert(nat_blk);
+    HDassert(nelmts);
+    /* The fill value must be exactly one native element wide */
+    HDassert(sizeof(fill_val) == H5FA_CLS_FILT_CHUNK->nat_elmt_size);
+
+    /* Replicate the "missing chunk" fill value across all 'nelmts' slots */
+    H5VM_array_fill(nat_blk, &fill_val, H5FA_CLS_FILT_CHUNK->nat_elmt_size, nelmts);
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_filt_fill() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_filt_encode
+ *
+ * Purpose: Encode an element from "native" to "raw" form
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_filt_encode(void *_raw, const void *_elmt, size_t nelmts, void *_ctx)
+{
+    H5D_farray_ctx_t *ctx = (H5D_farray_ctx_t *)_ctx;       /* Fixed array callback context */
+    uint8_t *raw = (uint8_t *)_raw;                         /* Convenience pointer to raw elements */
+    const H5D_farray_filt_elmt_t *elmt = (const H5D_farray_filt_elmt_t *)_elmt;  /* Convenience pointer to native elements */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity checks */
+    HDassert(raw);
+    HDassert(elmt);
+    HDassert(nelmts);
+    HDassert(ctx);
+
+    /* Encode native elements into raw elements */
+    /* (raw layout per element: file address, variable-length chunk size,
+     *  32-bit filter mask -- widths come from the callback context) */
+    while(nelmts) {
+        /* Encode element */
+        /* (advances 'raw' pointer) */
+        H5F_addr_encode_len(ctx->file_addr_len, &raw, elmt->addr);
+        UINT64ENCODE_VAR(raw, elmt->nbytes, ctx->chunk_size_len);
+        UINT32ENCODE(raw, elmt->filter_mask);
+
+        /* Advance native element pointer */
+        elmt++;
+
+        /* Decrement # of elements to encode */
+        nelmts--;
+    } /* end while */
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_filt_encode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_filt_decode
+ *
+ * Purpose: Decode an element from "raw" to "native" form
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_filt_decode(const void *_raw, void *_elmt, size_t nelmts, void *_ctx)
+{
+    H5D_farray_ctx_t *ctx = (H5D_farray_ctx_t *)_ctx;       /* Fixed array callback context */
+    H5D_farray_filt_elmt_t *elmt = (H5D_farray_filt_elmt_t *)_elmt;  /* Convenience pointer to native elements */
+    const uint8_t *raw = (const uint8_t *)_raw;             /* Convenience pointer to raw elements */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity checks */
+    HDassert(raw);
+    HDassert(elmt);
+    HDassert(nelmts);
+    /* 'ctx' is dereferenced below; assert it like H5D__farray_filt_encode does */
+    HDassert(ctx);
+
+    /* Decode raw elements into native elements */
+    /* (raw layout per element: file address, variable-length chunk size,
+     *  32-bit filter mask -- widths come from the callback context, and must
+     *  match the layout produced by H5D__farray_filt_encode) */
+    while(nelmts) {
+        /* Decode element */
+        /* (advances 'raw' pointer) */
+        H5F_addr_decode_len(ctx->file_addr_len, &raw, &elmt->addr);
+        UINT64DECODE_VAR(raw, elmt->nbytes, ctx->chunk_size_len);
+        UINT32DECODE(raw, elmt->filter_mask);
+
+        /* Advance native element pointer */
+        elmt++;
+
+        /* Decrement # of elements to decode */
+        nelmts--;
+    } /* end while */
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_filt_decode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_filt_debug
+ *
+ * Purpose: Display an element for debugging
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_filt_debug(FILE *stream, int indent, int fwidth, hsize_t idx,
+    const void *_elmt)
+{
+    const H5D_farray_filt_elmt_t *elmt = (const H5D_farray_filt_elmt_t *)_elmt;  /* Convenience pointer to native elements */
+    char temp_str[128];         /* Temporary string, for formatting */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity checks */
+    HDassert(stream);
+    HDassert(elmt);
+
+    /* Print element as "{address, size, filter mask}" */
+    /* (use HDsnprintf, not sprintf, so a pathological 'idx' can't overflow
+     *  the fixed-size buffer; matches the file's HD* wrapper convention) */
+    HDsnprintf(temp_str, sizeof(temp_str), "Element #%llu:", (unsigned long long)idx);
+    HDfprintf(stream, "%*s%-*s {%a, %u, %0x}\n", indent, "", fwidth, temp_str,
+        elmt->addr, elmt->nbytes, elmt->filter_mask);
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_filt_debug() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_depend
+ *
+ * Purpose: Create flush dependency between fixed array and dataset's
+ * object header.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer:	Copied and modified from H5Dearray.c
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_depend(const H5D_chk_idx_info_t *idx_info)
+{
+    H5O_loc_t oloc;             /* Temporary object header location for dataset */
+    H5O_proxy_t *oh_proxy = NULL;       /* Dataset's object header proxy */
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    /* (only called for SWMR-write files with an allocated, open fixed array) */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->idx_type);
+    HDassert(idx_info->storage);
+    HDassert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type);
+    HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(idx_info->storage->u.farray.fa);
+
+    /* Set up object header location for dataset */
+    H5O_loc_reset(&oloc);
+    oloc.file = idx_info->f;
+    oloc.addr = idx_info->storage->u.farray.dset_ohdr_addr;
+
+    /* Pin the dataset's object header proxy */
+    if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy(&oloc, idx_info->dxpl_id)))
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header proxy")
+
+    /* Make the fixed array a child flush dependency of the dataset's object header */
+    if(H5FA_depend((H5AC_info_t *)oh_proxy, idx_info->storage->u.farray.fa) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header")
+
+done:
+    /* Unpin the dataset's object header proxy */
+    /* (unconditional: the pin is only needed while setting up the dependency) */
+    if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+        HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header proxy")
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_idx_depend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_undepend
+ *
+ * Purpose: Remove flush dependency between fixed array and dataset's
+ * object header.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Copied and modified from H5Dearray.c
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_undepend(const H5D_chk_idx_info_t *idx_info)
+{
+    H5O_loc_t oloc;             /* Temporary object header location for dataset */
+    H5O_proxy_t *oh_proxy = NULL;       /* Dataset's object header proxy */
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    /* (mirror of H5D__farray_idx_depend: SWMR-write file, allocated and open
+     *  fixed array) */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->idx_type);
+    HDassert(idx_info->storage);
+    HDassert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type);
+    HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(idx_info->storage->u.farray.fa);
+
+    /* Set up object header location for dataset */
+    H5O_loc_reset(&oloc);
+    oloc.file = idx_info->f;
+    oloc.addr = idx_info->storage->u.farray.dset_ohdr_addr;
+
+    /* Pin the dataset's object header proxy */
+    if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy(&oloc, idx_info->dxpl_id)))
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header proxy")
+
+    /* Remove the fixed array as a child flush dependency of the dataset's object header */
+    if(H5FA_undepend((H5AC_info_t *)oh_proxy, idx_info->storage->u.farray.fa) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTUNDEPEND, FAIL, "unable to remove flush dependency on object header")
+
+done:
+    /* Unpin the dataset's object header proxy */
+    if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+        HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header proxy")
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_idx_undepend() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_init
+ *
+ * Purpose: Initialize the indexing information for a dataset.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Wednesday, May 23, 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t H5_ATTR_UNUSED *space, haddr_t dset_ohdr_addr)
+{
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Check args */
+    HDassert(idx_info);
+    HDassert(idx_info->storage);
+    HDassert(H5F_addr_defined(dset_ohdr_addr));
+
+    /* Stash the dataset's object header address; it is used later by
+     * H5D__farray_idx_depend/undepend to set up SWMR flush dependencies */
+    idx_info->storage->u.farray.dset_ohdr_addr = dset_ohdr_addr;
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_idx_init() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_open
+ *
+ * Purpose: Opens an existing fixed array and initializes
+ * the layout struct with information about the storage.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info)
+{
+    H5D_farray_ctx_ud_t udata;  /* User data for fixed array open call */
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    /* (the array must exist on disk but not be open yet) */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->idx_type);
+    HDassert(idx_info->storage);
+    HDassert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type);
+    HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(NULL == idx_info->storage->u.farray.fa);
+
+    /* Set up the user data */
+    udata.f = idx_info->f;
+    udata.chunk_size = idx_info->layout->size;
+
+    /* Open the fixed array for the chunk index */
+    /* (the opened array handle is cached in the storage struct) */
+    if(NULL == (idx_info->storage->u.farray.fa = H5FA_open(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &udata)))
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open fixed array")
+
+    /* Check for SWMR writes to the file */
+    /* (SWMR writers need a flush dependency on the dataset's object header) */
+    if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)
+        if(H5D__farray_idx_depend(idx_info) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_idx_open() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_create
+ *
+ * Purpose: Creates a new indexed-storage fixed array and initializes
+ * the layout struct with information about the storage. The
+ * struct should be immediately written to the object header.
+ *
+ * This function must be called before passing LAYOUT to any of
+ * the other indexed storage functions!
+ *
+ * Return: Non-negative on success (with the LAYOUT argument initialized
+ * and ready to write to an object header). Negative on failure.
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info)
+{
+    H5FA_create_t cparam;       /* Fixed array creation parameters */
+    H5D_farray_ctx_ud_t udata;  /* User data for fixed array create call */
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    /* (the array must not exist yet, and the chunk count must be known) */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+    HDassert(!H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(NULL == idx_info->storage->u.farray.fa);
+    HDassert(idx_info->layout->nchunks);
+
+    /* General parameters */
+    /* (filtered chunks store {address, size, filter mask} per element;
+     *  unfiltered chunks store just the address) */
+    if(idx_info->pline->nused > 0) {
+        unsigned chunk_size_len;        /* Size of encoded chunk size */
+
+        /* Compute the size required for encoding the size of a chunk, allowing
+         * for an extra byte, in case the filter makes the chunk larger.
+         */
+        chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)idx_info->layout->size) + 8) / 8);
+        if(chunk_size_len > 8)
+            chunk_size_len = 8;     /* cap at 8 bytes: max width of a uint64 */
+
+        cparam.cls = H5FA_CLS_FILT_CHUNK;
+        /* raw element = file address + encoded chunk size + 4-byte filter mask */
+        cparam.raw_elmt_size = (uint8_t)(H5F_SIZEOF_ADDR(idx_info->f) + chunk_size_len + 4);
+    } /* end if */
+    else {
+        cparam.cls = H5FA_CLS_CHUNK;
+        cparam.raw_elmt_size = (uint8_t)H5F_SIZEOF_ADDR(idx_info->f);
+    } /* end else */
+    cparam.max_dblk_page_nelmts_bits = idx_info->layout->u.farray.cparam.max_dblk_page_nelmts_bits;
+    HDassert(cparam.max_dblk_page_nelmts_bits > 0);
+    /* Fixed arrays are sized once, at creation, for the max. # of chunks */
+    cparam.nelmts = idx_info->layout->max_nchunks;
+
+    /* Set up the user data */
+    udata.f = idx_info->f;
+    udata.chunk_size = idx_info->layout->size;
+
+    /* Create the fixed array for the chunk index */
+    if(NULL == (idx_info->storage->u.farray.fa = H5FA_create(idx_info->f, idx_info->dxpl_id, &cparam, &udata)))
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create fixed array")
+
+    /* Get the address of the fixed array in file */
+    if(H5FA_get_addr(idx_info->storage->u.farray.fa, &(idx_info->storage->idx_addr)) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query fixed array address")
+
+    /* Check for SWMR writes to the file */
+    if(H5F_INTENT(idx_info->f) & H5F_ACC_SWMR_WRITE)
+        if(H5D__farray_idx_depend(idx_info) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on object header")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_idx_create() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_is_space_alloc
+ *
+ * Purpose: Query if space is allocated for index method
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+H5D__farray_idx_is_space_alloc(const H5O_storage_chunk_t *storage)
+{
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Check args */
+    HDassert(storage);
+
+    /* The index has file space iff its address is defined */
+    FUNC_LEAVE_NOAPI((hbool_t)H5F_addr_defined(storage->idx_addr))
+} /* end H5D__farray_idx_is_space_alloc() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_insert
+ *
+ * Purpose: Insert chunk address into the indexing structure.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; 5 May 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata,
+    const H5D_t H5_ATTR_UNUSED *dset)
+{
+    H5FA_t *fa;                 /* Pointer to fixed array structure */
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+    HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(udata);
+
+    /* Check if the fixed array is open yet */
+    if(NULL == idx_info->storage->u.farray.fa)
+        /* Open the fixed array in file */
+        if(H5D__farray_idx_open(idx_info) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array")
+
+    /* Set convenience pointer to fixed array structure */
+    fa = idx_info->storage->u.farray.fa;
+
+    /* The chunk's file space must already be allocated by the caller */
+    if(!H5F_addr_defined(udata->chunk_block.offset))
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "The chunk should have allocated already")
+    if(udata->chunk_idx != (udata->chunk_idx & 0xffffffff)) /* index must fit in 32 bits (chunk_idx is unsigned) */
+        HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "chunk index must be less than 2^32")
+
+    /* Check for filters on chunks */
+    if(idx_info->pline->nused > 0) {
+        H5D_farray_filt_elmt_t elmt;            /* Fixed array element */
+
+        /* Filtered chunks record address, actual (possibly compressed) size,
+         * and the filter mask */
+        elmt.addr = udata->chunk_block.offset;
+        H5_CHECKED_ASSIGN(elmt.nbytes, uint32_t, udata->chunk_block.length, hsize_t);
+        elmt.filter_mask = udata->filter_mask;
+
+        /* Set the info for the chunk */
+        if(H5FA_set(fa, idx_info->dxpl_id, udata->chunk_idx, &elmt) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set chunk info")
+    } /* end if */
+    else {
+        /* Unfiltered chunks record only the address (size is the fixed
+         * layout chunk size) */
+        if(H5FA_set(fa, idx_info->dxpl_id, udata->chunk_idx, &udata->chunk_block.offset) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set chunk address")
+    } /* end else */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__farray_idx_insert() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_get_addr
+ *
+ * Purpose: Get the file address of a chunk if file space has been
+ * assigned. Save the retrieved information in the udata
+ * supplied.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata)
+{
+    H5FA_t *fa;                 /* Pointer to fixed array structure */
+    hsize_t idx;                /* Array index of chunk */
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+    HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(udata);
+
+    /* Check if the fixed array is open yet */
+    if(NULL == idx_info->storage->u.farray.fa)
+        /* Open the fixed array in file */
+        if(H5D__farray_idx_open(idx_info) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array")
+
+    /* Set convenience pointer to fixed array structure */
+    fa = idx_info->storage->u.farray.fa;
+
+    /* Calculate the linear index of this chunk from its scaled coordinates */
+    idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->max_down_chunks, udata->common.scaled);
+
+    udata->chunk_idx = idx;
+
+    /* Check for filters on chunks */
+    if(idx_info->pline->nused > 0) {
+        H5D_farray_filt_elmt_t elmt;            /* Fixed array element */
+
+        /* Get the information for the chunk */
+        if(H5FA_get(fa, idx_info->dxpl_id, idx, &elmt) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info")
+
+        /* Set the info for the chunk */
+        udata->chunk_block.offset = elmt.addr;
+        udata->chunk_block.length = elmt.nbytes;
+        udata->filter_mask = elmt.filter_mask;
+    } /* end if */
+    else {
+        /* Get the address for the chunk */
+        if(H5FA_get(fa, idx_info->dxpl_id, idx, &udata->chunk_block.offset) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address")
+
+        /* Update the other (constant) information for the chunk */
+        /* (unfiltered chunks are always exactly the layout chunk size) */
+        udata->chunk_block.length = idx_info->layout->size;
+        udata->filter_mask = 0;
+    } /* end else */
+
+    /* An undefined address means the chunk hasn't been allocated yet;
+     * signal that with a zero length */
+    if(!H5F_addr_defined(udata->chunk_block.offset))
+        udata->chunk_block.length = 0;
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__farray_idx_get_addr() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_iterate_cb
+ *
+ * Purpose: Callback routine for fixed array element iteration.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__farray_idx_iterate_cb(hsize_t H5_ATTR_UNUSED idx, const void *_elmt, void *_udata)
+{
+    H5D_farray_it_ud_t *udata = (H5D_farray_it_ud_t *)_udata;       /* User data */
+    unsigned ndims;                     /* Rank of chunk */
+    int curr_dim;                       /* Current dimension */
+    int ret_value = H5_ITER_CONT;       /* Return value */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Compose generic chunk record for callback */
+    /* (filtered elements carry size/mask; unfiltered elements are bare
+     *  addresses and the caller pre-filled nbytes/filter_mask) */
+    if(udata->filtered) {
+        const H5D_farray_filt_elmt_t *filt_elmt = (const H5D_farray_filt_elmt_t *)_elmt;
+
+        udata->chunk_rec.chunk_addr = filt_elmt->addr;
+        udata->chunk_rec.nbytes = filt_elmt->nbytes;
+        udata->chunk_rec.filter_mask = filt_elmt->filter_mask;
+    } /* end if */
+    else
+        udata->chunk_rec.chunk_addr = *(const haddr_t *)_elmt;
+
+    /* Make "generic chunk" callback */
+    /* (skipped for unallocated chunks, i.e. undefined addresses) */
+    if(H5F_addr_defined(udata->chunk_rec.chunk_addr))
+        if((ret_value = (udata->cb)(&udata->chunk_rec, udata->udata)) < 0)
+            HERROR(H5E_DATASET, H5E_CALLBACK, "failure in generic chunk iterator callback");
+
+    /* Update coordinates of chunk in dataset */
+    /* (odometer-style increment over the scaled coordinates, starting at the
+     *  fastest-changing dimension and carrying into slower ones) */
+    ndims = udata->common.layout->ndims - 1;
+    HDassert(ndims > 0);
+    curr_dim = (int)(ndims - 1);
+    while(curr_dim >= 0) {
+        /* Increment coordinate in current dimension */
+        udata->chunk_rec.scaled[curr_dim]++;
+
+        /* Check if we went off the end of the current dimension */
+        if(udata->chunk_rec.scaled[curr_dim] >= udata->common.layout->chunks[curr_dim]) {
+            /* Reset coordinate & move to next faster dimension */
+            udata->chunk_rec.scaled[curr_dim] = 0;
+            curr_dim--;
+        } /* end if */
+        else
+            break;
+    } /* end while */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__farray_idx_iterate_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_iterate
+ *
+ * Purpose: Iterate over the chunks in an index, making a callback
+ * for each one.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__farray_idx_iterate(const H5D_chk_idx_info_t *idx_info,
+    H5D_chunk_cb_func_t chunk_cb, void *chunk_udata)
+{
+    H5FA_t *fa;                 /* Pointer to fixed array structure */
+    H5FA_stat_t fa_stat;        /* Fixed array statistics */
+    int ret_value = H5_ITER_CONT;       /* Return value */
+    /* (initialized to success, not FAIL: an index with zero elements has
+     *  nothing to iterate over and must not report failure; error paths
+     *  below still set FAIL via HGOTO_ERROR) */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+    HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(chunk_cb);
+    HDassert(chunk_udata);
+
+    /* Check if the fixed array is open yet */
+    if(NULL == idx_info->storage->u.farray.fa)
+        /* Open the fixed array in file */
+        if(H5D__farray_idx_open(idx_info) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array")
+
+    /* Set convenience pointer to fixed array structure */
+    fa = idx_info->storage->u.farray.fa;
+
+    /* Get the fixed array statistics */
+    if(H5FA_get_stats(fa, &fa_stat) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query fixed array statistics")
+
+    /* Check if there are any array elements */
+    if(fa_stat.nelmts > 0) {
+        H5D_farray_it_ud_t udata;       /* User data for iteration callback */
+
+        /* Initialize userdata */
+        HDmemset(&udata, 0, sizeof udata);
+        udata.common.layout = idx_info->layout;
+        udata.common.storage = idx_info->storage;
+        udata.common.rdcc = NULL;
+        HDmemset(&udata.chunk_rec, 0, sizeof(udata.chunk_rec));
+        udata.filtered = (idx_info->pline->nused > 0);
+        if(!udata.filtered) {
+            /* Unfiltered chunks all have the same (constant) size & mask */
+            udata.chunk_rec.nbytes = idx_info->layout->size;
+            udata.chunk_rec.filter_mask = 0;
+        } /* end if */
+        udata.cb = chunk_cb;
+        udata.udata = chunk_udata;
+
+        /* Iterate over the fixed array elements */
+        if((ret_value = H5FA_iterate(fa, idx_info->dxpl_id, H5D__farray_idx_iterate_cb, &udata)) < 0)
+            HERROR(H5E_DATASET, H5E_BADITER, "unable to iterate over fixed array chunk index");
+    } /* end if */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_idx_iterate() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_remove
+ *
+ * Purpose: Remove chunk from index.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata)
+{
+    H5FA_t *fa;                 /* Pointer to fixed array structure */
+    hsize_t idx;                /* Array index of chunk */
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+    HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(udata);
+
+    /* Check if the fixed array is open yet */
+    if(NULL == idx_info->storage->u.farray.fa)
+        /* Open the fixed array in file */
+        if(H5D__farray_idx_open(idx_info) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array")
+
+    /* Set convenience pointer to fixed array structure */
+    fa = idx_info->storage->u.farray.fa;
+
+    /* Calculate the linear index of this chunk from its scaled coordinates */
+    idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->max_down_chunks, udata->scaled);
+
+    /* Check for filters on chunks */
+    /* (both branches free the chunk's raw data, then reset the array element
+     *  to the "missing chunk" state -- the element itself stays in the
+     *  fixed array, since fixed arrays never shrink) */
+    if(idx_info->pline->nused > 0) {
+        H5D_farray_filt_elmt_t elmt;    /* Fixed array element */
+
+        /* Get the info about the chunk for the index */
+        if(H5FA_get(fa, idx_info->dxpl_id, idx, &elmt) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info")
+
+        /* Remove raw data chunk from file */
+        HDassert(H5F_addr_defined(elmt.addr));
+        H5_CHECK_OVERFLOW(elmt.nbytes, /*From: */uint32_t, /*To: */hsize_t);
+        if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, elmt.addr, (hsize_t)elmt.nbytes) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+
+        /* Reset the info about the chunk for the index */
+        elmt.addr = HADDR_UNDEF;
+        elmt.nbytes = 0;
+        elmt.filter_mask = 0;
+        if(H5FA_set(fa, idx_info->dxpl_id, idx, &elmt) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to reset chunk info")
+    } /* end if */
+    else {
+        haddr_t addr = HADDR_UNDEF;     /* Chunk address */
+
+        /* Get the address of the chunk for the index */
+        if(H5FA_get(fa, idx_info->dxpl_id, idx, &addr) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address")
+
+        /* Remove raw data chunk from file */
+        /* (unfiltered chunks are always the fixed layout chunk size) */
+        HDassert(H5F_addr_defined(addr));
+        H5_CHECK_OVERFLOW(idx_info->layout->size, /*From: */uint32_t, /*To: */hsize_t);
+        if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, addr, (hsize_t)idx_info->layout->size) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk")
+
+        /* Reset the address of the chunk for the index */
+        addr = HADDR_UNDEF;
+        if(H5FA_set(fa, idx_info->dxpl_id, idx, &addr) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to reset chunk address")
+    } /* end else */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__farray_idx_remove() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_delete_cb
+ *
+ * Purpose: Delete space for chunk in file
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__farray_idx_delete_cb(hsize_t H5_ATTR_UNUSED idx, const void *_elmt, void *_udata)
+{
+    H5D_farray_del_ud_t *udata = (H5D_farray_del_ud_t *)_udata;     /* User data for callback */
+    haddr_t chunk_addr;         /* Address of chunk */
+    uint32_t nbytes;            /* Size of chunk */
+    int ret_value = H5_ITER_CONT;       /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(_elmt);
+    HDassert(udata);
+    HDassert(udata->f);
+
+    /* Check for filtered elements */
+    /* (filtered elements carry their own size; unfiltered chunks all share
+     *  the constant size stashed in udata->unfilt_size) */
+    if(udata->filtered) {
+        const H5D_farray_filt_elmt_t *filt_elmt = (const H5D_farray_filt_elmt_t *)_elmt;
+
+        chunk_addr = filt_elmt->addr;
+        nbytes = filt_elmt->nbytes;
+    } /* end if */
+    else {
+        chunk_addr = *(const haddr_t *)_elmt;
+        nbytes = udata->unfilt_size;
+    } /* end else */
+
+    /* Remove raw data chunk from file */
+    H5_CHECK_OVERFLOW(nbytes, /*From: */uint32_t, /*To: */hsize_t);
+    if(H5MF_xfree(udata->f, H5FD_MEM_DRAW, udata->dxpl_id, chunk_addr, (hsize_t)nbytes) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, H5_ITER_ERROR, "unable to free chunk")
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_idx_delete_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__farray_idx_delete
+ *
+ * Purpose: Delete index and raw data storage for entire dataset
+ * (i.e. all chunks)
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi
+ * Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_delete(const H5D_chk_idx_info_t *idx_info)
+{
+    herr_t ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+
+    /* Check if the index data structure has been allocated */
+    /* (deleting an unallocated index is a no-op) */
+    if(H5F_addr_defined(idx_info->storage->idx_addr)) {
+        H5FA_t *fa;             /* Pointer to fixed array structure */
+        H5FA_stat_t fa_stat;    /* Fixed array statistics */
+        H5D_farray_ctx_ud_t ctx_udata;  /* User data for fixed array open call */
+
+        /* Check if the fixed array is open yet */
+        if(NULL == idx_info->storage->u.farray.fa)
+            /* Open the fixed array in file */
+            if(H5D__farray_idx_open(idx_info) < 0)
+                HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array")
+
+        /* Set convenience pointer to fixed array structure */
+        fa = idx_info->storage->u.farray.fa;
+
+        /* Get the fixed array statistics */
+        if(H5FA_get_stats(fa, &fa_stat) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query fixed array statistics")
+
+        /* Check if there are any array elements */
+        if(fa_stat.nelmts > 0) {
+            H5D_farray_del_ud_t udata;  /* User data for callback */
+
+            /* Initialize user data for callback */
+            udata.f = idx_info->f;
+            udata.dxpl_id = idx_info->dxpl_id;
+            udata.filtered = (idx_info->pline->nused > 0);
+            udata.unfilt_size = idx_info->layout->size;
+
+            /* Iterate over the chunk addresses in the fixed array, deleting each chunk */
+            if(H5FA_iterate(fa, idx_info->dxpl_id, H5D__farray_idx_delete_cb, &udata) < 0)
+                HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk addresses")
+        } /* end if */
+
+        /* Close fixed array */
+        /* (must be closed before the array itself can be deleted from file) */
+        if(H5FA_close(idx_info->storage->u.farray.fa, idx_info->dxpl_id) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array")
+        idx_info->storage->u.farray.fa = NULL;
+
+        /* Set up the user data */
+        ctx_udata.f = idx_info->f;
+        ctx_udata.chunk_size = idx_info->layout->size;
+
+        /* Delete fixed array */
+        if(H5FA_delete(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &ctx_udata) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to delete chunk fixed array")
+        idx_info->storage->idx_addr = HADDR_UNDEF;
+    } /* end if */
+    else
+        /* No index on disk => there must be no open array handle either */
+        HDassert(NULL == idx_info->storage->u.farray.fa);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_idx_delete() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5D__farray_idx_copy_setup
+ *
+ * Purpose:     Set up any necessary information for copying chunks
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  Vailin Choi
+ *              Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
+    const H5D_chk_idx_info_t *idx_info_dst)
+{
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    HDassert(idx_info_src);
+    HDassert(idx_info_src->f);
+    HDassert(idx_info_src->pline);
+    HDassert(idx_info_src->layout);
+    HDassert(idx_info_src->storage);
+    HDassert(idx_info_dst);
+    HDassert(idx_info_dst->f);
+    HDassert(idx_info_dst->pline);
+    HDassert(idx_info_dst->layout);
+    HDassert(idx_info_dst->storage);
+    HDassert(!H5F_addr_defined(idx_info_dst->storage->idx_addr));
+
+    /* Check if the source fixed array is open yet */
+    if(NULL == idx_info_src->storage->u.farray.fa)
+        /* Open the fixed array in file */
+        if(H5D__farray_idx_open(idx_info_src) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array")
+
+    /* Set copied metadata tag */
+    H5_BEGIN_TAG(idx_info_dst->dxpl_id, H5AC__COPIED_TAG, FAIL);
+
+    /* Create the fixed array that describes chunked storage in the dest. file */
+    /* (Use the _TAG variant of the error macro so the metadata tag is reset on failure) */
+    if(H5D__farray_idx_create(idx_info_dst) < 0)
+        HGOTO_ERROR_TAG(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunked storage")
+    HDassert(H5F_addr_defined(idx_info_dst->storage->idx_addr));
+
+    /* Reset metadata tag */
+    H5_END_TAG(FAIL);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_idx_copy_setup() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5D__farray_idx_copy_shutdown
+ *
+ * Purpose:     Shut down information from copying chunks (close both the
+ *              source & destination fixed arrays)
+ *
+ * Programmer:  Vailin Choi
+ *              Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_copy_shutdown(H5O_storage_chunk_t *storage_src,
+    H5O_storage_chunk_t *storage_dst, hid_t dxpl_id)
+{
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args: both fixed arrays must be open (opened by copy_setup) */
+    HDassert(storage_src);
+    HDassert(storage_src->u.farray.fa);
+    HDassert(storage_dst);
+    HDassert(storage_dst->u.farray.fa);
+
+    /* Close fixed arrays, resetting the cached pointers */
+    if(H5FA_close(storage_src->u.farray.fa, dxpl_id) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array")
+    storage_src->u.farray.fa = NULL;
+    if(H5FA_close(storage_dst->u.farray.fa, dxpl_id) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array")
+    storage_dst->u.farray.fa = NULL;
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_idx_copy_shutdown() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5D__farray_idx_size
+ *
+ * Purpose:     Retrieve the amount of index storage for chunked dataset
+ *
+ * Return:      Success:        Non-negative
+ *              Failure:        Negative
+ *
+ * Programmer:  Vailin Choi
+ *              Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size)
+{
+    H5FA_t      *fa;                    /* Pointer to fixed array structure */
+    H5FA_stat_t fa_stat;                /* Fixed array statistics */
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+    HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(index_size);
+
+    /* Open the fixed array in file */
+    if(H5D__farray_idx_open(idx_info) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array")
+
+    /* Set convenience pointer to fixed array structure */
+    fa = idx_info->storage->u.farray.fa;
+
+    /* Get the fixed array statistics */
+    if(H5FA_get_stats(fa, &fa_stat) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query fixed array statistics")
+
+    /* Index storage is the fixed array header plus its data block(s) */
+    *index_size = fa_stat.hdr_size;
+    *index_size += fa_stat.dblk_size;
+
+done:
+    /* Close the fixed array on the way out, on both success & failure paths.
+     * (Must use HDONE_ERROR here: HGOTO_ERROR would 'goto done' from within
+     * the done: section, re-entering it.) */
+    if(idx_info->storage->u.farray.fa) {
+        if(H5FA_close(idx_info->storage->u.farray.fa, idx_info->dxpl_id) < 0)
+            HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array")
+        idx_info->storage->u.farray.fa = NULL;
+    } /* end if */
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_idx_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5D__farray_idx_reset
+ *
+ * Purpose:     Reset indexing information.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  Vailin Choi
+ *              Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr)
+{
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Check args */
+    HDassert(storage);
+
+    /* Forget the open fixed array; drop the on-disk index address only when requested */
+    if(reset_addr)
+        storage->idx_addr = HADDR_UNDEF;
+    storage->u.farray.fa = NULL;
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_idx_reset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5D__farray_idx_dump
+ *
+ * Purpose:     Dump indexing information to a stream.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  Vailin Choi
+ *              Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream)
+{
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Check args */
+    HDassert(storage);
+    HDassert(stream);
+
+    /* ("%a" is presumably HDfprintf's haddr_t conversion - not std C printf) */
+    HDfprintf(stream, "    Address: %a\n", storage->idx_addr);
+
+    FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__farray_idx_dump() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5D__farray_idx_dest
+ *
+ * Purpose:     Release indexing information in memory.
+ *
+ * Return:      Non-negative on success/Negative on failure
+ *
+ * Programmer:  Vailin Choi
+ *              Thursday, April 30, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__farray_idx_dest(const H5D_chk_idx_info_t *idx_info)
+{
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Check args */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->storage);
+
+    /* Check if the fixed array is open (nothing to release otherwise) */
+    if(idx_info->storage->u.farray.fa) {
+        /* Close fixed array & reset the cached pointer */
+        if(H5FA_close(idx_info->storage->u.farray.fa, idx_info->dxpl_id) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array")
+        idx_info->storage->u.farray.fa = NULL;
+    } /* end if */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__farray_idx_dest() */
+
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 4ec48bb..8073d9d 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -61,12 +61,14 @@ static herr_t H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id,
const H5T_t *type);
static herr_t H5D__cache_dataspace_info(const H5D_t *dset);
static herr_t H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space);
+static herr_t H5D__swmr_setup(const H5D_t *dset, hid_t dxpl_id);
+static herr_t H5D__swmr_teardown(const H5D_t *dataset, hid_t dxpl_id);
static herr_t H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset,
hid_t dapl_id);
static herr_t H5D__open_oid(H5D_t *dataset, hid_t dapl_id, hid_t dxpl_id);
static herr_t H5D__init_storage(const H5D_t *dataset, hbool_t full_overwrite,
hsize_t old_dim[], hid_t dxpl_id);
-
+static herr_t H5D__append_flush_setup(H5D_t *dset, hid_t dapl_id);
/*********************/
/* Package Variables */
@@ -646,7 +648,7 @@ H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id, const H5T_t *type)
{
htri_t relocatable; /* Flag whether the type is relocatable */
htri_t immutable; /* Flag whether the type is immutable */
- hbool_t use_latest_format; /* Flag indicating the newest file format should be used */
+ hbool_t use_latest_format; /* Flag indicating the 'latest datatype version support' is enabled */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -664,8 +666,8 @@ H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id, const H5T_t *type)
if((immutable = H5T_is_immutable(type)) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't check datatype?")
- /* Get the file's 'use the latest version of the format' flag */
- use_latest_format = H5F_USE_LATEST_FORMAT(file);
+ /* Get the file's 'use the latest datatype version support' flag */
+ use_latest_format = H5F_USE_LATEST_FLAGS(file, H5F_LATEST_DATATYPE);
/* Copy the datatype if it's a custom datatype or if it'll change when it's location is changed */
if(!immutable || relocatable || use_latest_format) {
@@ -718,6 +720,7 @@ static herr_t
H5D__cache_dataspace_info(const H5D_t *dset)
{
int sndims; /* Signed number of dimensions of dataspace rank */
+ unsigned u; /* Local index value */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -730,6 +733,10 @@ H5D__cache_dataspace_info(const H5D_t *dset)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't cache dataspace dimensions")
dset->shared->ndims = (unsigned)sndims;
+    /* Compute the initial 'power2up' values */
+ for(u = 0; u < dset->shared->ndims; u++)
+ dset->shared->curr_power2up[u] = H5VM_power2up(dset->shared->curr_dims[u]);
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__cache_dataspace_info() */
@@ -752,7 +759,7 @@ done:
static herr_t
H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space)
{
- hbool_t use_latest_format; /* Flag indicating the newest file format should be used */
+ hbool_t use_latest_format; /* Flag indicating the 'latest dataspace version support' is enabled */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -762,8 +769,8 @@ H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space)
HDassert(dset);
HDassert(space);
- /* Get the file's 'use the latest version of the format' flag */
- use_latest_format = H5F_USE_LATEST_FORMAT(file);
+ /* Get the file's 'use the latest dataspace version support' flag */
+ use_latest_format = H5F_USE_LATEST_FLAGS(file, H5F_LATEST_DATASPACE);
/* Copy dataspace for dataset */
if(NULL == (dset->shared->space = H5S_copy(space, FALSE, TRUE)))
@@ -788,6 +795,105 @@ done:
/*-------------------------------------------------------------------------
+ * Function:    H5D__swmr_setup
+ *
+ * Purpose:     Set up SWMR access for a chunked dataset, if possible
+ *
+ * Return:      Success:        SUCCEED
+ *              Failure:        FAIL
+ *
+ * Programmer:  Quincey Koziol
+ *              Tuesday, April 27, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__swmr_setup(const H5D_t *dataset, hid_t dxpl_id)
+{
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checking */
+    HDassert(dataset);
+
+    /* SWMR is enabled only for chunked datasets whose chunk index supports it
+     * ('can_swim') & only when the file is open for SWMR writing */
+    if(dataset->shared->layout.type == H5D_CHUNKED &&
+            dataset->shared->layout.storage.u.chunk.ops->can_swim &&
+            (H5F_INTENT(dataset->oloc.file) & H5F_ACC_SWMR_WRITE)) {
+        int chunkno;                    /* Object header chunk index for message */
+
+        /* Get object header chunk index for dataspace message */
+        if((chunkno = H5O_msg_get_chunkno(&dataset->oloc, H5O_SDSPACE_ID, dxpl_id)) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to query dataspace chunk index")
+
+        /* Fail currently, if the dataspace message is not in chunk #0 */
+        /* (Note that this could be addressed by moving the dataspace message
+         *      into chunk #0, but that can be hard and we're deferring that
+         *      work for now. -QAK)
+         */
+        if(chunkno > 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "dataspace chunk index must be 0 for SWMR access, chunkno = %d", chunkno)
+
+        /* Pin the object header (unpinned again in H5D__swmr_teardown) */
+        if(NULL == (dataset->shared->oh = H5O_pin(&dataset->oloc, dxpl_id)))
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
+
+        /* Lock dataspace message into chunk #0 */
+        if(H5O_msg_lock(&dataset->oloc, H5O_SDSPACE_ID, dxpl_id) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTLOCK, FAIL, "can't lock dataspace message into object header chunk #0")
+
+        /* Indicate that dataset is set up for SWMR access */
+        dataset->shared->is_swimming = TRUE;
+    } /* end if */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__swmr_setup() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:    H5D__swmr_teardown
+ *
+ * Purpose:     Tear down SWMR access for a chunked dataset.
+ *
+ * Return:      Success:        SUCCEED
+ *              Failure:        FAIL
+ *
+ * Programmer:  Quincey Koziol
+ *              Tuesday, April 27, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__swmr_teardown(const H5D_t *dataset, hid_t dxpl_id)
+{
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checking: must only be called after a successful H5D__swmr_setup */
+    HDassert(dataset);
+    HDassert(dataset->shared->is_swimming);
+    HDassert(dataset->shared->oh);
+
+    /* Unlock dataspace message from chunk #0 (locked in H5D__swmr_setup) */
+    if(H5O_msg_unlock(&dataset->oloc, H5O_SDSPACE_ID, dxpl_id) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTUNLOCK, FAIL, "can't unlock dataspace message from object header chunk #0")
+
+    /* Unpin the object header (pinned in H5D__swmr_setup) */
+    if(H5O_unpin(dataset->shared->oh) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
+
+    /* Indicate that dataset is NOT set up for SWMR access now */
+    dataset->shared->is_swimming = FALSE;
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__swmr_teardown() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5D__update_oh_info
*
* Purpose: Create and fill object header for dataset
@@ -808,7 +914,6 @@ H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset, hid_t dapl_id)
H5O_loc_t *oloc = NULL; /* Dataset's object location */
H5O_layout_t *layout; /* Dataset's layout information */
H5T_t *type; /* Dataset's datatype */
- hbool_t use_latest_format; /* Flag indicating the newest file format should be used */
H5O_fill_t *fill_prop; /* Pointer to dataset's fill value information */
H5D_fill_value_t fill_status; /* Fill value status */
hbool_t fill_changed = FALSE; /* Flag indicating the fill value was changed */
@@ -827,9 +932,6 @@ H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset, hid_t dapl_id)
type = dset->shared->type;
fill_prop = &dset->shared->dcpl_cache.fill;
- /* Get the file's 'use the latest version of the format' flag */
- use_latest_format = H5F_USE_LATEST_FORMAT(file);
-
/* Retrieve "defined" status of fill value */
if(H5P_is_fill_value_defined(fill_prop, &fill_status) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't tell if fill value defined")
@@ -906,8 +1008,8 @@ H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset, hid_t dapl_id)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update new fill value header message")
/* If there is valid information for the old fill value struct, add it */
- /* (only if we aren't trying to write the latest version of the file format) */
- if(fill_prop->buf && !use_latest_format) {
+ /* (only if we aren't trying to write the 'latest fill message version support') */
+ if(fill_prop->buf && !(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_FILL_MSG))) {
H5O_fill_t old_fill_prop; /* Copy of fill value property, for writing as "old" fill value */
/* Shallow copy the fill value property */
@@ -953,13 +1055,17 @@ H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset, hid_t dapl_id)
#endif /* H5O_ENABLE_BOGUS */
/* Add a modification time message, if using older format. */
- /* (If using the latest format, the modification time is part of the object
+ /* (If using the latest 'no modification time message' version support, the modification time is part of the object
* header and doesn't use a separate message -QAK)
*/
- if(!use_latest_format)
+ if(!(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_NO_MOD_TIME_MSG)))
if(H5O_touch_oh(file, dxpl_id, oh, TRUE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update modification time message")
+ /* Set up SWMR writes to the dataset, if possible */
+ if(H5D__swmr_setup(dset, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up SWMR access for dataset")
+
done:
/* Release pointer to object header itself */
if(oh != NULL)
@@ -1117,14 +1223,28 @@ H5D__create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id,
} /* end if */
/* Set the latest version of the layout, pline & fill messages, if requested */
- if(H5F_USE_LATEST_FORMAT(file)) {
+ if(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_DSET_MSG_FLAGS)) {
/* Set the latest version for the I/O pipeline message */
- if(H5O_pline_set_latest_version(&new_dset->shared->dcpl_cache.pline) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of I/O filter pipeline")
-
- /* Set the latest version for the fill value message */
- if(H5O_fill_set_latest_version(&new_dset->shared->dcpl_cache.fill) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of fill value")
+ if(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_PLINE_MSG))
+ if(H5O_pline_set_latest_version(&new_dset->shared->dcpl_cache.pline) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of I/O filter pipeline")
+
+ /* Set the latest version for the fill message */
+ if(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_FILL_MSG))
+ /* Set the latest version for the fill value message */
+ if(H5O_fill_set_latest_version(&new_dset->shared->dcpl_cache.fill) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of fill value")
+
+ /* Set the latest version for the layout message */
+ if(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_LAYOUT_MSG))
+ /* Set the latest version for the layout message */
+ if(H5D__layout_set_latest_version(&new_dset->shared->layout, new_dset->shared->space, &new_dset->shared->dcpl_cache) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of layout")
+ } /* end if */
+ else if(new_dset->shared->layout.version >= H5O_LAYOUT_VERSION_4) {
+ /* Use latest indexing type for layout message version >= 4 */
+ if(H5D__layout_set_latest_indexing(&new_dset->shared->layout, new_dset->shared->space, &new_dset->shared->dcpl_cache) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest indexing")
} /* end if */
/* Check if this dataset is going into a parallel file and set space allocation time */
@@ -1146,6 +1266,9 @@ H5D__create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id,
/* Indicate that the layout information was initialized */
layout_init = TRUE;
+ if(H5D__append_flush_setup(new_dset, dapl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, NULL, "unable to set up flush append property")
+
/* Add the dataset to the list of opened objects in the file */
if(H5FO_top_incr(new_dset->oloc.file, new_dset->oloc.addr) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, NULL, "can't incr object ref. count")
@@ -1364,6 +1487,86 @@ done:
} /* end H5D_open() */
+/*
+ *-------------------------------------------------------------------------
+ * Function: H5D__append_flush_setup
+ *
+ * Purpose:  Cache the "append flush" property from the dataset access
+ *           property list in the dataset's shared struct, after validating
+ *           the boundary settings against the dataset's dataspace.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__append_flush_setup(H5D_t *dset, hid_t dapl_id)
+{
+    herr_t ret_value = SUCCEED;         /* return value */
+
+    FUNC_ENTER_STATIC
+
+    /* check args */
+    HDassert(dset);
+    HDassert(dset->shared);
+
+    /* Set default append flush values (no boundary, no callback) */
+    dset->shared->append_flush.ndims = 0;
+    dset->shared->append_flush.func = NULL;
+    dset->shared->append_flush.udata = NULL;
+    HDmemset(dset->shared->append_flush.boundary, 0, sizeof(dset->shared->append_flush.boundary));
+
+    /* The property only applies to chunked datasets with a non-default DAPL */
+    if(dapl_id != H5P_DATASET_ACCESS_DEFAULT && dset->shared->layout.type == H5D_CHUNKED) {
+        H5P_genplist_t *dapl;           /* data access property list object pointer */
+
+        /* Get dataset access property list */
+        if(NULL == (dapl = (H5P_genplist_t *)H5I_object(dapl_id)))
+            HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for dapl ID")
+
+        /* Check if append flush property exists */
+        if(H5P_exist_plist(dapl, H5D_ACS_APPEND_FLUSH_NAME) > 0) {
+            H5D_append_flush_t info;
+
+            /* Get append flush property */
+            if(H5P_get(dapl, H5D_ACS_APPEND_FLUSH_NAME, &info) < 0)
+                HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get append flush info")
+            else if(info.ndims > 0) {
+                hsize_t curr_dims[H5S_MAX_RANK];    /* current dimension sizes */
+                hsize_t max_dims[H5S_MAX_RANK];     /* maximum dimension sizes */
+                int rank;                           /* dataspace # of dimensions */
+                unsigned u;                         /* local index variable */
+
+                /* Get dataset rank */
+                if((rank = H5S_get_simple_extent_dims(dset->shared->space, curr_dims, max_dims)) < 0)
+                    HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
+
+                if(info.ndims != (unsigned)rank)
+                    HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "boundary dimension rank does not match dataset rank")
+
+                /* Validate boundary sizes: any dimension with a non-zero boundary must be extendible */
+                for(u = 0; u < info.ndims; u++) {
+                    if(info.boundary[u] != 0) /* when a non-zero boundary is set */
+                        /* the dimension is extendible? */
+                        if(max_dims[u] != H5S_UNLIMITED && max_dims[u] == curr_dims[u])
+                            break;
+                } /* end for */
+
+                if(u != info.ndims) /* at least one boundary dimension is not extendible */
+                    HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "boundary dimension is not valid")
+                /* Copy the validated append flush settings into the dataset */
+                dset->shared->append_flush.ndims = info.ndims;
+                dset->shared->append_flush.func = info.func;
+                dset->shared->append_flush.udata = info.udata;
+                HDmemcpy(dset->shared->append_flush.boundary, info.boundary, sizeof(info.boundary));
+            } /* end else-if */
+        } /* end if */
+    } /* end if */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__append_flush_setup() */
+
+
/*-------------------------------------------------------------------------
* Function: H5D__open_oid
*
@@ -1428,6 +1631,10 @@ H5D__open_oid(H5D_t *dataset, hid_t dapl_id, hid_t dxpl_id)
/* Indicate that the layout information was initialized */
layout_init = TRUE;
+ /* Set up flush append property */
+ if(H5D__append_flush_setup(dataset, dapl_id))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, FAIL, "unable to set up flush append property")
+
/* Point at dataset's copy, to cache it for later */
fill_prop = &dataset->shared->dcpl_cache.fill;
@@ -1503,6 +1710,10 @@ H5D__open_oid(H5D_t *dataset, hid_t dapl_id, hid_t dxpl_id)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize file storage")
} /* end if */
+ /* Set up SWMR writes to the dataset, if possible */
+ if(H5D__swmr_setup(dataset, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up SWMR access for dataset")
+
done:
if(ret_value < 0) {
if(H5F_addr_defined(dataset->oloc.addr) && H5O_close(&(dataset->oloc)) < 0)
@@ -1548,6 +1759,7 @@ herr_t
H5D_close(H5D_t *dataset)
{
hbool_t free_failed = FALSE;
+ hbool_t corked; /* Whether the dataset is corked or not */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1654,6 +1866,18 @@ H5D_close(H5D_t *dataset)
(H5O_msg_reset(H5O_FILL_ID, &dataset->shared->dcpl_cache.fill) < 0) ||
(H5O_msg_reset(H5O_EFL_ID, &dataset->shared->dcpl_cache.efl) < 0);
+ /* Uncork cache entries with object address tag */
+ if(H5AC_cork(dataset->oloc.file, dataset->oloc.addr, H5AC__GET_CORKED, &corked) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve an object's cork status")
+ else if(corked)
+ if(H5AC_cork(dataset->oloc.file, dataset->oloc.addr, H5AC__UNCORK, NULL) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTUNCORK, FAIL, "unable to uncork an object")
+
+ /* If the dataset is opened for SWMR access, shut that down */
+ if(dataset->shared->is_swimming)
+ if(H5D__swmr_teardown(dataset, H5AC_dxpl_id) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to shut down SWMR access")
+
/*
* Release datatype, dataspace and creation property list -- there isn't
* much we can do if one of these fails, so we just continue.
@@ -2491,11 +2715,19 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
* and if the chunks are written
*-------------------------------------------------------------------------
*/
- if(shrink && H5D_CHUNKED == dset->shared->layout.type &&
- (*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage))
- /* Remove excess chunks */
- if(H5D__chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks")
+ if(H5D_CHUNKED == dset->shared->layout.type) {
+ if(shrink && (*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage))
+ /* Remove excess chunks */
+ if(H5D__chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks")
+
+ /* Update chunks that are no longer edge chunks as a result of
+ * expansion */
+ if(expand && (dset->shared->layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ && (dset->shared->dcpl_cache.pline.nused > 0))
+ if(H5D__chunk_update_old_edge_chunks(dset, dxpl_id, curr_dims) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to do update old edge chunks")
+ } /* end if */
/* Mark the dataspace as dirty, for later writing to the file */
if(H5D__mark(dset, dxpl_id, H5D_MARK_SPACE) < 0)
@@ -2586,6 +2818,101 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5D__format_convert
+ *
+ * Purpose:  To convert a dataset's chunk indexing type to version 1 btree
+ *
+ * Return:   Success:    Non-negative
+ *           Failure:    Negative
+ *
+ * Programmer: Vailin Choi; Feb 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__format_convert(H5D_t *dataset, hid_t dxpl_id)
+{
+    H5O_t *oh = NULL;                   /* Pointer to dataset's object header */
+    H5D_chk_idx_info_t new_idx_info;    /* Index info for the new layout */
+    H5D_chk_idx_info_t idx_info;        /* Index info for the current layout */
+    H5O_layout_t newlayout;             /* The new layout */
+    herr_t ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_PACKAGE_TAG(dxpl_id, dataset->oloc.addr, FAIL)
+
+    /* Check args (note: the FUNC_ENTER macro above already dereferences 'dataset') */
+    HDassert(dataset);
+
+    /* Set up the current index info */
+    idx_info.f = dataset->oloc.file;
+    idx_info.dxpl_id = dxpl_id;
+    idx_info.pline = &dataset->shared->dcpl_cache.pline;
+    idx_info.layout = &dataset->shared->layout.u.chunk;
+    idx_info.storage = &dataset->shared->layout.storage.u.chunk;
+
+    /* Copy the current layout info to the new layout */
+    HDmemcpy(&newlayout, &dataset->shared->layout, sizeof(H5O_layout_t));
+
+    /* Set up info for version 1 B-tree in the new layout */
+    newlayout.version = H5O_LAYOUT_VERSION_3;
+    newlayout.storage.u.chunk.idx_type = H5D_CHUNK_IDX_BTREE;
+    newlayout.storage.u.chunk.idx_addr = HADDR_UNDEF;
+    newlayout.storage.u.chunk.ops = H5D_COPS_BTREE;
+    newlayout.storage.u.chunk.u.btree.shared = NULL;
+
+    /* Set up the index info to version 1 B-tree */
+    new_idx_info.f = dataset->oloc.file;
+    new_idx_info.dxpl_id = dxpl_id;
+    new_idx_info.pline = &dataset->shared->dcpl_cache.pline;
+    new_idx_info.layout = &newlayout.u.chunk;
+    new_idx_info.storage = &newlayout.storage.u.chunk;
+
+    /* Initialize version 1 B-tree */
+    if(newlayout.storage.u.chunk.ops->init &&
+            (newlayout.storage.u.chunk.ops->init)(&new_idx_info, dataset->shared->space, dataset->oloc.addr) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize indexing information")
+
+    /* If the current chunk index exists */
+    if(H5F_addr_defined(dataset->shared->layout.storage.u.chunk.idx_addr)) {
+        /* Create v1 B-tree chunk index */
+        if((newlayout.storage.u.chunk.ops->create)(&new_idx_info) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create chunk index")
+
+        /* Iterate over the chunks in the current index and insert the chunk addresses
+         * into the version 1 B-tree chunk index */
+        if(H5D__chunk_format_convert(dataset, &idx_info, &new_idx_info) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk index to chunk info")
+    } /* end if */
+
+    /* Release the old (i.e. current) chunk index */
+    if(dataset->shared->layout.storage.u.chunk.ops->dest &&
+            (dataset->shared->layout.storage.u.chunk.ops->dest)(&idx_info) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to release chunk index info")
+
+    /* Delete the "layout" message */
+    if(H5O_msg_remove(&dataset->oloc, H5O_LAYOUT_ID, H5O_ALL, TRUE, dxpl_id) < 0)
+        HGOTO_ERROR(H5E_SYM, H5E_CANTDELETE, FAIL, "unable to delete layout message")
+    /* Switch the in-memory layout over to the new (v1 B-tree) layout */
+    HDmemcpy(&dataset->shared->layout, &newlayout, sizeof(H5O_layout_t));
+    /* Pin the object header, to append the new layout message below */
+    if(NULL == (oh = H5O_pin(&dataset->oloc, dxpl_id)))
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTPIN, FAIL, "unable to pin dataset object header")
+
+    /* Append the new layout message to the object header */
+    if(H5O_msg_append_oh(dataset->oloc.file, dxpl_id, oh, H5O_LAYOUT_ID, 0, H5O_UPDATE_TIME, &newlayout) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to append layout message to object header")
+
+done:
+    /* Release pointer to object header */
+    if(oh != NULL)
+        if(H5O_unpin(oh) < 0)
+            HDONE_ERROR(H5E_DATASET, H5E_CANTUNPIN, FAIL, "unable to unpin dataset object header")
+
+    FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+} /* end H5D__format_convert() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5D__mark
*
* Purpose: Mark some aspect of a dataset as dirty
@@ -2941,12 +3268,21 @@ H5D_get_access_plist(H5D_t *dset)
/* If the dataset is chunked then copy the rdcc parameters */
if (dset->shared->layout.type == H5D_CHUNKED) {
+ H5D_append_flush_t info;
+
if (H5P_set(new_plist, H5D_ACS_DATA_CACHE_NUM_SLOTS_NAME, &(dset->shared->cache.chunk.nslots)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set data cache number of slots")
if (H5P_set(new_plist, H5D_ACS_DATA_CACHE_BYTE_SIZE_NAME, &(dset->shared->cache.chunk.nbytes_max)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set data cache byte size")
if (H5P_set(new_plist, H5D_ACS_PREEMPT_READ_CHUNKS_NAME, &(dset->shared->cache.chunk.w0)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set preempt read chunks")
+
+ info.ndims = dset->shared->append_flush.ndims;
+ info.func = dset->shared->append_flush.func;
+ info.udata = dset->shared->append_flush.udata;
+ HDmemcpy(info.boundary, dset->shared->append_flush.boundary, sizeof(dset->shared->append_flush.boundary));
+ if(H5P_set(new_plist, H5D_ACS_APPEND_FLUSH_NAME, &info) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set append flush property")
} /* end if */
/* Set the return value */
@@ -3058,3 +3394,57 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_get_type() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__refresh
+ *
+ * Purpose: Refreshes all buffers associated with a dataset.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * November 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__refresh(hid_t dset_id, H5D_t *dset, hid_t dxpl_id)
+{
+ H5D_virtual_held_file_t *head = NULL; /* Pointer to list of files held open */
+ hbool_t virt_dsets_held = FALSE; /* Whether virtual datasets' files are held open */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ HDassert(dset);
+
+ /* If the layout is virtual... */
+ if(dset->shared->layout.type == H5D_VIRTUAL) {
+ /* Hold open the source datasets' files */
+ if(H5D__virtual_hold_source_dset_files(dset, &head) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINC, FAIL, "unable to hold VDS source files open")
+ virt_dsets_held = TRUE;
+
+ /* Refresh source datasets for virtual dataset */
+ if(H5D__virtual_refresh_source_dsets(dset, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to refresh VDS source datasets")
+ } /* end if */
+
+ /* Refresh dataset object */
+ if((H5O_refresh_metadata(dset_id, dset->oloc, dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to refresh dataset")
+
+done:
+ /* Release hold on virtual datasets' files */
+ if(virt_dsets_held) {
+ /* Sanity check */
+ HDassert(dset->shared->layout.type == H5D_VIRTUAL);
+
+ /* Release the hold on source datasets' files */
+ if(H5D__virtual_release_source_dset_files(head) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "can't release VDS source files held open")
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__refresh() */
+
diff --git a/src/H5Dlayout.c b/src/H5Dlayout.c
index 8676d7c..37cf2cc 100644
--- a/src/H5Dlayout.c
+++ b/src/H5Dlayout.c
@@ -96,8 +96,36 @@ H5D__layout_set_io_ops(const H5D_t *dataset)
dataset->shared->layout.ops = H5D_LOPS_CHUNK;
/* Set the chunk operations */
- /* (Only "B-tree" indexing type currently supported) */
- dataset->shared->layout.storage.u.chunk.ops = H5D_COPS_BTREE;
+ switch(dataset->shared->layout.u.chunk.idx_type) {
+ case H5D_CHUNK_IDX_BTREE:
+ dataset->shared->layout.storage.u.chunk.ops = H5D_COPS_BTREE;
+ break;
+
+ case H5D_CHUNK_IDX_NONE:
+ dataset->shared->layout.storage.u.chunk.ops = H5D_COPS_NONE;
+ break;
+
+ case H5D_CHUNK_IDX_SINGLE:
+ dataset->shared->layout.storage.u.chunk.ops = H5D_COPS_SINGLE;
+ break;
+
+ case H5D_CHUNK_IDX_FARRAY:
+ dataset->shared->layout.storage.u.chunk.ops = H5D_COPS_FARRAY;
+ break;
+
+ case H5D_CHUNK_IDX_EARRAY:
+ dataset->shared->layout.storage.u.chunk.ops = H5D_COPS_EARRAY;
+ break;
+
+ case H5D_CHUNK_IDX_BT2:
+ dataset->shared->layout.storage.u.chunk.ops = H5D_COPS_BT2;
+ break;
+
+ case H5D_CHUNK_IDX_NTYPES:
+ default:
+ HDassert(0 && "Unknown chunk index method!");
+ HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unknown chunk index method")
+ } /* end switch */
break;
case H5D_COMPACT:
@@ -150,6 +178,7 @@ H5D__layout_meta_size(const H5F_t *f, const H5O_layout_t *layout, hbool_t includ
switch(layout->type) {
case H5D_COMPACT:
+ /* This information only present in older versions of message */
/* Size of raw data */
ret_value += 2;
if(include_compact_data)
@@ -157,20 +186,80 @@ H5D__layout_meta_size(const H5F_t *f, const H5O_layout_t *layout, hbool_t includ
break;
case H5D_CONTIGUOUS:
+ /* This information only present in older versions of message */
ret_value += H5F_SIZEOF_ADDR(f); /* Address of data */
ret_value += H5F_SIZEOF_SIZE(f); /* Length of data */
break;
case H5D_CHUNKED:
- /* Number of dimensions (1 byte) */
- HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
- ret_value++;
+ if(layout->version < H5O_LAYOUT_VERSION_4) {
+ /* Number of dimensions (1 byte) */
+ HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ ret_value++;
- /* Dimension sizes */
- ret_value += layout->u.chunk.ndims * 4;
+ /* B-tree address */
+ ret_value += H5F_SIZEOF_ADDR(f); /* Address of data */
- /* B-tree address */
- ret_value += H5F_SIZEOF_ADDR(f); /* Address of data */
+ /* Dimension sizes */
+ ret_value += layout->u.chunk.ndims * 4;
+ } /* end if */
+ else {
+ /* Chunked layout feature flags */
+ ret_value++;
+
+ /* Number of dimensions (1 byte) */
+ HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ ret_value++;
+
+ /* Encoded # of bytes for each chunk dimension */
+ HDassert(layout->u.chunk.enc_bytes_per_dim > 0 && layout->u.chunk.enc_bytes_per_dim <= 8);
+ ret_value++;
+
+ /* Dimension sizes */
+ ret_value += layout->u.chunk.ndims * layout->u.chunk.enc_bytes_per_dim;
+
+ /* Type of chunk index */
+ ret_value++;
+
+ switch(layout->u.chunk.idx_type) {
+ case H5D_CHUNK_IDX_BTREE:
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, 0, "v1 B-tree index type found for layout message >v3")
+
+ case H5D_CHUNK_IDX_NONE:
+ /* nothing */
+ break;
+
+ case H5D_CHUNK_IDX_SINGLE:
+ /* Possible filter information */
+ if(layout->u.chunk.flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) {
+ ret_value += H5F_SIZEOF_SIZE(f); /* Size of chunk (in file) */
+ ret_value += 4; /* Filter mask for chunk */
+ } /* end if */
+ break;
+
+ case H5D_CHUNK_IDX_FARRAY:
+ /* Fixed array creation parameters */
+ ret_value += H5D_FARRAY_CREATE_PARAM_SIZE;
+ break;
+
+ case H5D_CHUNK_IDX_EARRAY:
+ /* Extensible array creation parameters */
+ ret_value += H5D_EARRAY_CREATE_PARAM_SIZE;
+ break;
+
+ case H5D_CHUNK_IDX_BT2:
+ /* v2 B-tree creation parameters */
+ ret_value += H5D_BT2_CREATE_PARAM_SIZE;
+ break;
+
+ case H5D_CHUNK_IDX_NTYPES:
+ default:
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTENCODE, 0, "Invalid chunk index type")
+ } /* end switch */
+
+ /* Chunk index address */
+ ret_value += H5F_SIZEOF_ADDR(f);
+ } /* end else */
break;
case H5D_VIRTUAL:
@@ -190,6 +279,170 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5D__layout_set_latest_version
+ *
+ * Purpose: Set the encoding for a layout to the latest version.
+ * Part of the coding in this routine is moved to
+ * H5D__layout_set_latest_indexing().
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, January 15, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__layout_set_latest_version(H5O_layout_t *layout, const H5S_t *space,
+ const H5D_dcpl_cache_t *dcpl_cache)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity check */
+ HDassert(layout);
+ HDassert(space);
+ HDassert(dcpl_cache);
+
+ /* Set encoding of layout to latest version */
+ layout->version = H5O_LAYOUT_VERSION_LATEST;
+
+ /* Set the latest indexing type for the layout message */
+ if(H5D__layout_set_latest_indexing(layout, space, dcpl_cache) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set latest indexing type")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__layout_set_latest_version() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__layout_set_latest_indexing
+ *
+ * Purpose: Set the latest indexing type for a layout message
+ * This is moved from H5D_layout_set_latest_version().
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, January 15, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__layout_set_latest_indexing(H5O_layout_t *layout, const H5S_t *space,
+ const H5D_dcpl_cache_t *dcpl_cache)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity check */
+ HDassert(layout);
+ HDassert(space);
+ HDassert(dcpl_cache);
+
+ /* The indexing methods only apply to chunked datasets (currently) */
+ if(layout->type == H5D_CHUNKED) {
+ int sndims; /* Rank of dataspace */
+ unsigned ndims; /* Rank of dataspace */
+
+ /* Query the dimensionality of the dataspace */
+ if((sndims = H5S_GET_EXTENT_NDIMS(space)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "invalid dataspace rank")
+ ndims = (unsigned)sndims;
+
+ /* Avoid scalar/null dataspace */
+ if(ndims > 0) {
+ hsize_t max_dims[H5O_LAYOUT_NDIMS]; /* Maximum dimension sizes */
+ hsize_t cur_dims[H5O_LAYOUT_NDIMS]; /* Current dimension sizes */
+ unsigned unlim_count = 0; /* Count of unlimited max. dimensions */
+ hbool_t single = TRUE; /* Fulfill single chunk indexing */
+ unsigned u; /* Local index variable */
+
+ /* Query the dataspace's dimensions */
+ if(H5S_get_simple_extent_dims(space, cur_dims, max_dims) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace max. dimensions")
+
+ /* Spin through the max. dimensions, looking for unlimited dimensions */
+ for(u = 0; u < ndims; u++) {
+ if(max_dims[u] == H5S_UNLIMITED)
+ unlim_count++;
+ if(cur_dims[u] != max_dims[u] || cur_dims[u] != layout->u.chunk.dim[u])
+ single = FALSE;
+ } /* end for */
+
+ /* Chunked datasets with unlimited dimension(s) */
+ if(unlim_count) { /* dataset with unlimited dimension(s) must be chunked */
+ if(1 == unlim_count) { /* Chunked dataset with only 1 unlimited dimension */
+ /* Set the chunk index type to an extensible array */
+ layout->u.chunk.idx_type = H5D_CHUNK_IDX_EARRAY;
+ layout->storage.u.chunk.idx_type = H5D_CHUNK_IDX_EARRAY;
+ layout->storage.u.chunk.ops = H5D_COPS_EARRAY;
+
+ /* Set the extensible array creation parameters */
+ /* (use hard-coded defaults for now, until we give applications
+ * control over this with a property list - QAK)
+ */
+ layout->u.chunk.u.earray.cparam.max_nelmts_bits = H5D_EARRAY_MAX_NELMTS_BITS;
+ layout->u.chunk.u.earray.cparam.idx_blk_elmts = H5D_EARRAY_IDX_BLK_ELMTS;
+ layout->u.chunk.u.earray.cparam.sup_blk_min_data_ptrs = H5D_EARRAY_SUP_BLK_MIN_DATA_PTRS;
+ layout->u.chunk.u.earray.cparam.data_blk_min_elmts = H5D_EARRAY_DATA_BLK_MIN_ELMTS;
+ layout->u.chunk.u.earray.cparam.max_dblk_page_nelmts_bits = H5D_EARRAY_MAX_DBLOCK_PAGE_NELMTS_BITS;
+ } /* end if */
+ else { /* Chunked dataset with > 1 unlimited dimensions */
+ /* Set the chunk index type to v2 B-tree */
+ layout->u.chunk.idx_type = H5D_CHUNK_IDX_BT2;
+ layout->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BT2;
+ layout->storage.u.chunk.ops = H5D_COPS_BT2;
+
+ /* Set the v2 B-tree creation parameters */
+ /* (use hard-coded defaults for now, until we give applications
+ * control over this with a property list - QAK)
+ */
+ layout->u.chunk.u.btree2.cparam.node_size = H5D_BT2_NODE_SIZE;
+ layout->u.chunk.u.btree2.cparam.split_percent = H5D_BT2_SPLIT_PERC;
+ layout->u.chunk.u.btree2.cparam.merge_percent = H5D_BT2_MERGE_PERC;
+ } /* end else */
+ } /* end if */
+ else { /* Chunked dataset with fixed dimensions */
+ /* Check for correct condition for using "single chunk" chunk index */
+ if(single) {
+ layout->u.chunk.idx_type = H5D_CHUNK_IDX_SINGLE;
+ layout->storage.u.chunk.idx_type = H5D_CHUNK_IDX_SINGLE;
+ layout->storage.u.chunk.ops = H5D_COPS_SINGLE;
+ } /* end if */
+ else if(!dcpl_cache->pline.nused &&
+ dcpl_cache->fill.alloc_time == H5D_ALLOC_TIME_EARLY) {
+
+ /* Set the chunk index type to "none" Index */
+ layout->u.chunk.idx_type = H5D_CHUNK_IDX_NONE;
+ layout->storage.u.chunk.idx_type = H5D_CHUNK_IDX_NONE;
+ layout->storage.u.chunk.ops = H5D_COPS_NONE;
+ } /* end else-if */
+ else { /* Used Fixed Array */
+ /* Set the chunk index type to Fixed Array */
+ layout->u.chunk.idx_type = H5D_CHUNK_IDX_FARRAY;
+ layout->storage.u.chunk.idx_type = H5D_CHUNK_IDX_FARRAY;
+ layout->storage.u.chunk.ops = H5D_COPS_FARRAY;
+
+ /* Set the fixed array creation parameters */
+ /* (use hard-coded defaults for now, until we give applications
+ * control over this with a property list - QAK)
+ */
+ layout->u.chunk.u.farray.cparam.max_dblk_page_nelmts_bits = H5D_FARRAY_MAX_DBLK_PAGE_NELMTS_BITS;
+ } /* end else */
+ } /* end else */
+ } /* end if */
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__layout_set_latest_indexing() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5D__layout_oh_create
*
* Purpose: Create layout/pline/efl information for dataset
@@ -206,8 +459,9 @@ herr_t
H5D__layout_oh_create(H5F_t *file, hid_t dxpl_id, H5O_t *oh, H5D_t *dset,
hid_t dapl_id)
{
- H5O_layout_t *layout; /* Dataset's layout information */
+ H5O_layout_t *layout; /* Dataset's layout information */
const H5O_fill_t *fill_prop; /* Pointer to dataset's fill value information */
+ unsigned layout_mesg_flags; /* Flags for inserting layout message */
hbool_t layout_init = FALSE; /* Flag to indicate that chunk information was initialized */
herr_t ret_value = SUCCEED; /* Return value */
@@ -296,9 +550,14 @@ H5D__layout_oh_create(H5F_t *file, hid_t dxpl_id, H5O_t *oh, H5D_t *dset,
} /* end if */
/* Create layout message */
- /* (Don't make layout message constant unless allocation time is early, since space may not be allocated) */
+ /* (Don't make layout message constant unless allocation time is early and non-filtered, since space may not be allocated) */
/* (Note: this is relying on H5D__alloc_storage not calling H5O_msg_write during dataset creation) */
- if(H5O_msg_append_oh(file, dxpl_id, oh, H5O_LAYOUT_ID, ((fill_prop->alloc_time == H5D_ALLOC_TIME_EARLY && H5D_COMPACT != layout->type) ? H5O_MSG_FLAG_CONSTANT : 0), 0, layout) < 0)
+ if(fill_prop->alloc_time == H5D_ALLOC_TIME_EARLY && H5D_COMPACT != layout->type
+ && !dset->shared->dcpl_cache.pline.nused)
+ layout_mesg_flags = H5O_MSG_FLAG_CONSTANT;
+ else
+ layout_mesg_flags = 0;
+ if(H5O_msg_append_oh(file, dxpl_id, oh, H5O_LAYOUT_ID, layout_mesg_flags, 0, layout) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update layout")
done:
@@ -401,7 +660,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5D__layout_oh_write
*
- * Purpose: Write layout/pline/efl information for dataset
+ * Purpose: Write layout information for dataset
*
* Return: Success: SUCCEED
* Failure: FAIL
@@ -414,6 +673,7 @@ done:
herr_t
H5D__layout_oh_write(H5D_t *dataset, hid_t dxpl_id, H5O_t *oh, unsigned update_flags)
{
+ htri_t msg_exists; /* Whether the layout message exists */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -422,9 +682,14 @@ H5D__layout_oh_write(H5D_t *dataset, hid_t dxpl_id, H5O_t *oh, unsigned update_f
HDassert(dataset);
HDassert(oh);
- /* Write the layout message to the dataset's header */
- if(H5O_msg_write_oh(dataset->oloc.file, dxpl_id, oh, H5O_LAYOUT_ID, H5O_MSG_FLAG_CONSTANT, update_flags, &dataset->shared->layout) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update layout message")
+ /* Check if the layout message has been added to the dataset's header */
+ if((msg_exists = H5O_msg_exists_oh(oh, H5O_LAYOUT_ID)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to check if layout message exists")
+ if(msg_exists) {
+ /* Write the layout message to the dataset's header */
+ if(H5O_msg_write_oh(dataset->oloc.file, dxpl_id, oh, H5O_LAYOUT_ID, 0, update_flags, &dataset->shared->layout) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to update layout message")
+ } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5Dnone.c b/src/H5Dnone.c
new file mode 100644
index 0000000..0cadac2
--- /dev/null
+++ b/src/H5Dnone.c
@@ -0,0 +1,497 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: Vailin Choi <vchoi@hdfgroup.org>
+ * September 2010
+ *
+ * Purpose: Implicit (Non Index) chunked I/O functions.
+ * This is used when the dataset is:
+ * extendible but with fixed max. dims
+ * with early allocation
+ * without filter
+ * The chunk coordinate is mapped into the actual disk addresses
+ * for the chunk without indexing.
+ *
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5Dmodule.h" /* This source code file is part of the H5D module */
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Dpkg.h" /* Datasets */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5MFprivate.h" /* File space management */
+#include "H5VMprivate.h" /* Vector functions */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/* Non Index chunking I/O ops */
+static herr_t H5D__none_idx_create(const H5D_chk_idx_info_t *idx_info);
+static hbool_t H5D__none_idx_is_space_alloc(const H5O_storage_chunk_t *storage);
+static herr_t H5D__none_idx_get_addr(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_ud_t *udata);
+static int H5D__none_idx_iterate(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_cb_func_t chunk_cb, void *chunk_udata);
+static herr_t H5D__none_idx_remove(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_common_ud_t *udata);
+static herr_t H5D__none_idx_delete(const H5D_chk_idx_info_t *idx_info);
+static herr_t H5D__none_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
+ const H5D_chk_idx_info_t *idx_info_dst);
+static herr_t H5D__none_idx_size(const H5D_chk_idx_info_t *idx_info,
+ hsize_t *size);
+static herr_t H5D__none_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr);
+static herr_t H5D__none_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream);
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/* Non Index chunk I/O ops */
+const H5D_chunk_ops_t H5D_COPS_NONE[1] = {{
+ FALSE, /* Non-indexed chunking doesn't currently support SWMR access */
+ NULL, /* init */
+ H5D__none_idx_create, /* create */
+ H5D__none_idx_is_space_alloc, /* is_space_alloc */
+ NULL, /* insert */
+ H5D__none_idx_get_addr, /* get_addr */
+ NULL, /* resize */
+ H5D__none_idx_iterate, /* iterate */
+ H5D__none_idx_remove, /* remove */
+ H5D__none_idx_delete, /* delete */
+ H5D__none_idx_copy_setup, /* copy_setup */
+ NULL, /* copy_shutdown */
+ H5D__none_idx_size, /* size */
+ H5D__none_idx_reset, /* reset */
+ H5D__none_idx_dump, /* dump */
+ NULL /* dest */
+}};
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__none_idx_create
+ *
+ * Purpose: Allocate memory for the maximum # of chunks in the dataset.
+ *
+ * Return: Non-negative on success
+ * Negative on failure.
+ *
+ * Programmer: Vailin Choi; September 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__none_idx_create(const H5D_chk_idx_info_t *idx_info)
+{
+ hsize_t nbytes; /* Total size of dataset chunks */
+ haddr_t addr; /* The address of dataset chunks */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check args */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->pline->nused == 0); /* Shouldn't have filter defined on entering here */
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(idx_info->layout->max_nchunks);
+ HDassert(!H5F_addr_defined(idx_info->storage->idx_addr)); /* address of data shouldn't be defined */
+
+ /* Calculate size of max dataset chunks */
+ nbytes = idx_info->layout->max_nchunks * idx_info->layout->size;
+
+ /* Allocate space for max dataset chunks */
+ addr = H5MF_alloc(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, nbytes);
+ if(!H5F_addr_defined(addr))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "file allocation failed")
+
+ /* This is the address of the dataset chunks */
+ idx_info->storage->idx_addr = addr;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__none_idx_create() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__none_idx_is_space_alloc
+ *
+ * Purpose: Query if space for the dataset chunks is allocated
+ *
+ * Return: TRUE if chunk space is allocated, FALSE otherwise
+ *
+ * Programmer: Vailin Choi; September 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+H5D__none_idx_is_space_alloc(const H5O_storage_chunk_t *storage)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(storage);
+
+ FUNC_LEAVE_NOAPI((hbool_t)H5F_addr_defined(storage->idx_addr))
+} /* end H5D__none_idx_is_space_alloc() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__none_idx_get_addr
+ *
+ * Purpose: Get the file address of a chunk.
+ * Save the retrieved information in the udata supplied.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Sept 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__none_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->pline->nused == 0);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(udata);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+
+ /* Calculate the index of this chunk */
+ udata->chunk_idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->max_down_chunks, udata->common.scaled);
+
+ /* Calculate the address of the chunk */
+ udata->chunk_block.offset = idx_info->storage->idx_addr + udata->chunk_idx * idx_info->layout->size;
+
+ /* Update the other (constant) information for the chunk */
+ udata->chunk_block.length = idx_info->layout->size;
+ udata->filter_mask = 0;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__none_idx_get_addr() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__none_idx_iterate
+ *
+ * Purpose: Iterate over the chunks in an index, making a callback
+ * for each one.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; September 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__none_idx_iterate(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_cb_func_t chunk_cb, void *chunk_udata)
+{
+ H5D_chunk_rec_t chunk_rec; /* generic chunk record */
+ unsigned ndims; /* Rank of chunk */
+ unsigned u; /* Local index variable */
+ int curr_dim; /* Current rank */
+ hsize_t idx; /* Array index of chunk */
+ int ret_value = -1; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(!idx_info->pline->nused);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(chunk_cb);
+ HDassert(chunk_udata);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+
+ /* Initialize generic chunk record */
+ HDmemset(&chunk_rec, 0, sizeof(chunk_rec));
+ chunk_rec.nbytes = idx_info->layout->size;
+ chunk_rec.filter_mask = 0;
+
+ ndims = idx_info->layout->ndims - 1;
+ HDassert(ndims > 0);
+
+ /* Iterate over all the chunks in the dataset's dataspace */
+ for(u = 0; u < idx_info->layout->nchunks; u++) {
+ /* Calculate the index of this chunk */
+ idx = H5VM_array_offset_pre(ndims, idx_info->layout->max_down_chunks, chunk_rec.scaled);
+
+ /* Calculate the address of the chunk */
+ chunk_rec.chunk_addr = idx_info->storage->idx_addr + idx * idx_info->layout->size;
+
+ /* Make "generic chunk" callback */
+ if((ret_value = (*chunk_cb)(&chunk_rec, chunk_udata)) < 0)
+ HERROR(H5E_DATASET, H5E_CALLBACK, "failure in generic chunk iterator callback");
+
+ /* Update coordinates of chunk in dataset */
+ curr_dim = (int)(ndims - 1);
+ while(curr_dim >= 0) {
+ /* Increment coordinate in current dimension */
+ chunk_rec.scaled[curr_dim]++;
+
+ /* Check if we went off the end of the current dimension */
+ if(chunk_rec.scaled[curr_dim] >= idx_info->layout->chunks[curr_dim]) {
+ /* Reset coordinate & move to next faster dimension */
+ chunk_rec.scaled[curr_dim] = 0;
+ curr_dim--;
+ } /* end if */
+ else
+ break;
+ } /* end while */
+ } /* end for */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__none_idx_iterate() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__none_idx_remove
+ *
+ * Purpose: Remove chunk from index.
+ *
+ * Note: Chunks can't be removed (or added) to datasets with this
+ * form of index - all the space for all the chunks is always
+ * allocated in the file.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Sept 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__none_idx_remove(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, H5D_chunk_common_ud_t H5_ATTR_UNUSED *udata)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* NO OP */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__none_idx_remove() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__none_idx_delete
+ *
+ * Purpose: Delete raw data storage for entire dataset (i.e. all chunks)
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Sept 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__none_idx_delete(const H5D_chk_idx_info_t *idx_info)
+{
+ hsize_t nbytes; /* Size of all chunks */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(!idx_info->pline->nused); /* Shouldn't have filter defined on entering here */
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); /* should be defined */
+
+ /* chunk size * max # of chunks */
+ nbytes = idx_info->layout->max_nchunks * idx_info->layout->size;
+ if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, idx_info->storage->idx_addr, nbytes) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free dataset chunks")
+
+ idx_info->storage->idx_addr = HADDR_UNDEF;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__none_idx_delete() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__none_idx_copy_setup
+ *
+ * Purpose: Set up any necessary information for copying chunks
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Sept 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__none_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
+ const H5D_chk_idx_info_t *idx_info_dst)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check args */
+ HDassert(idx_info_src);
+ HDassert(idx_info_src->f);
+ HDassert(idx_info_src->pline);
+ HDassert(!idx_info_src->pline->nused);
+ HDassert(idx_info_src->layout);
+ HDassert(idx_info_src->storage);
+ HDassert(H5F_addr_defined(idx_info_src->storage->idx_addr));
+
+ HDassert(idx_info_dst);
+ HDassert(idx_info_dst->f);
+ HDassert(idx_info_dst->pline);
+ HDassert(!idx_info_dst->pline->nused);
+ HDassert(idx_info_dst->layout);
+ HDassert(idx_info_dst->storage);
+
+ /* Set copied metadata tag */
+ H5_BEGIN_TAG(idx_info_dst->dxpl_id, H5AC__COPIED_TAG, FAIL);
+
+ /* Allocate dataset chunks in the dest. file */
+ if(H5D__none_idx_create(idx_info_dst) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunked storage")
+
+ /* Reset metadata tag */
+ H5_END_TAG(FAIL);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__none_idx_copy_setup() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__none_idx_size
+ *
+ * Purpose: Retrieve the amount of index storage for chunked dataset
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Sept 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__none_idx_size(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, hsize_t *index_size)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(index_size);
+
+ *index_size = 0;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__none_idx_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__none_idx_reset
+ *
+ * Purpose: Reset indexing information.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Sept 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__none_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(storage);
+
+ /* Reset index info */
+ if(reset_addr)
+ storage->idx_addr = HADDR_UNDEF;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__none_idx_reset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__none_idx_dump
+ *
+ * Purpose: Dump the chunk index information (storage address) to a stream
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; September 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__none_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(storage);
+ HDassert(stream);
+
+ HDfprintf(stream, " Address: %a\n", storage->idx_addr);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__none_idx_dump() */
+
diff --git a/src/H5Doh.c b/src/H5Doh.c
index 8b70362..2836ef2 100644
--- a/src/H5Doh.c
+++ b/src/H5Doh.c
@@ -55,7 +55,7 @@ static void *H5O__dset_create(H5F_t *f, void *_crt_info, H5G_loc_t *obj_loc,
static H5O_loc_t *H5O__dset_get_oloc(hid_t obj_id);
static herr_t H5O__dset_bh_info(const H5O_loc_t *loc, hid_t dxpl_id, H5O_t *oh,
H5_ih_info_t *bh_info);
-static herr_t H5O__dset_flush(H5G_loc_t *obj_loc, hid_t dxpl_id);
+static herr_t H5O__dset_flush(void *_obj_ptr, hid_t dxpl_id);
/*********************/
@@ -156,6 +156,10 @@ H5O__dset_free_copy_file_udata(void *_udata)
if(udata->common.src_pline)
H5O_msg_free(H5O_PLINE_ID, udata->common.src_pline);
+ /* Release copy of dataset's layout, if it was set */
+ if(udata->src_layout)
+ H5O_msg_free(H5O_LAYOUT_ID, udata->src_layout);
+
/* Release space for 'copy file' user data */
udata = H5FL_FREE(H5D_copy_file_ud_t, udata);
@@ -443,33 +447,27 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5O__dset_flush(H5G_loc_t *obj_loc, hid_t dxpl_id)
+H5O__dset_flush(void *_obj_ptr, hid_t dxpl_id)
{
- H5D_t *dset = NULL; /* Dataset opened */
+ H5D_t *dset = (H5D_t *)_obj_ptr; /* Pointer to dataset object */
H5O_type_t obj_type; /* Type of object at location */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
- HDassert(obj_loc);
- HDassert(obj_loc->oloc);
+ HDassert(dset);
+ HDassert(&dset->oloc);
/* Check that the object found is the correct type */
- if(H5O_obj_type(obj_loc->oloc, &obj_type, dxpl_id) < 0)
+ if(H5O_obj_type(&dset->oloc, &obj_type, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get object type")
if(obj_type != H5O_TYPE_DATASET)
HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "not a dataset")
- /* Open the dataset */
- if(NULL == (dset = H5D_open(obj_loc, H5P_DATASET_ACCESS_DEFAULT, dxpl_id)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to open dataset")
-
if(H5D__flush_real(dset, dxpl_id) < 0)
HDONE_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to flush cached dataset info")
done:
- if(dset && H5D_close(dset) < 0)
- HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release dataset")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5O__dset_flush() */
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index e9e87af..248808d 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -33,6 +33,7 @@
/* Other private headers needed by this file */
#include "H5ACprivate.h" /* Metadata cache */
+#include "H5B2private.h" /* v2 B-trees */
#include "H5Fprivate.h" /* File access */
#include "H5Gprivate.h" /* Groups */
#include "H5SLprivate.h" /* Skip lists */
@@ -65,6 +66,27 @@
#define H5D_MARK_SPACE 0x01
#define H5D_MARK_LAYOUT 0x02
+/* Default creation parameters for chunk index data structures */
+/* See H5O_layout_chunk_t */
+
+/* Fixed array creation values */
+#define H5D_FARRAY_CREATE_PARAM_SIZE 1 /* Size of the creation parameters in bytes */
+#define H5D_FARRAY_MAX_DBLK_PAGE_NELMTS_BITS 10 /* i.e. 1024 elements per data block page */
+
+/* Extensible array creation values */
+#define H5D_EARRAY_CREATE_PARAM_SIZE 5 /* Size of the creation parameters in bytes */
+#define H5D_EARRAY_MAX_NELMTS_BITS 32 /* i.e. 4 giga-elements */
+#define H5D_EARRAY_IDX_BLK_ELMTS 4
+#define H5D_EARRAY_SUP_BLK_MIN_DATA_PTRS 4
+#define H5D_EARRAY_DATA_BLK_MIN_ELMTS 16
+#define H5D_EARRAY_MAX_DBLOCK_PAGE_NELMTS_BITS 10 /* i.e. 1024 elements per data block page */
+
+/* v2 B-tree creation values for raw meta_size */
+#define H5D_BT2_CREATE_PARAM_SIZE 6 /* Size of the creation parameters in bytes */
+#define H5D_BT2_NODE_SIZE 512
+#define H5D_BT2_SPLIT_PERC 100
+#define H5D_BT2_MERGE_PERC 40
+
/****************************/
/* Package Private Typedefs */
@@ -240,7 +262,7 @@ typedef struct H5D_chk_idx_info_t {
typedef struct H5D_chunk_rec_t {
hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Logical offset to start */
uint32_t nbytes; /* Size of stored data */
- unsigned filter_mask; /* Excluded filters */
+ uint32_t filter_mask; /* Excluded filters */
haddr_t chunk_addr; /* Address of chunk in file */
} H5D_chunk_rec_t;
@@ -254,6 +276,9 @@ typedef struct H5D_chunk_common_ud_t {
const H5O_layout_chunk_t *layout; /* Chunk layout description */
const H5O_storage_chunk_t *storage; /* Chunk storage description */
const hsize_t *scaled; /* Scaled coordinates for a chunk */
+ const struct H5D_rdcc_t *rdcc; /* Chunk cache. Only necessary if the index may
+ * be modified, and if any chunks in the dset
+ * may be cached */
} H5D_chunk_common_ud_t;
/* B-tree callback info for various operations */
@@ -264,6 +289,9 @@ typedef struct H5D_chunk_ud_t {
unsigned idx_hint; /*index of chunk in cache, if present */
H5F_block_t chunk_block; /*offset/length of chunk in file */
unsigned filter_mask; /*excluded filters */
+ hbool_t new_unfilt_chunk; /*whether the chunk just became unfiltered */
+ hsize_t chunk_idx; /*chunk index for EA, FA indexing */
+ hbool_t need_modify;
} H5D_chunk_ud_t;
/* Typedef for "generic" chunk callbacks */
@@ -298,6 +326,7 @@ typedef herr_t (*H5D_chunk_dest_func_t)(const H5D_chk_idx_info_t *idx_info);
/* Typedef for grouping chunk I/O routines */
typedef struct H5D_chunk_ops_t {
+ hbool_t can_swim; /* Flag to indicate that the index supports SWMR access */
H5D_chunk_init_func_t init; /* Routine to initialize indexing information in memory */
H5D_chunk_create_func_t create; /* Routine to create chunk index */
H5D_chunk_is_space_alloc_func_t is_space_alloc; /* Query routine to determine if storage/index is allocated */
@@ -363,9 +392,20 @@ typedef struct H5D_chunk_cached_t {
hsize_t scaled[H5O_LAYOUT_NDIMS]; /*scaled offset of chunk*/
haddr_t addr; /*file address of chunk */
uint32_t nbytes; /*size of stored data */
+ hsize_t chunk_idx; /*index of chunk in dataset */
unsigned filter_mask; /*excluded filters */
} H5D_chunk_cached_t;
+/****************************/
+/* Virtual dataset typedefs */
+/****************************/
+
+/* List of files held open during refresh operations */
+typedef struct H5D_virtual_held_file_t {
+ H5F_t *file; /* Pointer to file held open */
+ struct H5D_virtual_held_file_t *next; /* Pointer to next node in list */
+} H5D_virtual_held_file_t;
+
/* The raw data chunk cache */
typedef struct H5D_rdcc_t {
struct {
@@ -379,6 +419,7 @@ typedef struct H5D_rdcc_t {
double w0; /* Chunk preemption policy */
struct H5D_rdcc_ent_t *head; /* Head of doubly linked list */
struct H5D_rdcc_ent_t *tail; /* Tail of doubly linked list */
+ struct H5D_rdcc_ent_t *tmp_head; /* Head of temporary doubly linked list. Chunks on this list are not in the hash table (slot). The head entry is a sentinel (does not refer to an actual chunk). */
size_t nbytes_used; /* Current cached raw data in bytes */
int nused; /* Number of chunk slots in use */
H5D_chunk_cached_t last; /* Cached copy of last chunk information */
@@ -414,14 +455,19 @@ typedef struct H5D_shared_t {
hid_t type_id; /* ID for dataset's datatype */
H5T_t *type; /* Datatype for this dataset */
H5S_t *space; /* Dataspace of this dataset */
+ hbool_t space_dirty; /* Whether the dataspace info needs to be flushed to the file */
+ hbool_t layout_dirty; /* Whether the layout info needs to be flushed to the file */
hid_t dcpl_id; /* Dataset creation property id */
H5D_dcpl_cache_t dcpl_cache; /* Cached DCPL values */
H5O_layout_t layout; /* Data layout */
hbool_t checked_filters;/* TRUE if dataset passes can_apply check */
+ H5O_t *oh; /* Pointer to dataset's object header, pinned */
+ hbool_t is_swimming; /* TRUE if dataset has SWMR access enabled */
/* Cached dataspace info */
unsigned ndims; /* The dataset's dataspace rank */
hsize_t curr_dims[H5S_MAX_RANK]; /* The curr. size of dataset dimensions */
+ hsize_t curr_power2up[H5S_MAX_RANK]; /* The curr. dim sizes, rounded up to next power of 2 */
hsize_t max_dims[H5S_MAX_RANK]; /* The max. size of dataset dimensions */
/* Buffered/cached information for types of raw data storage*/
@@ -433,6 +479,7 @@ typedef struct H5D_shared_t {
*/
H5D_rdcc_t chunk; /* Information about chunked data */
} cache;
+ H5D_append_flush_t append_flush; /* Append flush property information */
} H5D_shared_t;
struct H5D_t {
@@ -493,19 +540,28 @@ typedef struct {
hsize_t size; /* Accumulated number of bytes for the selection */
} H5D_vlen_bufsize_t;
+/* Flags for the "edge_chunk_state" field below */
+#define H5D_RDCC_DISABLE_FILTERS 0x01u /* Disable filters on this chunk */
+#define H5D_RDCC_NEWLY_DISABLED_FILTERS 0x02u /* Filters have been disabled since
+ * the last flush */
+
/* Raw data chunks are cached. Each entry in the cache is: */
typedef struct H5D_rdcc_ent_t {
hbool_t locked; /*entry is locked in cache */
hbool_t dirty; /*needs to be written to disk? */
hbool_t deleted; /*chunk about to be deleted */
+ unsigned edge_chunk_state; /*states related to edge chunks (see above) */
hsize_t scaled[H5O_LAYOUT_NDIMS]; /*scaled chunk 'name' (coordinates) */
uint32_t rd_count; /*bytes remaining to be read */
uint32_t wr_count; /*bytes remaining to be written */
H5F_block_t chunk_block; /*offset/length of chunk in file */
+ hsize_t chunk_idx; /*index of chunk in dataset */
uint8_t *chunk; /*the unfiltered chunk data */
unsigned idx; /*index in hash table */
struct H5D_rdcc_ent_t *next;/*next item in doubly-linked list */
struct H5D_rdcc_ent_t *prev;/*previous item in doubly-linked list */
+ struct H5D_rdcc_ent_t *tmp_next;/*next item in temporary doubly-linked list */
+ struct H5D_rdcc_ent_t *tmp_prev;/*previous item in temporary doubly-linked list */
} H5D_rdcc_ent_t;
typedef H5D_rdcc_ent_t *H5D_rdcc_ent_ptr_t; /* For free lists */
@@ -524,6 +580,16 @@ H5_DLLVAR const H5D_layout_ops_t H5D_LOPS_VIRTUAL[1];
/* Chunked layout operations */
H5_DLLVAR const H5D_chunk_ops_t H5D_COPS_BTREE[1];
+H5_DLLVAR const H5D_chunk_ops_t H5D_COPS_NONE[1];
+H5_DLLVAR const H5D_chunk_ops_t H5D_COPS_SINGLE[1];
+H5_DLLVAR const H5D_chunk_ops_t H5D_COPS_EARRAY[1];
+H5_DLLVAR const H5D_chunk_ops_t H5D_COPS_FARRAY[1];
+H5_DLLVAR const H5D_chunk_ops_t H5D_COPS_BT2[1];
+
+/* The v2 B-tree class for indexing chunked datasets with >1 unlimited dimensions */
+H5_DLLVAR const H5B2_class_t H5D_BT2[1];
+H5_DLLVAR const H5B2_class_t H5D_BT2_FILT[1];
+
/******************************/
@@ -550,8 +616,12 @@ H5_DLL herr_t H5D__check_filters(H5D_t *dataset);
H5_DLL herr_t H5D__set_extent(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id);
H5_DLL herr_t H5D__get_dxpl_cache(hid_t dxpl_id, H5D_dxpl_cache_t **cache);
H5_DLL herr_t H5D__flush_sieve_buf(H5D_t *dataset, hid_t dxpl_id);
-H5_DLL herr_t H5D__mark(const H5D_t *dataset, hid_t dxpl_id, unsigned flags);
H5_DLL herr_t H5D__flush_real(H5D_t *dataset, hid_t dxpl_id);
+H5_DLL herr_t H5D__mark(const H5D_t *dataset, hid_t dxpl_id, unsigned flags);
+H5_DLL herr_t H5D__refresh(hid_t dset_id, H5D_t *dataset, hid_t dxpl_id);
+
+/* To convert a dataset's chunk indexing type to v1 B-tree */
+H5_DLL herr_t H5D__format_convert(H5D_t *dataset, hid_t dxpl_id);
/* Internal I/O routines */
H5_DLL herr_t H5D__read(H5D_t *dataset, hid_t mem_type_id,
@@ -584,6 +654,10 @@ H5_DLL herr_t H5D__scatgath_write(const H5D_io_info_t *io_info,
H5_DLL herr_t H5D__layout_set_io_ops(const H5D_t *dataset);
H5_DLL size_t H5D__layout_meta_size(const H5F_t *f, const H5O_layout_t *layout,
hbool_t include_compact_data);
+H5_DLL herr_t H5D__layout_set_latest_version(H5O_layout_t *layout,
+ const H5S_t *space, const H5D_dcpl_cache_t *dcpl_cache);
+H5_DLL herr_t H5D__layout_set_latest_indexing(H5O_layout_t *layout,
+ const H5S_t *space, const H5D_dcpl_cache_t *dcpl_cache);
H5_DLL herr_t H5D__layout_oh_create(H5F_t *file, hid_t dxpl_id, H5O_t *oh,
H5D_t *dset, hid_t dapl_id);
H5_DLL herr_t H5D__layout_oh_read(H5D_t *dset, hid_t dxpl_id, hid_t dapl_id,
@@ -617,13 +691,15 @@ H5_DLL hbool_t H5D__chunk_is_space_alloc(const H5O_storage_t *storage);
H5_DLL herr_t H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id,
const hsize_t *scaled, H5D_chunk_ud_t *udata);
H5_DLL void *H5D__chunk_lock(const H5D_io_info_t *io_info,
- H5D_chunk_ud_t *udata, hbool_t relax);
+ H5D_chunk_ud_t *udata, hbool_t relax, hbool_t prev_unfilt_chunk);
H5_DLL herr_t H5D__chunk_unlock(const H5D_io_info_t *io_info,
const H5D_chunk_ud_t *udata, hbool_t dirty, void *chunk,
uint32_t naccessed);
H5_DLL herr_t H5D__chunk_allocated(H5D_t *dset, hid_t dxpl_id, hsize_t *nbytes);
H5_DLL herr_t H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id,
hbool_t full_overwrite, hsize_t old_dim[]);
+H5_DLL herr_t H5D__chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id,
+ hsize_t old_dim[]);
H5_DLL herr_t H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id,
const hsize_t *old_dim);
#ifdef H5_HAVE_PARALLEL
@@ -639,12 +715,20 @@ H5_DLL herr_t H5D__chunk_bh_info(const H5O_loc_t *loc, hid_t dxpl_id, H5O_t *oh,
H5_DLL herr_t H5D__chunk_dump_index(H5D_t *dset, hid_t dxpl_id, FILE *stream);
H5_DLL herr_t H5D__chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh,
H5O_storage_t *store);
+H5_DLL herr_t H5D__chunk_create_flush_dep(const H5D_rdcc_t *rdcc,
+ const H5O_layout_chunk_t *layout, const hsize_t offset[], void *parent);
+H5_DLL herr_t H5D__chunk_update_flush_dep(const H5D_rdcc_t *rdcc,
+ const H5O_layout_chunk_t *layout, const hsize_t offset[], void *old_parent,
+ void *new_parent);
H5_DLL herr_t H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters,
hsize_t *offset, uint32_t data_size, const void *buf);
#ifdef H5D_CHUNK_DEBUG
H5_DLL herr_t H5D__chunk_stats(const H5D_t *dset, hbool_t headers);
#endif /* H5D_CHUNK_DEBUG */
+/* format convert */
+H5_DLL herr_t H5D__chunk_format_convert(H5D_t *dset, H5D_chk_idx_info_t *idx_info, H5D_chk_idx_info_t *new_idx_info);
+
/* Functions that operate on compact dataset storage */
H5_DLL herr_t H5D__compact_fill(const H5D_t *dset, hid_t dxpl_id);
H5_DLL herr_t H5D__compact_copy(H5F_t *f_src, H5O_storage_compact_t *storage_src,
@@ -661,6 +745,9 @@ H5_DLL herr_t H5D__virtual_copy(H5F_t *f_src, H5O_layout_t *layout_dst,
H5_DLL herr_t H5D__virtual_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
hid_t dapl_id);
H5_DLL hbool_t H5D__virtual_is_space_alloc(const H5O_storage_t *storage);
+H5_DLL herr_t H5D__virtual_hold_source_dset_files(const H5D_t *dset, H5D_virtual_held_file_t **head);
+H5_DLL herr_t H5D__virtual_refresh_source_dsets(H5D_t *dset, hid_t dxpl_id);
+H5_DLL herr_t H5D__virtual_release_source_dset_files(H5D_virtual_held_file_t *head);
/* Functions that operate on EFL (External File List)*/
H5_DLL hbool_t H5D__efl_is_space_alloc(const H5O_storage_t *storage);
@@ -679,6 +766,17 @@ H5_DLL herr_t H5D__fill_refill_vl(H5D_fill_buf_info_t *fb_info, size_t nelmts,
hid_t dxpl_id);
H5_DLL herr_t H5D__fill_term(H5D_fill_buf_info_t *fb_info);
+/* Functions that operate on chunk proxy objects */
+H5_DLL herr_t H5D__chunk_proxy_create(H5D_t *dset, hid_t dxpl_id,
+ H5D_chunk_ud_t *udata, H5D_rdcc_ent_t *ent);
+H5_DLL herr_t H5D__chunk_proxy_remove(const H5D_t *dset, hid_t dxpl_it,
+ H5D_rdcc_ent_t *ent);
+H5_DLL herr_t H5D__chunk_proxy_mark(H5D_rdcc_ent_t *ent, hbool_t dirty);
+H5_DLL herr_t H5D__chunk_proxy_create_flush_dep(H5D_rdcc_ent_t *ent,
+ void *parent);
+H5_DLL herr_t H5D__chunk_proxy_update_flush_dep(H5D_rdcc_ent_t *ent,
+ void *old_parent, void *new_parent);
+
#ifdef H5_HAVE_PARALLEL
#ifdef H5S_DEBUG
@@ -725,6 +823,7 @@ H5_DLL htri_t H5D__mpio_opt_possible(const H5D_io_info_t *io_info,
#ifdef H5D_TESTING
H5_DLL herr_t H5D__layout_version_test(hid_t did, unsigned *version);
H5_DLL herr_t H5D__layout_contig_size_test(hid_t did, hsize_t *size);
+H5_DLL herr_t H5D__layout_idx_type_test(hid_t did, H5D_chunk_index_t *idx_type);
H5_DLL herr_t H5D__current_cache_size_test(hid_t did, size_t *nbytes_used, int *nused);
#endif /* H5D_TESTING */
diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h
index 3b43aaf..cb04f2e 100644
--- a/src/H5Dprivate.h
+++ b/src/H5Dprivate.h
@@ -54,6 +54,7 @@
#define H5D_ACS_PREEMPT_READ_CHUNKS_NAME "rdcc_w0" /* Preemption read chunks first */
#define H5D_ACS_VDS_VIEW_NAME "vds_view" /* VDS view option */
#define H5D_ACS_VDS_PRINTF_GAP_NAME "vds_printf_gap" /* VDS printf gap size */
+#define H5D_ACS_APPEND_FLUSH_NAME "append_flush" /* Append flush actions */
/* ======== Data transfer properties ======== */
#define H5D_XFER_MAX_TEMP_BUF_NAME "max_temp_buf" /* Maximum temp buffer size */
@@ -148,8 +149,17 @@ typedef struct H5D_copy_file_ud_t {
H5O_copy_file_ud_common_t common; /* Shared information (must be first) */
struct H5S_extent_t *src_space_extent; /* Copy of dataspace extent for dataset */
H5T_t *src_dtype; /* Copy of datatype for dataset */
+ H5O_layout_t *src_layout; /* Copy of layout for dataset */
} H5D_copy_file_ud_t;
+/* Structure for dataset append flush property (H5Pset_append_flush) */
+typedef struct H5D_append_flush_t {
+ unsigned ndims; /* The # of dimensions for "boundary" */
+ hsize_t boundary[H5S_MAX_RANK]; /* The dimension sizes for determining boundary */
+ H5D_append_cb_t func; /* The callback function */
+ void *udata; /* User data */
+} H5D_append_flush_t;
+
/*****************************/
/* Library Private Variables */
diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h
index a1f87e3..0bb232f 100644
--- a/src/H5Dpublic.h
+++ b/src/H5Dpublic.h
@@ -34,6 +34,9 @@
#define H5D_CHUNK_CACHE_NBYTES_DEFAULT ((size_t) -1)
#define H5D_CHUNK_CACHE_W0_DEFAULT (-1.0f)
+/* Bit flags for the H5Pset_chunk_opts() and H5Pget_chunk_opts() */
+#define H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS (0x0002u)
+
/* Property names for H5LTDdirect_chunk_write */
#define H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME "direct_chunk_flag"
#define H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME "direct_chunk_filters"
@@ -57,8 +60,13 @@ typedef enum H5D_layout_t {
/* Types of chunk index data structures */
typedef enum H5D_chunk_index_t {
- H5D_CHUNK_IDX_BTREE = 0, /* v1 B-tree index */
- H5D_CHUNK_IDX_NTYPES /* this one must be last! */
+ H5D_CHUNK_IDX_BTREE = 0, /* v1 B-tree index (default) */
+ H5D_CHUNK_IDX_SINGLE = 1, /* Single Chunk index (cur dims[]=max dims[]=chunk dims[]; filtered & non-filtered) */
+ H5D_CHUNK_IDX_NONE = 2, /* Implicit: No Index (H5D_ALLOC_TIME_EARLY, non-filtered, fixed dims) */
+ H5D_CHUNK_IDX_FARRAY = 3, /* Fixed array (for 0 unlimited dims) */
+ H5D_CHUNK_IDX_EARRAY = 4, /* Extensible array (for 1 unlimited dim) */
+ H5D_CHUNK_IDX_BT2 = 5, /* v2 B-tree index (for >1 unlimited dims) */
+ H5D_CHUNK_IDX_NTYPES /*this one must be last! */
} H5D_chunk_index_t;
/* Values for the space allocation time property */
@@ -101,6 +109,9 @@ typedef enum H5D_vds_view_t {
H5D_VDS_LAST_AVAILABLE = 1
} H5D_vds_view_t;
+/* Callback for H5Pset_append_flush() in a dataset access property list */
+typedef herr_t (*H5D_append_cb_t)(hid_t dataset_id, hsize_t *cur_dims, void *op_data);
+
/********************/
/* Public Variables */
/********************/
@@ -149,12 +160,18 @@ H5_DLL herr_t H5Dvlen_get_buf_size(hid_t dataset_id, hid_t type_id, hid_t space_
H5_DLL herr_t H5Dfill(const void *fill, hid_t fill_type, void *buf,
hid_t buf_type, hid_t space);
H5_DLL herr_t H5Dset_extent(hid_t dset_id, const hsize_t size[]);
+H5_DLL herr_t H5Dflush(hid_t dset_id);
+H5_DLL herr_t H5Drefresh(hid_t dset_id);
H5_DLL herr_t H5Dscatter(H5D_scatter_func_t op, void *op_data, hid_t type_id,
hid_t dst_space_id, void *dst_buf);
H5_DLL herr_t H5Dgather(hid_t src_space_id, const void *src_buf, hid_t type_id,
size_t dst_buf_size, void *dst_buf, H5D_gather_func_t op, void *op_data);
H5_DLL herr_t H5Ddebug(hid_t dset_id);
+/* Internal API routines */
+H5_DLL herr_t H5Dformat_convert(hid_t dset_id);
+H5_DLL herr_t H5Dget_chunk_index_type(hid_t did, H5D_chunk_index_t *idx_type);
+
/* Symbols defined for compatibility with previous versions of the HDF5 API.
*
* Use of these symbols is deprecated.
@@ -162,6 +179,7 @@ H5_DLL herr_t H5Ddebug(hid_t dset_id);
#ifndef H5_NO_DEPRECATED_SYMBOLS
/* Macros */
+#define H5D_CHUNK_BTREE H5D_CHUNK_IDX_BTREE
/* Typedefs */
diff --git a/src/H5Dsingle.c b/src/H5Dsingle.c
new file mode 100644
index 0000000..04b8971
--- /dev/null
+++ b/src/H5Dsingle.c
@@ -0,0 +1,557 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: Vailin Choi <vchoi@hdfgroup.org>
+ * May 2011; updated 10/2015
+ *
+ * Purpose: Single Chunk I/O functions.
+ * This is used when the dataset has only 1 chunk (with or without filter):
+ * cur_dims[] is equal to max_dims[] is equal to the chunk dims[]
+ * non-filter chunk record: [address of the chunk]
+ * filtered chunk record: [address of the chunk, chunk size, filter mask]
+ *
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5Dmodule.h" /* This source code file is part of the H5D module */
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Dpkg.h" /* Datasets */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5MFprivate.h" /* File space management */
+#include "H5VMprivate.h" /* Vector functions */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/* Single Chunk Index chunking I/O ops */
+static herr_t H5D__single_idx_init(const H5D_chk_idx_info_t *idx_info,
+ const H5S_t *space, haddr_t dset_ohdr_addr);
+static herr_t H5D__single_idx_create(const H5D_chk_idx_info_t *idx_info);
+static hbool_t H5D__single_idx_is_space_alloc(const H5O_storage_chunk_t *storage);
+static herr_t H5D__single_idx_insert(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_ud_t *udata, const H5D_t *dset);
+static herr_t H5D__single_idx_get_addr(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_ud_t *udata);
+static int H5D__single_idx_iterate(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_cb_func_t chunk_cb, void *chunk_udata);
+static herr_t H5D__single_idx_remove(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_common_ud_t *udata);
+static herr_t H5D__single_idx_delete(const H5D_chk_idx_info_t *idx_info);
+static herr_t H5D__single_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
+ const H5D_chk_idx_info_t *idx_info_dst);
+static herr_t H5D__single_idx_size(const H5D_chk_idx_info_t *idx_info,
+ hsize_t *size);
+static herr_t H5D__single_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr);
+static herr_t H5D__single_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream);
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/* Single Chunk index chunk I/O ops */
+const H5D_chunk_ops_t H5D_COPS_SINGLE[1] = {{
+ FALSE, /* Single Chunk indexing doesn't currently support SWMR access */
+ H5D__single_idx_init, /* init */
+ H5D__single_idx_create, /* create */
+ H5D__single_idx_is_space_alloc, /* is_space_alloc */
+ H5D__single_idx_insert, /* insert */
+ H5D__single_idx_get_addr, /* get_addr */
+ NULL, /* resize */
+ H5D__single_idx_iterate, /* iterate */
+ H5D__single_idx_remove, /* remove */
+ H5D__single_idx_delete, /* delete */
+ H5D__single_idx_copy_setup, /* copy_setup */
+ NULL, /* copy_shutdown */
+ H5D__single_idx_size, /* size */
+ H5D__single_idx_reset, /* reset */
+ H5D__single_idx_dump, /* dump */
+ NULL /* destroy */
+}};
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__single_idx_init
+ *
+ * Purpose: Initialize the indexing information for a dataset.
+ * For the Single Chunk index this only records, in the layout
+ * flags, whether the one chunk is filtered.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi
+ * July, 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__single_idx_init(const H5D_chk_idx_info_t *idx_info,
+ const H5S_t H5_ATTR_UNUSED *space, haddr_t H5_ATTR_UNUSED dset_ohdr_addr)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+
+ /* NOTE(review): the branches are asymmetric -- a filter pipeline ORs the
+ * flag in, while the no-filter branch clears ALL layout flags. Confirm
+ * that no other flag bits can be set here and need preserving. */
+ if(idx_info->pline->nused)
+ idx_info->layout->flags |= H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER;
+ else
+ idx_info->layout->flags = 0;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__single_idx_init() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__single_idx_create
+ *
+ * Purpose: Set up Single Chunk Index: filtered or non-filtered
+ * No on-disk structure is created here -- this routine only
+ * sanity-checks that the layout describes exactly one chunk,
+ * that no chunk address exists yet, and that the "filtered"
+ * layout flag matches the filter pipeline state.
+ *
+ * Return: Non-negative on success
+ * Negative on failure.
+ *
+ * Programmer: Vailin Choi; July 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__single_idx_create(const H5D_chk_idx_info_t *idx_info)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(idx_info->layout->max_nchunks == idx_info->layout->nchunks);
+ HDassert(idx_info->layout->nchunks == 1);
+ HDassert(!H5F_addr_defined(idx_info->storage->idx_addr));
+
+ /* Flag state must agree with the filter pipeline (set by _init) */
+ if(idx_info->pline->nused)
+ HDassert(idx_info->layout->flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER);
+ else
+ HDassert(!(idx_info->layout->flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER));
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__single_idx_create() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__single_idx_is_space_alloc
+ *
+ * Purpose: Query if space is allocated for the single chunk
+ *
+ * Return: TRUE if the single chunk's space is allocated
+ * FALSE otherwise
+ *
+ * Programmer: Vailin Choi; July 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+H5D__single_idx_is_space_alloc(const H5O_storage_chunk_t *storage)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(storage);
+
+ /* The chunk's file address doubles as the index address, so a defined
+ * address means the chunk's space has been allocated */
+ FUNC_LEAVE_NOAPI((hbool_t)H5F_addr_defined(storage->idx_addr))
+} /* end H5D__single_idx_is_space_alloc() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__single_idx_insert
+ *
+ * Purpose: Allocate space for the single chunk
+ * Records the chunk's file address (and, when filtered, its
+ * on-disk size and filter mask) directly in the index storage,
+ * then marks the layout message dirty so the information
+ * reaches the file.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; July 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__single_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata,
+ const H5D_t *dset)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(idx_info->layout->nchunks == 1);
+ HDassert(idx_info->layout->max_nchunks == 1);
+ HDassert(udata);
+
+ /* Set the address for the chunk */
+ HDassert(H5F_addr_defined(udata->chunk_block.offset));
+ idx_info->storage->idx_addr = udata->chunk_block.offset;
+
+ /* For a filtered chunk, also record the actual (compressed) size on disk
+ * and the mask of filters that were skipped */
+ if(idx_info->pline->nused > 0) {
+ H5_CHECKED_ASSIGN(idx_info->storage->u.single.nbytes, uint32_t, udata->chunk_block.length, hsize_t);
+ idx_info->storage->u.single.filter_mask = udata->filter_mask;
+ } /* end if */
+
+ /* NOTE(review): layout is NOT marked dirty for unfiltered early-allocated
+ * chunks -- presumably the address is already flushed with the initial
+ * layout message in that case; confirm against the allocation path. */
+ if(dset) {
+ if(dset->shared->dcpl_cache.fill.alloc_time != H5D_ALLOC_TIME_EARLY || idx_info->pline->nused > 0) {
+ /* Mark the layout dirty so that the address of the single chunk will be flushed later */
+ if(H5D__mark(dset, idx_info->dxpl_id, H5D_MARK_LAYOUT) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to mark layout as dirty")
+ } /* end if */
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__single_idx_insert() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__single_idx_get_addr
+ *
+ * Purpose: Get the file address of a chunk.
+ * Save the retrieved information in the udata supplied.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; July 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__single_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(idx_info->layout->nchunks == 1);
+ HDassert(idx_info->layout->max_nchunks == 1);
+ HDassert(udata);
+
+ /* The index address IS the chunk address for this index type */
+ udata->chunk_block.offset = idx_info->storage->idx_addr;
+ /* Filtered chunk: size & filter mask were recorded at insert time;
+ * unfiltered chunk: size is simply the layout's chunk size */
+ if(idx_info->layout->flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) {
+ udata->chunk_block.length = idx_info->storage->u.single.nbytes;
+ udata->filter_mask = idx_info->storage->u.single.filter_mask;
+ } /* end if */
+ else {
+ udata->chunk_block.length = idx_info->layout->size;
+ udata->filter_mask = 0;
+ } /* end else */
+ /* Chunk not allocated yet: report a zero length */
+ if(!H5F_addr_defined(udata->chunk_block.offset))
+ udata->chunk_block.length = 0;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5D__single_idx_get_addr() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__single_idx_iterate
+ *
+ * Purpose: Make callback for the single chunk
+ *
+ * Return: Non-negative on success/Negative on failure
+ * (the callback's return value is passed through)
+ *
+ * Programmer: Vailin Choi; July 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__single_idx_iterate(const H5D_chk_idx_info_t *idx_info,
+ H5D_chunk_cb_func_t chunk_cb, void *chunk_udata)
+{
+ H5D_chunk_rec_t chunk_rec; /* generic chunk record */
+ int ret_value = -1; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(chunk_cb);
+ HDassert(chunk_udata);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+
+ /* Initialize generic chunk record */
+ /* (the scaled[] coordinates stay zero -- the one chunk is at the origin) */
+ HDmemset(&chunk_rec, 0, sizeof(chunk_rec));
+ chunk_rec.chunk_addr = idx_info->storage->idx_addr;
+
+ /* Filtered chunk: use the recorded on-disk size & mask;
+ * unfiltered: use the layout's chunk size */
+ if(idx_info->layout->flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) {
+ chunk_rec.nbytes = idx_info->storage->u.single.nbytes;
+ chunk_rec.filter_mask = idx_info->storage->u.single.filter_mask;
+ } /* end if */
+ else {
+ chunk_rec.nbytes = idx_info->layout->size;
+ chunk_rec.filter_mask = 0;
+ } /* end else */
+
+ /* Make "generic chunk" callback */
+ if((ret_value = (*chunk_cb)(&chunk_rec, chunk_udata)) < 0)
+ HERROR(H5E_DATASET, H5E_CALLBACK, "failure in generic chunk iterator callback");
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__single_idx_iterate() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__single_idx_remove
+ *
+ * Purpose: Remove the single chunk: free its file space and reset
+ * the index address to "undefined".
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; July 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__single_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t H5_ATTR_UNUSED *udata)
+{
+ hsize_t nbytes; /* Size of all chunks */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+ HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+
+ /* Filtered chunk: free the recorded (compressed) on-disk size;
+ * unfiltered: the chunk occupies exactly the layout's chunk size */
+ if(idx_info->layout->flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER)
+ nbytes = idx_info->storage->u.single.nbytes;
+ else
+ nbytes = idx_info->layout->size;
+
+ /* Release the chunk's file space.
+ * Note: return FAIL here, not H5_ITER_ERROR -- this is an herr_t
+ * routine, not an iterator callback. */
+ if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, idx_info->storage->idx_addr, nbytes) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free dataset chunks")
+
+ idx_info->storage->idx_addr = HADDR_UNDEF;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__single_idx_remove() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__single_idx_delete
+ *
+ * Purpose: Delete raw data storage for entire dataset (i.e. the only chunk)
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Sept 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__single_idx_delete(const H5D_chk_idx_info_t *idx_info)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(idx_info);
+ HDassert(idx_info->f);
+ HDassert(idx_info->pline);
+ HDassert(idx_info->layout);
+ HDassert(idx_info->storage);
+
+ if(H5F_addr_defined(idx_info->storage->idx_addr))
+ ret_value = H5D__single_idx_remove(idx_info, NULL);
+ else
+ HDassert(!H5F_addr_defined(idx_info->storage->idx_addr));
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__single_idx_delete() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__single_idx_copy_setup
+ *
+ * Purpose: Set up any necessary information for copying the single chunk
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Sept 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__single_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
+ const H5D_chk_idx_info_t *idx_info_dst)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check args */
+ HDassert(idx_info_src);
+ HDassert(idx_info_src->f);
+ HDassert(idx_info_src->pline);
+ HDassert(idx_info_src->layout);
+ HDassert(idx_info_src->storage);
+ HDassert(H5F_addr_defined(idx_info_src->storage->idx_addr));
+
+ HDassert(idx_info_dst);
+ HDassert(idx_info_dst->f);
+ HDassert(idx_info_dst->pline);
+ HDassert(idx_info_dst->layout);
+ HDassert(idx_info_dst->storage);
+
+ /* Set copied metadata tag */
+ H5_BEGIN_TAG(idx_info_dst->dxpl_id, H5AC__COPIED_TAG, FAIL);
+
+ /* Set up information at the destination file */
+ if(H5D__single_idx_create(idx_info_dst) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunked storage")
+
+ /* Reset metadata tag */
+ H5_END_TAG(FAIL);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__single_idx_copy_setup() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__single_idx_size
+ *
+ * Purpose: Retrieve the amount of index storage for the chunked dataset
+ *
+ * Return: Success: Non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Sept 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__single_idx_size(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, hsize_t *index_size)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(index_size);
+
+ *index_size = 0;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__single_idx_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__single_idx_reset
+ *
+ * Purpose: Reset indexing information.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Sept 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__single_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(storage);
+
+ /* Reset index info */
+ if(reset_addr)
+ storage->idx_addr = HADDR_UNDEF;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__single_idx_reset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__single_idx_dump
+ *
+ * Purpose: Dump the address of the single chunk
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; September 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__single_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check args */
+ HDassert(storage);
+ HDassert(stream);
+
+ HDfprintf(stream, " Address: %a\n", storage->idx_addr);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5D__single_idx_dump() */
+
diff --git a/src/H5Dtest.c b/src/H5Dtest.c
index fd8ff71..c3b0b19 100644
--- a/src/H5Dtest.c
+++ b/src/H5Dtest.c
@@ -144,6 +144,47 @@ done:
/*--------------------------------------------------------------------------
NAME
+ H5D__layout_idx_type_test
+ PURPOSE
+ Determine the storage layout index type for a dataset's layout information
+ USAGE
+ herr_t H5D__layout_idx_type_test(did, idx_type)
+ hid_t did; IN: Dataset to query
+ H5D_chunk_index_t *idx_type; OUT: Pointer to location to place index type info
+ RETURNS
+ Non-negative on success, negative on failure
+ DESCRIPTION
+ Checks the index type of the storage layout information for a dataset.
+ GLOBAL VARIABLES
+ COMMENTS, BUGS, ASSUMPTIONS
+ DO NOT USE THIS FUNCTION FOR ANYTHING EXCEPT TESTING
+ EXAMPLES
+ REVISION LOG
+--------------------------------------------------------------------------*/
+herr_t
+H5D__layout_idx_type_test(hid_t did, H5D_chunk_index_t *idx_type)
+{
+ H5D_t *dset; /* Pointer to dataset to query */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Check args */
+ if(NULL == (dset = (H5D_t *)H5I_object_verify(did, H5I_DATASET)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
+ if(dset->shared->layout.type != H5D_CHUNKED)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dataset is not chunked")
+
+ if(idx_type)
+ *idx_type = dset->shared->layout.u.chunk.idx_type;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__layout_idx_type_test() */
+
+
+/*--------------------------------------------------------------------------
+ NAME
H5D__current_cache_size_test
PURPOSE
Determine current the size of the dataset's chunk cache
diff --git a/src/H5Dvirtual.c b/src/H5Dvirtual.c
index 7c5186d..93fc61c 100644
--- a/src/H5Dvirtual.c
+++ b/src/H5Dvirtual.c
@@ -144,6 +144,9 @@ const H5D_layout_ops_t H5D_LOPS_VIRTUAL[1] = {{
/* Declare a free list to manage the H5O_storage_virtual_name_seg_t struct */
H5FL_DEFINE(H5O_storage_virtual_name_seg_t);
+/* Declare a static free list to manage H5D_virtual_file_list_t structs */
+H5FL_DEFINE_STATIC(H5D_virtual_held_file_t);
+
/*-------------------------------------------------------------------------
@@ -765,7 +768,7 @@ H5D__virtual_open_source_dset(const H5D_t *vdset,
/* Check if we need to open the source file */
if(HDstrcmp(source_dset->file_name, ".")) {
/* Open the source file */
- if(NULL == (src_file = H5F_open(source_dset->file_name, H5F_INTENT(vdset->oloc.file) & H5F_ACC_RDWR, H5P_FILE_CREATE_DEFAULT, vdset->shared->layout.storage.u.virt.source_fapl, dxpl_id)))
+ if(NULL == (src_file = H5F_open(source_dset->file_name, H5F_INTENT(vdset->oloc.file) & (H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE | H5F_ACC_SWMR_READ), H5P_FILE_CREATE_DEFAULT, vdset->shared->layout.storage.u.virt.source_fapl, dxpl_id)))
H5E_clear_stack(NULL); /* Quick hack until proper support for H5Fopen with missing file is implemented */
else
src_file_open = TRUE;
@@ -2724,3 +2727,220 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__virtual_flush() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__virtual_hold_source_dset_files
+ *
+ * Purpose:     Hold open the source files that are open during a refresh event
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * November 7, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__virtual_hold_source_dset_files(const H5D_t *dset, H5D_virtual_held_file_t **head)
+{
+ H5O_storage_virtual_t *storage; /* Convenient pointer into layout struct */
+ H5D_virtual_held_file_t *tmp; /* Temporary held file node */
+ size_t i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity check */
+ HDassert(dset);
+ HDassert(head && NULL == *head);
+
+ /* Set the convenience pointer */
+ storage = &dset->shared->layout.storage.u.virt;
+
+ /* Hold only files for open datasets */
+ for(i = 0; i < storage->list_nused; i++)
+ /* Check for "printf" source dataset resolution */
+ if(storage->list[i].psfn_nsubs || storage->list[i].psdn_nsubs) {
+ size_t j; /* Local index variable */
+
+ /* Iterate over sub-source dsets */
+ for(j = 0; j < storage->list[i].sub_dset_nused; j++)
+ if(storage->list[i].sub_dset[j].dset) {
+ /* Hold open the file */
+ H5F_INCR_NOPEN_OBJS(storage->list[i].sub_dset[j].dset->oloc.file);
+
+ /* Allocate a node for this file */
+ if(NULL == (tmp = H5FL_MALLOC(H5D_virtual_held_file_t)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "can't allocate held file node")
+
+ /* Set up node & connect to list */
+ tmp->file = storage->list[i].sub_dset[j].dset->oloc.file;
+ tmp->next = *head;
+ *head = tmp;
+ } /* end if */
+ } /* end if */
+ else
+ if(storage->list[i].source_dset.dset) {
+ /* Hold open the file */
+ H5F_INCR_NOPEN_OBJS(storage->list[i].source_dset.dset->oloc.file);
+
+ /* Allocate a node for this file */
+ if(NULL == (tmp = H5FL_MALLOC(H5D_virtual_held_file_t)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "can't allocate held file node")
+
+ /* Set up node & connect to list */
+ tmp->file = storage->list[i].source_dset.dset->oloc.file;
+ tmp->next = *head;
+ *head = tmp;
+ } /* end if */
+
+done:
+ if(ret_value < 0) {
+ /* Release hold on files and delete list on error */
+ if(*head)
+ if(H5D__virtual_release_source_dset_files(*head) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't release source datasets' files held open")
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__virtual_hold_source_dset_files() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__virtual_refresh_source_dset
+ *
+ * Purpose: Refresh a source dataset
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * November 7, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__virtual_refresh_source_dset(H5D_t **dset, hid_t dxpl_id)
+{
+ hid_t dset_id; /* Temporary dataset identifier */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(dset && *dset);
+
+ /* Get a temporary identifier for this source dataset */
+ if((dset_id = H5I_register(H5I_DATASET, *dset, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "can't register source dataset ID")
+
+ /* Refresh source dataset */
+ if(H5D__refresh(dset_id, *dset, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to refresh source dataset")
+
+ /* Discard the identifier & replace the dataset */
+ if(NULL == (*dset = (H5D_t *)H5I_remove(dset_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "can't unregister source dataset ID")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__virtual_refresh_source_dset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__virtual_refresh_source_dsets
+ *
+ * Purpose: Refresh the source datasets
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Dana Robinson
+ * November, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__virtual_refresh_source_dsets(H5D_t *dset, hid_t dxpl_id)
+{
+ H5O_storage_virtual_t *storage; /* Convenient pointer into layout struct */
+ size_t i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity check */
+ HDassert(dset);
+
+ /* Set convenience pointer */
+ storage = &dset->shared->layout.storage.u.virt;
+
+ /* Refresh only open datasets */
+ for(i = 0; i < storage->list_nused; i++)
+ /* Check for "printf" source dataset resolution */
+ if(storage->list[i].psfn_nsubs || storage->list[i].psdn_nsubs) {
+ size_t j; /* Local index variable */
+
+ /* Iterate over sub-source datasets */
+ for(j = 0; j < storage->list[i].sub_dset_nused; j++)
+ /* Check if sub-source dataset is open */
+ if(storage->list[i].sub_dset[j].dset)
+ /* Refresh sub-source dataset */
+ if(H5D__virtual_refresh_source_dset(&storage->list[i].sub_dset[j].dset, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to refresh source dataset")
+ } /* end if */
+ else
+ /* Check if source dataset is open */
+ if(storage->list[i].source_dset.dset)
+ /* Refresh source dataset */
+ if(H5D__virtual_refresh_source_dset(&storage->list[i].source_dset.dset, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to refresh source dataset")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__virtual_refresh_source_dsets() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__virtual_release_source_dset_files
+ *
+ * Purpose:     Release the hold on source files that are open during a refresh event
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * November 7, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__virtual_release_source_dset_files(H5D_virtual_held_file_t *head)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Release hold on files and delete list */
+ while(head) {
+ H5D_virtual_held_file_t *tmp = head->next; /* Temporary pointer to next node */
+
+ /* Release hold on file */
+ H5F_DECR_NOPEN_OBJS(head->file);
+
+ /* Attempt to close the file */
+ /* (Should always succeed, since the 'top' source file pointer is
+ * essentially "private" to the virtual dataset, since it wasn't
+ * opened through an API routine -QAK)
+ */
+ if(H5F_try_close(head->file) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEFILE, FAIL, "problem attempting file close")
+
+ /* Delete node */
+ (void)H5FL_FREE(H5D_virtual_held_file_t, head);
+
+ /* Advance to next node */
+ head = tmp;
+ } /* end while */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__virtual_release_source_dset_files() */
+
diff --git a/src/H5EA.c b/src/H5EA.c
index be60301..bf18239 100644
--- a/src/H5EA.c
+++ b/src/H5EA.c
@@ -88,6 +88,8 @@ hbool_t H5_PKG_INIT_VAR = FALSE;
* client class..
*/
const H5EA_class_t *const H5EA_client_class_g[] = {
+ H5EA_CLS_CHUNK, /* 0 - H5EA_CLS_CHUNK_ID */
+ H5EA_CLS_FILT_CHUNK, /* 1 - H5EA_CLS_FILT_CHUNK_ID */
H5EA_CLS_TEST, /* ? - H5EA_CLS_TEST_ID */
};
@@ -104,6 +106,8 @@ const H5EA_class_t *const H5EA_client_class_g[] = {
/* Declare a free list to manage the H5EA_t struct */
H5FL_DEFINE_STATIC(H5EA_t);
+/* Declare a PQ free list to manage the element */
+H5FL_BLK_DEFINE(ea_native_elmt);
/*-------------------------------------------------------------------------
@@ -817,16 +821,42 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/*
* Check arguments.
+ *
+ * At present, this function is only used to setup a flush dependency
+ * between an object header proxy and the extensible array header when
+ * the extensible array is being used to index a chunked data set.
+ *
+ * Make sure that the parameters are congruent with this.
*/
HDassert(ea);
HDassert(hdr);
+ HDassert(parent_entry);
+ HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(parent_entry->type);
+ HDassert(parent_entry->type->id == H5AC_OHDR_PROXY_ID);
+ HDassert((hdr->fd_parent_addr == HADDR_UNDEF) ||
+ (hdr->fd_parent_addr == parent_entry->addr));
+ HDassert((hdr->fd_parent_ptr == NULL) ||
+ (hdr->fd_parent_ptr == parent_entry));
- /* Set the shared array header's file context for this operation */
- hdr->f = ea->f;
+ /*
+ * Check to see if the flush dependency between the object header proxy
+ * and the extensible array header has already been set up.  If it hasn't,
+ * set it up.
+ */
+ if(!H5F_addr_defined(hdr->fd_parent_addr)) {
+ /* Set the shared array header's file context for this operation */
+ hdr->f = ea->f;
+
+ /* Set up flush dependency between parent entry and extensible
+ * array header
+ */
+ if(H5EA__create_flush_depend(parent_entry, (H5AC_info_t *)hdr) < 0)
+ H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency on file metadata")
- /* Set up flush dependency between parent entry and extensible array header */
- if(H5EA__create_flush_depend(parent_entry, (H5AC_info_t *)hdr) < 0)
- H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency on file metadata")
+ hdr->fd_parent_addr = parent_entry->addr;
+ hdr->fd_parent_ptr = parent_entry;
+ } /* end if */
CATCH
@@ -877,116 +907,6 @@ END_FUNC(PRIV) /* end H5EA_undepend() */
/*-------------------------------------------------------------------------
- * Function: H5EA_support
- *
- * Purpose: Create a child flush dependency on the array metadata that
- * contains the element for an array index.
- *
- * Return: SUCCEED/FAIL
- *
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * May 21 2009
- *
- *-------------------------------------------------------------------------
- */
-BEGIN_FUNC(PRIV, ERR,
-herr_t, SUCCEED, FAIL,
-H5EA_support(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, H5AC_info_t *child_entry))
-
- /* Local variables */
- void *thing = NULL; /* Pointer to the array metadata containing the array index we are interested in */
- uint8_t *thing_elmt_buf; /* Pointer to the element buffer for the array metadata */
- hsize_t thing_elmt_idx; /* Index of the element in the element buffer for the array metadata */
- H5EA__unprotect_func_t thing_unprot_func; /* Function pointer for unprotecting the array metadata */
-
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
-#endif /* QAK */
-
- /*
- * Check arguments.
- */
- HDassert(ea);
-
- /* Look up the array metadata containing the element we want to set */
- if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__NO_FLAGS_SET, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
- H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")
-
- /* Sanity check */
- HDassert(thing);
- HDassert(thing_elmt_buf);
- HDassert(thing_unprot_func);
-
- /* Set up flush dependency between child_entry and metadata array 'thing' */
- if(H5EA__create_flush_depend((H5AC_info_t *)thing, child_entry) < 0)
- H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency on array metadata")
-
-CATCH
- /* Release resources */
- if(thing && (thing_unprot_func)(thing, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
- H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array metadata")
-
-END_FUNC(PRIV) /* end H5EA_support() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5EA_unsupport
- *
- * Purpose: Remove a flush dependency on the array metadata that contains
- * the element for an array index.
- *
- * Return: SUCCEED/FAIL
- *
- * Programmer: Quincey Koziol
- * koziol@hdfgroup.org
- * May 21 2009
- *
- *-------------------------------------------------------------------------
- */
-BEGIN_FUNC(PRIV, ERR,
-herr_t, SUCCEED, FAIL,
-H5EA_unsupport(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, H5AC_info_t *child_entry))
-
- /* Local variables */
- void *thing = NULL; /* Pointer to the array metadata containing the array index we are interested in */
- uint8_t *thing_elmt_buf; /* Pointer to the element buffer for the array metadata */
- hsize_t thing_elmt_idx; /* Index of the element in the element buffer for the array metadata */
- H5EA__unprotect_func_t thing_unprot_func; /* Function pointer for unprotecting the array metadata */
-
-#ifdef QAK
-HDfprintf(stderr, "%s: Called\n", FUNC);
-HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
-#endif /* QAK */
-
- /*
- * Check arguments.
- */
- HDassert(ea);
-
- /* Look up the array metadata containing the element we want to set */
- if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__READ_ONLY_FLAG, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
- H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")
-
- /* Sanity check */
- HDassert(thing);
- HDassert(thing_elmt_buf);
- HDassert(thing_unprot_func);
-
- /* Remove flush dependency between child_entry and metadata array 'thing' */
- if(H5EA__destroy_flush_depend((H5AC_info_t *)thing, child_entry) < 0)
- H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency on array metadata")
-
-CATCH
- /* Release resources */
- if(thing && (thing_unprot_func)(thing, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
- H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array metadata")
-
-END_FUNC(PRIV) /* end H5EA_unsupport() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5EA_close
*
* Purpose: Close an extensible array
@@ -1142,3 +1062,56 @@ CATCH
END_FUNC(PRIV) /* end H5EA_delete() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5EA_iterate
+ *
+ * Purpose: Iterate over the elements of an extensible array
+ * (copied and modified from FA_iterate() in H5FA.c)
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Vailin Choi; Feb 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(PRIV, ERR,
+herr_t, SUCCEED, FAIL,
+H5EA_iterate(H5EA_t *ea, hid_t dxpl_id, H5EA_operator_t op, void *udata))
+
+ /* Local variables */
+ uint8_t *elmt = NULL;
+ hsize_t u;
+
+ /*
+ * Check arguments.
+ */
+ HDassert(ea);
+ HDassert(op);
+ HDassert(udata);
+
+ /* Allocate space for a native array element */
+ if(NULL == (elmt = H5FL_BLK_MALLOC(ea_native_elmt, ea->hdr->cparam.cls->nat_elmt_size)))
+ H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array element")
+
+ /* Iterate over all elements in array */
+ for(u = 0; u < ea->hdr->stats.stored.max_idx_set; u++) {
+ int cb_ret; /* Return value from callback */
+
+ /* Get array element */
+ if(H5EA_get(ea, dxpl_id, u, elmt) < 0)
+ H5E_THROW(H5E_CANTGET, "unable to delete fixed array")
+
+ /* Make callback */
+ if((cb_ret = (*op)(u, elmt, udata)) < 0) {
+ H5E_PRINTF(H5E_BADITER, "iterator function failed");
+ H5_LEAVE(cb_ret)
+ } /* end if */
+ } /* end for */
+
+CATCH
+
+ if(elmt)
+ elmt = H5FL_BLK_FREE(ea_native_elmt, elmt);
+
+END_FUNC(PRIV) /* end H5EA_iterate() */
diff --git a/src/H5EAcache.c b/src/H5EAcache.c
index e9c93d4..d3f3d41 100644
--- a/src/H5EAcache.c
+++ b/src/H5EAcache.c
@@ -73,16 +73,23 @@
/********************/
/* Metadata cache (H5AC) callbacks */
-static herr_t H5EA__cache_hdr_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5EA__cache_hdr_get_load_size(const void *image, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5EA__cache_hdr_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5EA__cache_hdr_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5EA__cache_hdr_image_len(const void *thing, size_t *image_len,
hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static herr_t H5EA__cache_hdr_serialize(const H5F_t *f, void *image, size_t len,
void *thing);
+static herr_t H5EA__cache_hdr_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5EA__cache_hdr_free_icr(void *thing);
-static herr_t H5EA__cache_iblock_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5EA__cache_iblock_get_load_size(const void *image, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5EA__cache_iblock_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5EA__cache_iblock_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5EA__cache_iblock_image_len(const void *thing,
@@ -93,7 +100,10 @@ static herr_t H5EA__cache_iblock_serialize(const H5F_t *f, void *image, size_t l
static herr_t H5EA__cache_iblock_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5EA__cache_iblock_free_icr(void *thing);
-static herr_t H5EA__cache_sblock_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5EA__cache_sblock_get_load_size(const void *image, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5EA__cache_sblock_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5EA__cache_sblock_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5EA__cache_sblock_image_len(const void *thing,
@@ -104,7 +114,10 @@ static herr_t H5EA__cache_sblock_serialize(const H5F_t *f, void *image, size_t l
static herr_t H5EA__cache_sblock_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5EA__cache_sblock_free_icr(void *thing);
-static herr_t H5EA__cache_dblock_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5EA__cache_dblock_get_load_size(const void *image, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5EA__cache_dblock_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5EA__cache_dblock_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5EA__cache_dblock_image_len(const void *thing,
@@ -116,7 +129,10 @@ static herr_t H5EA__cache_dblock_notify(H5AC_notify_action_t action, void *thing
static herr_t H5EA__cache_dblock_free_icr(void *thing);
static herr_t H5EA__cache_dblock_fsf_size(const void *thing, size_t *fsf_size);
-static herr_t H5EA__cache_dblk_page_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5EA__cache_dblk_page_get_load_size(const void *image, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5EA__cache_dblk_page_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5EA__cache_dblk_page_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5EA__cache_dblk_page_image_len(const void *thing,
@@ -139,11 +155,12 @@ const H5AC_class_t H5AC_EARRAY_HDR[1] = {{
H5FD_MEM_EARRAY_HDR, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5EA__cache_hdr_get_load_size, /* 'get_load_size' callback */
+ H5EA__cache_hdr_verify_chksum, /* 'verify_chksum' callback */
H5EA__cache_hdr_deserialize, /* 'deserialize' callback */
H5EA__cache_hdr_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
H5EA__cache_hdr_serialize, /* 'serialize' callback */
- NULL, /* 'notify' callback */
+ H5EA__cache_hdr_notify, /* 'notify' callback */
H5EA__cache_hdr_free_icr, /* 'free_icr' callback */
NULL, /* 'clear' callback */
NULL, /* 'fsf_size' callback */
@@ -156,6 +173,7 @@ const H5AC_class_t H5AC_EARRAY_IBLOCK[1] = {{
H5FD_MEM_EARRAY_IBLOCK, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5EA__cache_iblock_get_load_size, /* 'get_load_size' callback */
+ H5EA__cache_iblock_verify_chksum, /* 'verify_chksum' callback */
H5EA__cache_iblock_deserialize, /* 'deserialize' callback */
H5EA__cache_iblock_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -173,6 +191,7 @@ const H5AC_class_t H5AC_EARRAY_SBLOCK[1] = {{
H5FD_MEM_EARRAY_SBLOCK, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5EA__cache_sblock_get_load_size, /* 'get_load_size' callback */
+ H5EA__cache_sblock_verify_chksum, /* 'verify_chksum' callback */
H5EA__cache_sblock_deserialize, /* 'deserialize' callback */
H5EA__cache_sblock_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -190,6 +209,7 @@ const H5AC_class_t H5AC_EARRAY_DBLOCK[1] = {{
H5FD_MEM_EARRAY_DBLOCK, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5EA__cache_dblock_get_load_size, /* 'get_load_size' callback */
+ H5EA__cache_dblock_verify_chksum, /* 'verify_chksum' callback */
H5EA__cache_dblock_deserialize, /* 'deserialize' callback */
H5EA__cache_dblock_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -207,6 +227,7 @@ const H5AC_class_t H5AC_EARRAY_DBLK_PAGE[1] = {{
H5FD_MEM_EARRAY_DBLK_PAGE, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5EA__cache_dblk_page_get_load_size, /* 'get_load_size' callback */
+ H5EA__cache_dblk_page_verify_chksum, /* 'verify_chksum' callback */
H5EA__cache_dblk_page_deserialize, /* 'deserialize' callback */
H5EA__cache_dblk_page_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -244,24 +265,64 @@ const H5AC_class_t H5AC_EARRAY_DBLK_PAGE[1] = {{
*/
BEGIN_FUNC(STATIC, NOERR,
herr_t, SUCCEED, -,
-H5EA__cache_hdr_get_load_size(const void *_udata, size_t *image_len))
+H5EA__cache_hdr_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- const H5EA_hdr_cache_ud_t *udata = (const H5EA_hdr_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5EA_hdr_cache_ud_t *udata = (H5EA_hdr_cache_ud_t *)_udata; /* User data for callback */
/* Check arguments */
HDassert(udata);
HDassert(udata->f);
- HDassert(H5F_addr_defined(udata->addr));
HDassert(image_len);
- /* Set the image length size */
- *image_len = (size_t)H5EA_HEADER_SIZE_FILE(udata->f);
+ if(image == NULL) {
+ /* Set the image length size */
+ *image_len = (size_t)H5EA_HEADER_SIZE_FILE(udata->f);
+ } else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
END_FUNC(STATIC) /* end H5EA__cache_hdr_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5EA__cache_hdr_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+htri_t, TRUE, -,
+H5EA__cache_hdr_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata))
+
+ /* Local variables */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+END_FUNC(STATIC) /* end H5EA__cache_hdr_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5EA__cache_hdr_deserialize
*
* Purpose: Loads a data structure from the disk.
@@ -286,7 +347,6 @@ H5EA__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
H5EA_hdr_cache_ud_t *udata = (H5EA_hdr_cache_ud_t *)_udata;
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
/* Check arguments */
HDassert(image);
@@ -361,17 +421,11 @@ H5EA__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
/* (allow for checksum not decoded yet) */
HDassert((size_t)(image - (const uint8_t *)_image) == (len - H5EA_SIZEOF_CHKSUM));
- /* Compute checksum on entire header */
- /* (including the filter information, if present) */
- computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- H5E_THROW(H5E_BADVALUE, "incorrect metadata checksum for extensible array header")
-
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == len);
@@ -494,6 +548,82 @@ END_FUNC(STATIC) /* end H5EA__cache_hdr_serialize() */
/*-------------------------------------------------------------------------
+ * Function: H5EA__cache_hdr_notify
+ *
+ * Purpose: Handle cache action notifications
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 11/30/15
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, ERR,
+herr_t, SUCCEED, FAIL,
+H5EA__cache_hdr_notify(H5AC_notify_action_t action, void *_thing))
+
+ /* Local variables */
+ H5EA_hdr_t *hdr = (H5EA_hdr_t *)_thing; /* Pointer to the object */
+
+ /* Sanity check */
+ HDassert(hdr);
+
+ /* Check if the file was opened with SWMR-write access */
+ if(hdr->swmr_write) {
+ /* Determine which action to take */
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* If hdr->fd_parent_addr != HADDR_UNDEF, the extensible
+ * array header must be employed as the index for a chunked
+ * data set which has been modified by the SWMR writer.
+ *
+ * In this case, hdr->fd_parent_addr must contain the
+ * address of object header proxy which is the flush
+ * dependency parent of the extensible array header.
+ *
+ * hdr->fd_parent_addr (and hdr->fd_parent_ptr) are used to
+ * destroy the flush dependency before the extensible array
+ * header is evicted.
+ */
+ if(hdr->fd_parent_addr != HADDR_UNDEF) {
+ HDassert(hdr->fd_parent_ptr);
+ HDassert(hdr->fd_parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(hdr->fd_parent_ptr->addr == hdr->fd_parent_addr);
+ HDassert(hdr->fd_parent_ptr->type);
+ HDassert(hdr->fd_parent_ptr->type->id == H5AC_OHDR_PROXY_ID);
+
+ /* Destroy flush dependency on object header proxy */
+ if(H5EA__destroy_flush_depend((H5AC_info_t *)hdr->fd_parent_ptr, (H5AC_info_t *)hdr) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between ea header and object header proxy, address = %llu", (unsigned long long)hdr->fd_parent_addr)
+ } /* end if */
+ break;
+
+ default:
+#ifdef NDEBUG
+ H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
+ else {
+ HDassert(hdr->fd_parent_addr == HADDR_UNDEF);
+ HDassert(hdr->fd_parent_ptr == NULL);
+ } /* end else */
+
+CATCH
+
+END_FUNC(STATIC) /* end H5EA__cache_hdr_notify() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5EA__cache_hdr_free_icr
*
* Purpose: Destroy/release an "in core representation" of a data
@@ -538,30 +668,71 @@ END_FUNC(STATIC) /* end H5EA__cache_hdr_free_icr() */
*/
BEGIN_FUNC(STATIC, NOERR,
herr_t, SUCCEED, -,
-H5EA__cache_iblock_get_load_size(const void *_udata, size_t *image_len))
+H5EA__cache_iblock_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- const H5EA_hdr_t *hdr = (const H5EA_hdr_t *)_udata; /* User data for callback */
- H5EA_iblock_t iblock; /* Fake index block for computing size */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5EA_hdr_t *hdr = (H5EA_hdr_t *)_udata; /* User data for callback */
+ H5EA_iblock_t iblock; /* Fake index block for computing size */
/* Check arguments */
HDassert(hdr);
HDassert(image_len);
- /* Set up fake index block for computing size on disk */
- HDmemset(&iblock, 0, sizeof(iblock));
- iblock.hdr = (H5EA_hdr_t *)hdr; /* Casting away 'const' OK - QAK */
- iblock.nsblks = H5EA_SBLK_FIRST_IDX(hdr->cparam.sup_blk_min_data_ptrs);
- iblock.ndblk_addrs = 2 * ((size_t)hdr->cparam.sup_blk_min_data_ptrs - 1);
- iblock.nsblk_addrs = hdr->nsblks - iblock.nsblks;
-
- /* Set the image length size */
- *image_len = (size_t)H5EA_IBLOCK_SIZE(&iblock);
+ if(image == NULL) {
+ /* Set up fake index block for computing size on disk */
+ HDmemset(&iblock, 0, sizeof(iblock));
+ iblock.hdr = (H5EA_hdr_t *)hdr; /* Casting away 'const' OK - QAK */
+ iblock.nsblks = H5EA_SBLK_FIRST_IDX(hdr->cparam.sup_blk_min_data_ptrs);
+ iblock.ndblk_addrs = 2 * ((size_t)hdr->cparam.sup_blk_min_data_ptrs - 1);
+ iblock.nsblk_addrs = hdr->nsblks - iblock.nsblks;
+
+ /* Set the image length size */
+ *image_len = (size_t)H5EA_IBLOCK_SIZE(&iblock);
+ } else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
END_FUNC(STATIC) /* end H5EA__cache_iblock_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5EA__cache_iblock_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+htri_t, TRUE, -,
+H5EA__cache_iblock_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata))
+
+ /* Local variables */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+END_FUNC(STATIC) /* end H5EA__cache_iblock_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5EA__cache_iblock_deserialize
*
* Purpose: Loads a data structure from the disk.
@@ -585,7 +756,6 @@ H5EA__cache_iblock_deserialize(const void *_image, size_t len,
H5EA_hdr_t *hdr = (H5EA_hdr_t *)_udata; /* User data for callback */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
haddr_t arr_addr; /* Address of array header in the file */
size_t u; /* Local index variable */
@@ -649,8 +819,7 @@ H5EA__cache_iblock_deserialize(const void *_image, size_t len,
/* Save the index block's size */
iblock->size = len;
- /* Compute checksum on index block */
- computed_chksum = H5_checksum_metadata((const uint8_t *)_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
@@ -658,10 +827,6 @@ H5EA__cache_iblock_deserialize(const void *_image, size_t len,
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == iblock->size);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- H5E_THROW(H5E_BADVALUE, "incorrect metadata checksum for extensible array index block")
-
/* Set return value */
ret_value = iblock;
@@ -815,28 +980,35 @@ H5EA__cache_iblock_notify(H5AC_notify_action_t action, void *_thing))
/* Sanity check */
HDassert(iblock);
- /* Determine which action to take */
- switch(action) {
- case H5AC_NOTIFY_ACTION_AFTER_INSERT:
- case H5AC_NOTIFY_ACTION_AFTER_LOAD:
- /* Create flush dependency on extensible array header */
- if(H5EA__create_flush_depend((H5AC_info_t *)iblock->hdr, (H5AC_info_t *)iblock) < 0)
- H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between index block and header, address = %llu", (unsigned long long)iblock->addr)
- break;
-
- case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
- /* do nothing */
- break;
-
- case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
- /* Destroy flush dependency on extensible array header */
- if(H5EA__destroy_flush_depend((H5AC_info_t *)iblock->hdr, (H5AC_info_t *)iblock) < 0)
- H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between index block and header, address = %llu", (unsigned long long)iblock->addr)
- break;
-
- default:
- H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
- } /* end switch */
+ /* Check if the file was opened with SWMR-write access */
+ if(iblock->hdr->swmr_write) {
+ /* Determine which action to take */
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ /* Create flush dependency on extensible array header */
+ if(H5EA__create_flush_depend((H5AC_info_t *)iblock->hdr, (H5AC_info_t *)iblock) < 0)
+ H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between index block and header, address = %llu", (unsigned long long)iblock->addr)
+ break;
+
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* Destroy flush dependency on extensible array header */
+ if(H5EA__destroy_flush_depend((H5AC_info_t *)iblock->hdr, (H5AC_info_t *)iblock) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between index block and header, address = %llu", (unsigned long long)iblock->addr)
+ break;
+
+ default:
+#ifdef NDEBUG
+ H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
CATCH
@@ -888,50 +1060,92 @@ END_FUNC(STATIC) /* end H5EA__cache_iblock_free_icr() */
*/
BEGIN_FUNC(STATIC, NOERR,
herr_t, SUCCEED, -,
-H5EA__cache_sblock_get_load_size(const void *_udata, size_t *image_len))
+H5EA__cache_sblock_get_load_size(const void *_image, void *_udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- const H5EA_sblock_cache_ud_t *udata = (const H5EA_sblock_cache_ud_t *)_udata; /* User data */
- H5EA_sblock_t sblock; /* Fake super block for computing size */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5EA_sblock_cache_ud_t *udata = (H5EA_sblock_cache_ud_t *)_udata; /* User data */
+ H5EA_sblock_t sblock; /* Fake super block for computing size */
/* Check arguments */
HDassert(udata);
HDassert(udata->hdr);
- HDassert(udata->parent);
HDassert(udata->sblk_idx > 0);
HDassert(H5F_addr_defined(udata->sblk_addr));
HDassert(image_len);
- /* Set up fake super block for computing size on disk */
- /* (Note: extracted from H5EA__sblock_alloc) */
- HDmemset(&sblock, 0, sizeof(sblock));
- sblock.hdr = udata->hdr;
- sblock.ndblks = udata->hdr->sblk_info[udata->sblk_idx].ndblks;
- sblock.dblk_nelmts = udata->hdr->sblk_info[udata->sblk_idx].dblk_nelmts;
+ if(image == NULL) {
+ /* Set up fake super block for computing size on disk */
+ /* (Note: extracted from H5EA__sblock_alloc) */
+ HDmemset(&sblock, 0, sizeof(sblock));
+ sblock.hdr = udata->hdr;
+ sblock.ndblks = udata->hdr->sblk_info[udata->sblk_idx].ndblks;
+ sblock.dblk_nelmts = udata->hdr->sblk_info[udata->sblk_idx].dblk_nelmts;
- /* Check if # of elements in data blocks requires paging */
- if(sblock.dblk_nelmts > udata->hdr->dblk_page_nelmts) {
- /* Compute # of pages in each data block from this super block */
- sblock.dblk_npages = sblock.dblk_nelmts / udata->hdr->dblk_page_nelmts;
+ /* Check if # of elements in data blocks requires paging */
+ if(sblock.dblk_nelmts > udata->hdr->dblk_page_nelmts) {
+ /* Compute # of pages in each data block from this super block */
+ sblock.dblk_npages = sblock.dblk_nelmts / udata->hdr->dblk_page_nelmts;
- /* Sanity check that we have at least 2 pages in data block */
- HDassert(sblock.dblk_npages > 1);
+ /* Sanity check that we have at least 2 pages in data block */
+ HDassert(sblock.dblk_npages > 1);
- /* Sanity check for integer truncation */
- HDassert((sblock.dblk_npages * udata->hdr->dblk_page_nelmts) == sblock.dblk_nelmts);
+ /* Sanity check for integer truncation */
+ HDassert((sblock.dblk_npages * udata->hdr->dblk_page_nelmts) == sblock.dblk_nelmts);
- /* Compute size of buffer for each data block's 'page init' bitmask */
- sblock.dblk_page_init_size = ((sblock.dblk_npages) + 7) / 8;
- HDassert(sblock.dblk_page_init_size > 0);
- } /* end if */
+ /* Compute size of buffer for each data block's 'page init' bitmask */
+ sblock.dblk_page_init_size = ((sblock.dblk_npages) + 7) / 8;
+ HDassert(sblock.dblk_page_init_size > 0);
+ } /* end if */
- /* Set the image length size */
- *image_len = (size_t)H5EA_SBLOCK_SIZE(&sblock);
+ /* Set the image length size */
+ *image_len = (size_t)H5EA_SBLOCK_SIZE(&sblock);
+
+ } else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
END_FUNC(STATIC) /* end H5EA__cache_sblock_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5EA__cache_sblock_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+htri_t, TRUE, -,
+H5EA__cache_sblock_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata))
+
+ /* Local variables */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+END_FUNC(STATIC) /* end H5EA__cache_sblock_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5EA__cache_sblock_deserialize
*
* Purpose: Loads a data structure from the disk.
@@ -955,7 +1169,6 @@ H5EA__cache_sblock_deserialize(const void *_image, size_t len,
H5EA_sblock_cache_ud_t *udata = (H5EA_sblock_cache_ud_t *)_udata; /* User data */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
haddr_t arr_addr; /* Address of array header in the file */
size_t u; /* Local index variable */
@@ -1016,8 +1229,7 @@ H5EA__cache_sblock_deserialize(const void *_image, size_t len,
/* Save the super block's size */
sblock->size = len;
- /* Compute checksum on super block */
- computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
@@ -1025,10 +1237,6 @@ H5EA__cache_sblock_deserialize(const void *_image, size_t len,
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == sblock->size);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- H5E_THROW(H5E_BADVALUE, "incorrect metadata checksum for extensible array super block")
-
/* Set return value */
ret_value = sblock;
@@ -1169,28 +1377,35 @@ H5EA__cache_sblock_notify(H5AC_notify_action_t action, void *_thing))
/* Sanity check */
HDassert(sblock);
- /* Determine which action to take */
- switch(action) {
- case H5AC_NOTIFY_ACTION_AFTER_INSERT:
- case H5AC_NOTIFY_ACTION_AFTER_LOAD:
- /* Create flush dependency on index block */
- if(H5EA__create_flush_depend((H5AC_info_t *)sblock->parent, (H5AC_info_t *)sblock) < 0)
- H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between super block and index block, address = %llu", (unsigned long long)sblock->addr)
- break;
-
- case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
- /* do nothing */
- break;
-
- case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
- /* Destroy flush dependency on index block */
- if(H5EA__destroy_flush_depend((H5AC_info_t *)sblock->parent, (H5AC_info_t *)sblock) < 0)
- H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between super block and index block, address = %llu", (unsigned long long)sblock->addr)
- break;
-
- default:
- H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
- } /* end switch */
+ /* Check if the file was opened with SWMR-write access */
+ if(sblock->hdr->swmr_write) {
+ /* Determine which action to take */
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ /* Create flush dependency on index block */
+ if(H5EA__create_flush_depend((H5AC_info_t *)sblock->parent, (H5AC_info_t *)sblock) < 0)
+ H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between super block and index block, address = %llu", (unsigned long long)sblock->addr)
+ break;
+
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* Destroy flush dependency on index block */
+ if(H5EA__destroy_flush_depend((H5AC_info_t *)sblock->parent, (H5AC_info_t *)sblock) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between super block and index block, address = %llu", (unsigned long long)sblock->addr)
+ break;
+
+ default:
+#ifdef NDEBUG
+ H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
CATCH
@@ -1242,51 +1457,92 @@ END_FUNC(STATIC) /* end H5EA__cache_sblock_free_icr() */
*/
BEGIN_FUNC(STATIC, NOERR,
herr_t, SUCCEED, -,
-H5EA__cache_dblock_get_load_size(const void *_udata, size_t *image_len))
+H5EA__cache_dblock_get_load_size(const void *_image, void *_udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- const H5EA_dblock_cache_ud_t *udata = (const H5EA_dblock_cache_ud_t *)_udata; /* User data */
- H5EA_dblock_t dblock; /* Fake data block for computing size */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5EA_dblock_cache_ud_t *udata = (H5EA_dblock_cache_ud_t *)_udata; /* User data */
+ H5EA_dblock_t dblock; /* Fake data block for computing size */
/* Check arguments */
HDassert(udata);
HDassert(udata->hdr);
- HDassert(udata->parent);
HDassert(udata->nelmts > 0);
HDassert(image_len);
- /* Set up fake data block for computing size on disk */
- /* (Note: extracted from H5EA__dblock_alloc) */
- HDmemset(&dblock, 0, sizeof(dblock));
-
- /* need to set:
- *
- * dblock.hdr
- * dblock.npages
- * dblock.nelmts
- *
- * before we invoke either H5EA_DBLOCK_PREFIX_SIZE() or
- * H5EA_DBLOCK_SIZE().
- */
- dblock.hdr = udata->hdr;
- dblock.nelmts = udata->nelmts;
-
- if(udata->nelmts > udata->hdr->dblk_page_nelmts) {
- /* Set the # of pages in the direct block */
- dblock.npages = udata->nelmts / udata->hdr->dblk_page_nelmts;
- HDassert(udata->nelmts==(dblock.npages * udata->hdr->dblk_page_nelmts));
- } /* end if */
-
- /* Set the image length size */
- if(!dblock.npages)
- *image_len = H5EA_DBLOCK_SIZE(&dblock);
- else
- *image_len = H5EA_DBLOCK_PREFIX_SIZE(&dblock);
+ if(image == NULL) {
+ /* Set up fake data block for computing size on disk */
+ /* (Note: extracted from H5EA__dblock_alloc) */
+ HDmemset(&dblock, 0, sizeof(dblock));
+
+ /* need to set:
+ *
+ * dblock.hdr
+ * dblock.npages
+ * dblock.nelmts
+ *
+ * before we invoke either H5EA_DBLOCK_PREFIX_SIZE() or
+ * H5EA_DBLOCK_SIZE().
+ */
+ dblock.hdr = udata->hdr;
+ dblock.nelmts = udata->nelmts;
+
+ if(udata->nelmts > udata->hdr->dblk_page_nelmts) {
+ /* Set the # of pages in the direct block */
+ dblock.npages = udata->nelmts / udata->hdr->dblk_page_nelmts;
+ HDassert(udata->nelmts==(dblock.npages * udata->hdr->dblk_page_nelmts));
+ } /* end if */
+
+ /* Set the image length size */
+ if(!dblock.npages)
+ *image_len = H5EA_DBLOCK_SIZE(&dblock);
+ else
+ *image_len = H5EA_DBLOCK_PREFIX_SIZE(&dblock);
+ } else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
END_FUNC(STATIC) /* end H5EA__cache_dblock_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5EA__cache_dblock_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+htri_t, TRUE, -,
+H5EA__cache_dblock_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata))
+
+ /* Local variables */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+END_FUNC(STATIC) /* end H5EA__cache_dblock_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5EA__cache_dblock_deserialize
*
* Purpose: Loads a data structure from the disk.
@@ -1310,7 +1566,6 @@ H5EA__cache_dblock_deserialize(const void *_image, size_t len,
H5EA_dblock_cache_ud_t *udata = (H5EA_dblock_cache_ud_t *)_udata; /* User data */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
haddr_t arr_addr; /* Address of array header in the file */
/* Check arguments */
@@ -1370,8 +1625,7 @@ H5EA__cache_dblock_deserialize(const void *_image, size_t len,
/* (Note: This is not the same as the image length, for paged data blocks) */
dblock->size = H5EA_DBLOCK_SIZE(dblock);
- /* Compute checksum on data block */
- computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
@@ -1379,10 +1633,6 @@ H5EA__cache_dblock_deserialize(const void *_image, size_t len,
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == len);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- H5E_THROW(H5E_BADVALUE, "incorrect metadata checksum for extensible array data block")
-
/* Set return value */
ret_value = dblock;
@@ -1524,28 +1774,35 @@ H5EA__cache_dblock_notify(H5AC_notify_action_t action, void *_thing))
/* Check arguments */
HDassert(dblock);
- /* Determine which action to take */
- switch(action) {
- case H5AC_NOTIFY_ACTION_AFTER_INSERT:
- case H5AC_NOTIFY_ACTION_AFTER_LOAD:
- /* Create flush dependency on parent */
- if(H5EA__create_flush_depend((H5AC_info_t *)dblock->parent, (H5AC_info_t *)dblock) < 0)
- H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between data block and parent, address = %llu", (unsigned long long)dblock->addr)
- break;
-
- case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
- /* do nothing */
- break;
-
- case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
- /* Destroy flush dependency on parent */
- if(H5EA__destroy_flush_depend((H5AC_info_t *)dblock->parent, (H5AC_info_t *)dblock) < 0)
- H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block and parent, address = %llu", (unsigned long long)dblock->addr)
- break;
-
- default:
- H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
- } /* end switch */
+ /* Check if the file was opened with SWMR-write access */
+ if(dblock->hdr->swmr_write) {
+ /* Determine which action to take */
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ /* Create flush dependency on parent */
+ if(H5EA__create_flush_depend((H5AC_info_t *)dblock->parent, (H5AC_info_t *)dblock) < 0)
+ H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between data block and parent, address = %llu", (unsigned long long)dblock->addr)
+ break;
+
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* Destroy flush dependency on parent */
+ if(H5EA__destroy_flush_depend((H5AC_info_t *)dblock->parent, (H5AC_info_t *)dblock) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block and parent, address = %llu", (unsigned long long)dblock->addr)
+ break;
+
+ default:
+#ifdef NDEBUG
+ H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
CATCH
@@ -1644,23 +1901,64 @@ END_FUNC(STATIC) /* end H5EA__cache_dblock_fsf_size() */
*/
BEGIN_FUNC(STATIC, NOERR,
herr_t, SUCCEED, -,
-H5EA__cache_dblk_page_get_load_size(const void *_udata, size_t *image_len))
+H5EA__cache_dblk_page_get_load_size(const void *_image, void *_udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- const H5EA_dblk_page_cache_ud_t *udata = (const H5EA_dblk_page_cache_ud_t *)_udata; /* User data */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5EA_dblk_page_cache_ud_t *udata = (H5EA_dblk_page_cache_ud_t *)_udata; /* User data */
/* Check arguments */
HDassert(udata);
HDassert(udata->hdr);
- HDassert(udata->parent);
HDassert(image_len);
- *image_len = (size_t)H5EA_DBLK_PAGE_SIZE(udata->hdr);
+ if(image == NULL)
+ *image_len = (size_t)H5EA_DBLK_PAGE_SIZE(udata->hdr);
+ else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
END_FUNC(STATIC) /* end H5EA__cache_dblk_page_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5EA__cache_dblk_page_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+htri_t, TRUE, -,
+H5EA__cache_dblk_page_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata))
+
+ /* Local variables */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+END_FUNC(STATIC) /* end H5EA__cache_dblk_page_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5EA__cache_dblk_page_deserialize
*
* Purpose: Loads a data structure from the disk.
@@ -1684,7 +1982,6 @@ H5EA__cache_dblk_page_deserialize(const void *_image, size_t len,
H5EA_dblk_page_cache_ud_t *udata = (H5EA_dblk_page_cache_ud_t *)_udata; /* User data for loading data block page */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
/* Sanity check */
HDassert(udata);
@@ -1714,8 +2011,7 @@ H5EA__cache_dblk_page_deserialize(const void *_image, size_t len,
/* Set the data block page's size */
dblk_page->size = len;
- /* Compute checksum on data block page */
- computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
@@ -1723,10 +2019,6 @@ H5EA__cache_dblk_page_deserialize(const void *_image, size_t len,
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == dblk_page->size);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- H5E_THROW(H5E_BADVALUE, "incorrect metadata checksum for extensible array data block page")
-
/* Set return value */
ret_value = dblk_page;
@@ -1846,28 +2138,35 @@ H5EA__cache_dblk_page_notify(H5AC_notify_action_t action, void *_thing))
/* Sanity check */
HDassert(dblk_page);
- /* Determine which action to take */
- switch(action) {
- case H5AC_NOTIFY_ACTION_AFTER_INSERT:
- case H5AC_NOTIFY_ACTION_AFTER_LOAD:
- /* Create flush dependency on parent */
- if(H5EA__create_flush_depend((H5AC_info_t *)dblk_page->parent, (H5AC_info_t *)dblk_page) < 0)
- H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between data block page and parent, address = %llu", (unsigned long long)dblk_page->addr)
+ /* Check if the file was opened with SWMR-write access */
+ if(dblk_page->hdr->swmr_write) {
+ /* Determine which action to take */
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ /* Create flush dependency on parent */
+ if(H5EA__create_flush_depend((H5AC_info_t *)dblk_page->parent, (H5AC_info_t *)dblk_page) < 0)
+ H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between data block page and parent, address = %llu", (unsigned long long)dblk_page->addr)
+ break;
+
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* Destroy flush dependency on parent */
+ if(H5EA__destroy_flush_depend((H5AC_info_t *)dblk_page->parent, (H5AC_info_t *)dblk_page) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block page and parent, address = %llu", (unsigned long long)dblk_page->addr)
break;
- case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
- /* do nothing */
- break;
-
- case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
- /* Destroy flush dependency on parent */
- if(H5EA__destroy_flush_depend((H5AC_info_t *)dblk_page->parent, (H5AC_info_t *)dblk_page) < 0)
- H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between data block page and parent, address = %llu", (unsigned long long)dblk_page->addr)
- break;
-
- default:
- H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
- } /* end switch */
+ default:
+#ifdef NDEBUG
+ H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
CATCH
diff --git a/src/H5EAhdr.c b/src/H5EAhdr.c
index eff4b54..e60f804 100644
--- a/src/H5EAhdr.c
+++ b/src/H5EAhdr.c
@@ -128,13 +128,16 @@ H5EA__hdr_alloc(H5F_t *f))
/* Allocate space for the shared information */
if(NULL == (hdr = H5FL_CALLOC(H5EA_hdr_t)))
- H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array shared header")
+ H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array shared header")
/* Set non-zero internal fields */
hdr->addr = HADDR_UNDEF;
+ hdr->fd_parent_addr = HADDR_UNDEF;
+ hdr->fd_parent_ptr = NULL;
/* Set the internal parameters for the array */
hdr->f = f;
+ hdr->swmr_write = (H5F_INTENT(f) & H5F_ACC_SWMR_WRITE) > 0;
hdr->sizeof_addr = H5F_SIZEOF_ADDR(f);
hdr->sizeof_size = H5F_SIZEOF_SIZE(f);
@@ -390,35 +393,35 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Check for valid parameters */
if(cparam->raw_elmt_size == 0)
- H5E_THROW(H5E_BADVALUE, "element size must be greater than zero")
+ H5E_THROW(H5E_BADVALUE, "element size must be greater than zero")
if(cparam->max_nelmts_bits == 0)
- H5E_THROW(H5E_BADVALUE, "max. # of elements bits must be greater than zero")
+ H5E_THROW(H5E_BADVALUE, "max. # of elements bits must be greater than zero")
if(cparam->max_nelmts_bits > H5EA_MAX_NELMTS_IDX_MAX)
- H5E_THROW(H5E_BADVALUE, "max. # of elements bits must be <= %u", (unsigned)H5EA_MAX_NELMTS_IDX_MAX)
+ H5E_THROW(H5E_BADVALUE, "max. # of elements bits must be <= %u", (unsigned)H5EA_MAX_NELMTS_IDX_MAX)
if(cparam->sup_blk_min_data_ptrs < 2)
- H5E_THROW(H5E_BADVALUE, "min # of data block pointers in super block must be >= two")
+ H5E_THROW(H5E_BADVALUE, "min # of data block pointers in super block must be >= two")
if(!POWER_OF_TWO(cparam->sup_blk_min_data_ptrs))
- H5E_THROW(H5E_BADVALUE, "min # of data block pointers in super block must be power of two")
+ H5E_THROW(H5E_BADVALUE, "min # of data block pointers in super block must be power of two")
if(!POWER_OF_TWO(cparam->data_blk_min_elmts))
- H5E_THROW(H5E_BADVALUE, "min # of elements per data block must be power of two")
+ H5E_THROW(H5E_BADVALUE, "min # of elements per data block must be power of two")
dblk_page_nelmts = (size_t)1 << cparam->max_dblk_page_nelmts_bits;
if(dblk_page_nelmts < cparam->idx_blk_elmts)
- H5E_THROW(H5E_BADVALUE, "# of elements per data block page must be greater than # of elements in index block")
+ H5E_THROW(H5E_BADVALUE, "# of elements per data block page must be greater than # of elements in index block")
/* Compute the number of elements in data blocks for first actual super block */
sblk_idx = H5EA_SBLK_FIRST_IDX(cparam->sup_blk_min_data_ptrs);
dblk_nelmts = H5EA_SBLK_DBLK_NELMTS(sblk_idx, cparam->data_blk_min_elmts);
if(dblk_page_nelmts < dblk_nelmts)
- H5E_THROW(H5E_BADVALUE, "max. # of elements per data block page bits must be > # of elements in first data block from super block")
+ H5E_THROW(H5E_BADVALUE, "max. # of elements per data block page bits must be > # of elements in first data block from super block")
if(cparam->max_dblk_page_nelmts_bits > cparam->max_nelmts_bits)
- H5E_THROW(H5E_BADVALUE, "max. # of elements per data block page bits must be <= max. # of elements bits")
+ H5E_THROW(H5E_BADVALUE, "max. # of elements per data block page bits must be <= max. # of elements bits")
}
#endif /* NDEBUG */
/* Allocate space for the shared information */
if(NULL == (hdr = H5EA__hdr_alloc(f)))
- H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array shared header")
+ H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array shared header")
/* Set the internal parameters for the array */
hdr->idx_blk_addr = HADDR_UNDEF;
@@ -428,15 +431,15 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Finish initializing extensible array header */
if(H5EA__hdr_init(hdr, ctx_udata) < 0)
- H5E_THROW(H5E_CANTINIT, "initialization failed for extensible array header")
+ H5E_THROW(H5E_CANTINIT, "initialization failed for extensible array header")
/* Allocate space for the header on disk */
if(HADDR_UNDEF == (hdr->addr = H5MF_alloc(f, H5FD_MEM_EARRAY_HDR, dxpl_id, (hsize_t)hdr->size)))
- H5E_THROW(H5E_CANTALLOC, "file allocation failed for extensible array header")
+ H5E_THROW(H5E_CANTALLOC, "file allocation failed for extensible array header")
/* Cache the new extensible array header */
if(H5AC_insert_entry(f, dxpl_id, H5AC_EARRAY_HDR, hdr->addr, hdr, H5AC__NO_FLAGS_SET) < 0)
- H5E_THROW(H5E_CANTINSERT, "can't add extensible array header to cache")
+ H5E_THROW(H5E_CANTINSERT, "can't add extensible array header to cache")
/* Set address of array header to return */
ret_value = hdr->addr;
diff --git a/src/H5EApkg.h b/src/H5EApkg.h
index 3af1b2c..147a612 100644
--- a/src/H5EApkg.h
+++ b/src/H5EApkg.h
@@ -14,12 +14,12 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Programmer: Quincey Koziol <koziol@hdfgroup.org>
- * Tuesday, June 17, 2008
+ * Programmer: Quincey Koziol <koziol@hdfgroup.org>
+ * Tuesday, June 17, 2008
*
- * Purpose: This file contains declarations which are visible only within
- * the H5EA package. Source files outside the H5EA package should
- * include H5EAprivate.h instead.
+ * Purpose: This file contains declarations which are visible only within
+ * the H5EA package. Source files outside the H5EA package should
+ * include H5EAprivate.h instead.
*/
#if !(defined(H5EA_FRIEND) | defined(H5EA_MODULE))
#error "Do not include this file outside the H5EA package!"
@@ -32,7 +32,7 @@
#include "H5EAprivate.h"
/* Other private headers needed by this file */
-#include "H5FLprivate.h" /* Free Lists */
+#include "H5FLprivate.h" /* Free Lists */
/**************************/
@@ -95,10 +95,10 @@
/* General metadata fields */ \
H5EA_METADATA_PREFIX_SIZE(TRUE) \
\
- /* Sanity-checking fields */ \
+ /* Sanity-checking fields */ \
+ (i)->hdr->sizeof_addr /* File address of array owning the block */ \
\
- /* Extensible Array Index Block specific fields */ \
+ /* Extensible Array Index Block specific fields */ \
+ ((size_t)(i)->hdr->cparam.idx_blk_elmts * (size_t)(i)->hdr->cparam.raw_elmt_size) /* Elements in index block */ \
+ ((i)->ndblk_addrs * (i)->hdr->sizeof_addr) /* Data block addresses in index block */ \
+ ((i)->nsblk_addrs * (i)->hdr->sizeof_addr) /* Super block addresses in index block */ \
@@ -109,31 +109,31 @@
/* General metadata fields */ \
H5EA_METADATA_PREFIX_SIZE(TRUE) \
\
- /* Sanity-checking fields */ \
+ /* Sanity-checking fields */ \
+ (s)->hdr->sizeof_addr /* File address of array owning the block */ \
+ (s)->hdr->arr_off_size /* Offset of the block in the array */ \
\
- /* Extensible Array Super Block specific fields */ \
+ /* Extensible Array Super Block specific fields */ \
+ ((s)->ndblks * (s)->dblk_page_init_size) /* Data block 'page init' bitmasks in super block (can be 0 if no pages) */ \
+ ((s)->ndblks * (s)->hdr->sizeof_addr) /* Data block addresses in super block */ \
)
/* Size of the extensible array data block prefix on disk */
-#define H5EA_DBLOCK_PREFIX_SIZE(d) ( \
+#define H5EA_DBLOCK_PREFIX_SIZE(d) ( \
/* General metadata fields */ \
H5EA_METADATA_PREFIX_SIZE(TRUE) \
\
- /* Sanity-checking fields */ \
+ /* Sanity-checking fields */ \
+ (d)->hdr->sizeof_addr /* File address of array owning the block */ \
+ (d)->hdr->arr_off_size /* Offset of the block in the array */ \
)
/* Size of the extensible array data block on disk */
-#define H5EA_DBLOCK_SIZE(d) ( \
+#define H5EA_DBLOCK_SIZE(d) ( \
/* Data block prefix size */ \
H5EA_DBLOCK_PREFIX_SIZE(d) \
\
- /* Extensible Array Data Block specific fields */ \
+ /* Extensible Array Data Block specific fields */ \
+ ((d)->nelmts * (size_t)(d)->hdr->cparam.raw_elmt_size) /* Elements in data block */ \
+ ((d)->npages * H5EA_SIZEOF_CHKSUM) /* Checksum for each page */ \
)
@@ -191,6 +191,7 @@ typedef struct H5EA_hdr_t {
haddr_t addr; /* Address of header in file */
size_t size; /* Size of header in file */
H5F_t *f; /* Pointer to file for extensible array */
+ hbool_t swmr_write; /* Flag indicating the file is opened with SWMR-write access */
size_t file_rc; /* Reference count of files using array header */
hbool_t pending_delete; /* Array is pending deletion */
size_t sizeof_addr; /* Size of file addresses */
@@ -206,6 +207,33 @@ typedef struct H5EA_hdr_t {
/* Client information (not stored) */
void *cb_ctx; /* Callback context */
+
+ /* Flush depencency parent information (not stored) */
+ haddr_t fd_parent_addr; /* Address of flush dependency parent,
+ * if any. This field is initialized
+ * to HADDR_UNDEF. If the extensible
+ * array is being used to index a
+ * chunked data set and the data set
+ * metadata is modified by a SWMR
+ * writer, this field will be set equal
+ * to the object header proxy that is
+ * the flush dependency parent of the
+ * extensible array header.
+ *
+ * The field is used to avoid duplicate
+ * setups of the flush dependency
+ * relationship, and to allow the
+ * extensible array header to destroy
+ * the flush dependency on receipt of
+ * an eviction notification from the
+ * metadata cache.
+ */
+ H5AC_info_t *fd_parent_ptr; /* Pointer to flush dependency parent,
+ * if it exists, otherwise NULL. (See
+ * comment for fd_parent_addr above for
+ * further details)
+ */
+
} H5EA_hdr_t;
/* The extensible array index block information */
@@ -219,9 +247,9 @@ typedef struct H5EA_iblock_t {
haddr_t *sblk_addrs; /* Buffer for addresses of super blocks in index block */
/* Internal array information (not stored) */
- H5EA_hdr_t *hdr; /* Shared array header info */
- haddr_t addr; /* Address of this index block on disk */
- size_t size; /* Size of index block on disk */
+ H5EA_hdr_t *hdr; /* Shared array header info */
+ haddr_t addr; /* Address of this index block on disk */
+ size_t size; /* Size of index block on disk */
/* Computed/cached values (not stored) */
size_t nsblks; /* # of super blocks whose data block addresses are in index block */
@@ -240,10 +268,10 @@ typedef struct H5EA_sblock_t {
uint8_t *page_init; /* Bitmap of whether a data block page is initialized */
/* Internal array information (not stored) */
- H5EA_hdr_t *hdr; /* Shared array header info */
- H5EA_iblock_t *parent; /* Parent object for super block (index block) */
- haddr_t addr; /* Address of this index block on disk */
- size_t size; /* Size of index block on disk */
+ H5EA_hdr_t *hdr; /* Shared array header info */
+ H5EA_iblock_t *parent; /* Parent object for super block (index block) */
+ haddr_t addr; /* Address of this index block on disk */
+ size_t size; /* Size of index block on disk */
/* Computed/cached values (not stored) */
unsigned idx; /* Super block index within the extensible array */
@@ -264,10 +292,10 @@ typedef struct H5EA_dblock_t {
void *elmts; /* Buffer for elements stored in data block */
/* Internal array information (not stored) */
- H5EA_hdr_t *hdr; /* Shared array header info */
+ H5EA_hdr_t *hdr; /* Shared array header info */
void *parent; /* Parent object for data block (index or super block) */
- haddr_t addr; /* Address of this data block on disk */
- size_t size; /* Size of data block on disk */
+ haddr_t addr; /* Address of this data block on disk */
+ size_t size; /* Size of data block on disk */
/* Computed/cached values (not stored) */
size_t nelmts; /* Number of elements in block */
@@ -283,10 +311,10 @@ typedef struct H5EA_dbk_page_t {
void *elmts; /* Buffer for elements stored in data block page */
/* Internal array information (not stored) */
- H5EA_hdr_t *hdr; /* Shared array header info */
+ H5EA_hdr_t *hdr; /* Shared array header info */
H5EA_sblock_t *parent; /* Parent object for data block page (super block) */
haddr_t addr; /* Address of this data block page on disk */
- size_t size; /* Size of data block page on disk */
+ size_t size; /* Size of data block page on disk */
/* Computed/cached values (not stored) */
/* <none> */
@@ -360,7 +388,7 @@ H5_DLLVAR const H5AC_class_t H5AC_EARRAY_DBLK_PAGE[1];
H5_DLLVAR const H5EA_class_t H5EA_CLS_TEST[1];
/* Array of extensible array client ID -> client class mappings */
-extern const H5EA_class_t *const H5EA_client_class_g[H5EA_NUM_CLS_ID];
+H5_DLLVAR const H5EA_class_t *const H5EA_client_class_g[H5EA_NUM_CLS_ID];
/******************************/
diff --git a/src/H5EAprivate.h b/src/H5EAprivate.h
index 6ce3062..5203af7 100644
--- a/src/H5EAprivate.h
+++ b/src/H5EAprivate.h
@@ -15,11 +15,11 @@
/*-------------------------------------------------------------------------
*
- * Created: H5EAprivate.h
- * Jun 17 2008
- * Quincey Koziol <koziol@hdfgroup.org>
+ * Created: H5EAprivate.h
+ * Jun 17 2008
+ * Quincey Koziol <koziol@hdfgroup.org>
*
- * Purpose: Private header for library accessible extensible
+ * Purpose: Private header for library accessible extensible
* array routines.
*
*-------------------------------------------------------------------------
@@ -34,8 +34,8 @@
#endif /* NOT_YET */
/* Private headers needed by this file */
-#include "H5ACprivate.h" /* Metadata cache */
-#include "H5Fprivate.h" /* File access */
+#include "H5ACprivate.h" /* Metadata cache */
+#include "H5Fprivate.h" /* File access */
/**************************/
@@ -49,9 +49,12 @@
/* Extensible array class IDs */
typedef enum H5EA_cls_id_t {
+ H5EA_CLS_CHUNK_ID = 0, /* Extensible array is for indexing dataset chunks w/o filters */
+ H5EA_CLS_FILT_CHUNK_ID, /* Extensible array is for indexing dataset chunks w/filters */
+
/* Start real class IDs at 0 -QAK */
/* (keep these last) */
- H5EA_CLS_TEST_ID, /* Extensible array is for testing (do not use for actual data) */
+ H5EA_CLS_TEST_ID, /* Extensible array is for testing (do not use for actual data) */
H5EA_NUM_CLS_ID /* Number of Extensible Array class IDs (must be last) */
} H5EA_cls_id_t;
@@ -112,11 +115,20 @@ typedef struct H5EA_stat_t {
/* Extensible array info (forward decl - defined in H5EApkg.h) */
typedef struct H5EA_t H5EA_t;
+/* Define the operator callback function pointer for H5EA_iterate() */
+typedef int (*H5EA_operator_t)(hsize_t idx, const void *_elmt, void *_udata);
+
/*****************************/
/* Library-private Variables */
/*****************************/
+/* The Extensible Array class for dataset chunks w/o filters*/
+H5_DLLVAR const H5EA_class_t H5EA_CLS_CHUNK[1];
+
+/* The Extensible Array class for dataset chunks w/ filters*/
+H5_DLLVAR const H5EA_class_t H5EA_CLS_FILT_CHUNK[1];
+
/***************************************/
/* Library-private Function Prototypes */
@@ -132,10 +144,7 @@ H5_DLL herr_t H5EA_set(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, const void
H5_DLL herr_t H5EA_get(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, void *elmt);
H5_DLL herr_t H5EA_depend(H5AC_info_t *parent_entry, H5EA_t *ea);
H5_DLL herr_t H5EA_undepend(H5AC_info_t *parent_entry, H5EA_t *ea);
-H5_DLL herr_t H5EA_support(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx,
- H5AC_info_t *child_entry);
-H5_DLL herr_t H5EA_unsupport(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx,
- H5AC_info_t *child_entry);
+H5_DLL herr_t H5EA_iterate(H5EA_t *fa, hid_t dxpl_id, H5EA_operator_t op, void *udata);
H5_DLL herr_t H5EA_close(H5EA_t *ea, hid_t dxpl_id);
H5_DLL herr_t H5EA_delete(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata);
diff --git a/src/H5F.c b/src/H5F.c
index 041bd35..09765cb 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -58,7 +58,6 @@
/********************/
-
/*********************/
/* Package Variables */
/*********************/
@@ -456,10 +455,10 @@ H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
if(!filename || !*filename)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file name")
/* In this routine, we only accept the following flags:
- * H5F_ACC_EXCL and H5F_ACC_TRUNC
+ * H5F_ACC_EXCL, H5F_ACC_TRUNC and H5F_ACC_SWMR_WRITE
*/
- if(flags & ~(H5F_ACC_EXCL | H5F_ACC_TRUNC))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid flags")
+ if(flags & ~(H5F_ACC_EXCL | H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid flags")
/* The H5F_ACC_EXCL and H5F_ACC_TRUNC flags are mutually exclusive */
if((flags & H5F_ACC_EXCL) && (flags & H5F_ACC_TRUNC))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "mutually exclusive flags for file creation")
@@ -565,6 +564,12 @@ H5Fopen(const char *filename, unsigned flags, hid_t fapl_id)
if((flags & ~H5F_ACC_PUBLIC_FLAGS) ||
(flags & H5F_ACC_TRUNC) || (flags & H5F_ACC_EXCL))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file open flags")
+ /* Asking for SWMR write access on a read-only file is invalid */
+ if((flags & H5F_ACC_SWMR_WRITE) && 0 == (flags & H5F_ACC_RDWR))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "SWMR write access on a file open for read-only access is not allowed")
+ /* Asking for SWMR read access on a non-read-only file is invalid */
+ if((flags & H5F_ACC_SWMR_READ) && (flags & H5F_ACC_RDWR))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "SWMR read access on a file open for read-write access is not allowed")
if(H5P_DEFAULT == fapl_id)
fapl_id = H5P_FILE_ACCESS_DEFAULT;
else
@@ -754,9 +759,17 @@ H5Fclose(hid_t file_id)
if((f->shared->nrefs > 1) && (H5F_INTENT(f) & H5F_ACC_RDWR)) {
if((nref = H5I_get_ref(file_id, FALSE)) < 0)
HGOTO_ERROR(H5E_ATOM, H5E_CANTGET, FAIL, "can't get ID ref count")
- if(nref == 1)
+ if(nref == 1) {
+ if(f->shared->sblock) { /* Clear status_flags */
+ f->shared->sblock->status_flags &= ~H5F_SUPER_WRITE_ACCESS;
+ f->shared->sblock->status_flags &= ~H5F_SUPER_SWMR_WRITE_ACCESS;
+ /* Mark superblock dirty in cache, so change will get encoded */
+ if(H5F_super_dirty(f) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
+ }
if(H5F_flush(f, H5AC_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
+ }
} /* end if */
/*
@@ -863,10 +876,20 @@ H5Fget_intent(hid_t file_id, unsigned *intent_flags)
* Simplify things for them so that they only get either H5F_ACC_RDWR
* or H5F_ACC_RDONLY.
*/
- if(H5F_INTENT(file) & H5F_ACC_RDWR)
+ if(H5F_INTENT(file) & H5F_ACC_RDWR) {
*intent_flags = H5F_ACC_RDWR;
- else
+
+ /* Check for SWMR write access on the file */
+ if(H5F_INTENT(file) & H5F_ACC_SWMR_WRITE)
+ *intent_flags |= H5F_ACC_SWMR_WRITE;
+ } /* end if */
+ else {
*intent_flags = H5F_ACC_RDONLY;
+
+ /* Check for SWMR read access on the file */
+ if(H5F_INTENT(file) & H5F_ACC_SWMR_READ)
+ *intent_flags |= H5F_ACC_SWMR_READ;
+ } /* end else */
} /* end if */
done:
@@ -1373,6 +1396,99 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5Fget_metadata_read_retry_info
+ *
+ * Purpose: To retrieve the collection of read retries for metadata items with checksum.
+ *
+ * Return: Success: non-negative on success
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; October 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Fget_metadata_read_retry_info(hid_t file_id, H5F_retry_info_t *info)
+{
+ H5F_t *file; /* File object for file ID */
+ unsigned i, j; /* Local index variable */
+ size_t tot_size; /* Size of each retries[i] */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "i*x", file_id, info);
+
+ /* Check args */
+ if(!info)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no info struct")
+
+ /* Get the file pointer */
+ if(NULL == (file = (H5F_t *)H5I_object_verify(file_id, H5I_FILE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a file ID")
+
+ /* Copy the # of bins for "retries" array */
+ info->nbins = file->shared->retries_nbins;
+
+ /* Initialize the array of "retries" */
+ HDmemset(info->retries, 0, sizeof(info->retries));
+
+ /* Return if there are no bins -- no retries */
+ if(!info->nbins)
+ HGOTO_DONE(SUCCEED);
+
+ /* Calculate size for each retries[i] */
+ tot_size = info->nbins * sizeof(uint32_t);
+
+ /* Map and copy information to info's retries for metadata items with tracking for read retries */
+ j = 0;
+ for(i = 0; i < H5AC_NTYPES; i++) {
+ switch(i) {
+ case H5AC_OHDR_ID:
+ case H5AC_OHDR_CHK_ID:
+ case H5AC_BT2_HDR_ID:
+ case H5AC_BT2_INT_ID:
+ case H5AC_BT2_LEAF_ID:
+ case H5AC_FHEAP_HDR_ID:
+ case H5AC_FHEAP_DBLOCK_ID:
+ case H5AC_FHEAP_IBLOCK_ID:
+ case H5AC_FSPACE_HDR_ID:
+ case H5AC_FSPACE_SINFO_ID:
+ case H5AC_SOHM_TABLE_ID:
+ case H5AC_SOHM_LIST_ID:
+ case H5AC_EARRAY_HDR_ID:
+ case H5AC_EARRAY_IBLOCK_ID:
+ case H5AC_EARRAY_SBLOCK_ID:
+ case H5AC_EARRAY_DBLOCK_ID:
+ case H5AC_EARRAY_DBLK_PAGE_ID:
+ case H5AC_FARRAY_HDR_ID:
+ case H5AC_FARRAY_DBLOCK_ID:
+ case H5AC_FARRAY_DBLK_PAGE_ID:
+ case H5AC_SUPERBLOCK_ID:
+ HDassert(j < H5F_NUM_METADATA_READ_RETRY_TYPES);
+ if(file->shared->retries[i] != NULL) {
+ /* Allocate memory for retries[i] */
+ if(NULL == (info->retries[j] = (uint32_t *)HDmalloc(tot_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
+
+ /* Copy the information */
+ HDmemcpy(info->retries[j], file->shared->retries[i], tot_size);
+ } /* end if */
+
+ /* Increment location in info->retries[] array */
+ j++;
+ break;
+
+ default:
+ break;
+ } /* end switch */
+ } /* end for */
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Fget_metadata_read_retry_info() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5Fget_free_sections
*
* Purpose: To get free-space section information for free-space manager with
@@ -1448,3 +1564,314 @@ done:
FUNC_LEAVE_API(ret_value)
} /* end H5Fclear_elink_file_cache() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5Fstart_swmr_write
+ *
+ * Purpose: To enable SWMR writing mode for the file
+ * 1) Refresh opened objects: part 1
+ * 2) Flush & reset accumulator
+ * 3) Mark the file in SWMR writing mode
+ * 4) Set metadata read attempts and retries info
+ * 5) Disable accumulator
+ * 6) Evict all cache entries except the superblock
+ * 7) Refresh opened objects (part 2)
+ * 8) Unlock the file
+ *
+ * Pre-conditions:
+ * 1) The file being opened has v3 superblock
+ * 2) The file is opened with H5F_ACC_RDWR
+ * 3) The file is not already marked for SWMR writing
+ * 4) Current implementaion for opened objects:
+ * --only allow datasets and groups without attributes
+ * --disallow named datatype with/without attributes
+ * --disallow opened attributes attached to objects
+ * NOTE: Currently, only opened groups and datasets are allowed
+ * when enabling SWMR via H5Fstart_swmr_write().
+ * Will later implement a different approach--
+ * set up flush dependency/proxy even for file opened without
+ * SWMR to resolve issues with opened objects.
+ *
+ * Return: Non-negative on success/negative on failure
+ *
+ * Programmer:
+ * Vailin Choi; Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Fstart_swmr_write(hid_t file_id)
+{
+ H5F_t *file = NULL; /* File info */
+ size_t grp_dset_count=0; /* # of open objects: groups & datasets */
+ size_t nt_attr_count=0; /* # of opened named datatypes + opened attributes */
+ hid_t *obj_ids=NULL; /* List of ids */
+ H5G_loc_t *obj_glocs=NULL; /* Group location of the object */
+ H5O_loc_t *obj_olocs=NULL; /* Object location */
+ H5G_name_t *obj_paths=NULL; /* Group hierarchy path */
+ size_t u; /* Local index variable */
+ hbool_t setup = FALSE; /* Boolean flag to indicate whether SWMR setting is enabled */
+ H5F_io_info_t fio_info; /* I/O info for operation */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", file_id);
+
+ /* check args */
+ if(NULL == (file = (H5F_t *)H5I_object_verify(file_id, H5I_FILE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file")
+
+ /* Should have write permission */
+ if((H5F_INTENT(file) & H5F_ACC_RDWR) == 0)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "no write intent on file")
+
+ if(file->shared->sblock->super_vers < HDF5_SUPERBLOCK_VERSION_3)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "file superblock version should be at least 3")
+ HDassert(file->shared->latest_flags == H5F_LATEST_ALL_FLAGS);
+
+ /* Should not be marked for SWMR writing mode already */
+ if(file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "file already in SWMR writing mode")
+
+ HDassert(file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS);
+
+ /* Flush data buffers */
+ if(H5F_flush(file, H5AC_dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information")
+
+ /* Get the # of opened named datatypes and attributes */
+ if(H5F_get_obj_count(file, H5F_OBJ_LOCAL|H5F_OBJ_DATATYPE|H5F_OBJ_ATTR, FALSE, &nt_attr_count) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADITER, FAIL, "H5F_get_obj_count failed")
+ if(nt_attr_count)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "named datatypes and/or attributes opened in the file")
+
+ /* Get the # of opened datasets and groups */
+ if(H5F_get_obj_count(file, H5F_OBJ_LOCAL|H5F_OBJ_GROUP|H5F_OBJ_DATASET, FALSE, &grp_dset_count) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADITER, FAIL, "H5F_get_obj_count failed")
+
+ if(grp_dset_count) {
+ /* Allocate space for group and object locations */
+ if((obj_ids = (hid_t *) H5MM_malloc(grp_dset_count * sizeof(hid_t))) == NULL)
+ HGOTO_ERROR(H5E_FILE, H5E_NOSPACE, FAIL, "can't allocate buffer for hid_t")
+ if((obj_glocs = (H5G_loc_t *) H5MM_malloc(grp_dset_count * sizeof(H5G_loc_t))) == NULL)
+ HGOTO_ERROR(H5E_FILE, H5E_NOSPACE, FAIL, "can't allocate buffer for H5G_loc_t")
+ if((obj_olocs = (H5O_loc_t *) H5MM_malloc(grp_dset_count * sizeof(H5O_loc_t))) == NULL)
+ HGOTO_ERROR(H5E_FILE, H5E_NOSPACE, FAIL, "can't allocate buffer for H5O_loc_t")
+ if((obj_paths = (H5G_name_t *) H5MM_malloc(grp_dset_count * sizeof(H5G_name_t))) == NULL)
+ HGOTO_ERROR(H5E_FILE, H5E_NOSPACE, FAIL, "can't allocate buffer for H5G_name_t")
+
+ /* Get the list of opened object ids (groups & datasets) */
+ if(H5F_get_obj_ids(file, H5F_OBJ_LOCAL|H5F_OBJ_GROUP|H5F_OBJ_DATASET, grp_dset_count, obj_ids, FALSE, &grp_dset_count) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "H5F_get_obj_ids failed")
+
+ /* Refresh opened objects (groups, datasets) in the file */
+ for(u = 0; u < grp_dset_count; u++) {
+ H5O_loc_t *oloc; /* object location */
+
+ /* Set up the id's group location */
+ obj_glocs[u].oloc = &obj_olocs[u];
+ obj_glocs[u].path = &obj_paths[u];
+ H5G_loc_reset(&obj_glocs[u]);
+
+ /* get the id's object location */
+ if((oloc = H5O_get_loc(obj_ids[u])) == NULL)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an object")
+
+ /* Refresh (part 1) */
+ if(H5O_refresh_metadata_close(obj_ids[u], *oloc, &obj_glocs[u], H5AC_dxpl_id) < 0)
+ HGOTO_ERROR(H5E_ATOM, H5E_CLOSEERROR, FAIL, "can't refresh-close object")
+ } /* end for */
+ } /* end if */
+
+ /* Set up I/O info for operation */
+ fio_info.f = file;
+ if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(H5AC_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+
+ /* Flush and reset the accumulator */
+ if(H5F__accum_reset(&fio_info, TRUE) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_CANTRESET, FAIL, "can't reset accumulator")
+
+ /* Turn on SWMR write in shared file open flags */
+ file->shared->flags |= H5F_ACC_SWMR_WRITE;
+
+ /* Mark the file in SWMR writing mode */
+ file->shared->sblock->status_flags |= H5F_SUPER_SWMR_WRITE_ACCESS;
+
+ /* Set up metadata read attempts */
+ file->shared->read_attempts = H5F_SWMR_METADATA_READ_ATTEMPTS;
+
+ /* Initialize "retries" and "retries_nbins" */
+ if(H5F_set_retries(file) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "can't set retries and retries_nbins")
+
+ /* Turn off usage of accumulator */
+ file->shared->feature_flags &= ~(unsigned)H5FD_FEAT_ACCUMULATE_METADATA;
+ if(H5FD_set_feature_flags(file->shared->lf, file->shared->feature_flags) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "can't set feature_flags in VFD")
+
+ setup = TRUE;
+
+ /* Mark superblock as dirty */
+ if(H5F_super_dirty(file) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
+
+ /* Flush the superblock */
+ if(H5F_flush_tagged_metadata(file, (haddr_t)0, H5AC_dxpl_id) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush superblock")
+
+ /* Evict all flushed entries in the cache except the pinned superblock */
+ if(H5F_evict_cache_entries(file, H5AC_dxpl_id) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to evict file's cached information")
+
+ /* Refresh (part 2: reopen) the objects (groups & datasets) in the file */
+ for(u = 0; u < grp_dset_count; u++) {
+ if(H5O_refresh_metadata_reopen(obj_ids[u], &obj_glocs[u], H5AC_dxpl_id) < 0)
+ HGOTO_ERROR(H5E_ATOM, H5E_CLOSEERROR, FAIL, "can't refresh-close object")
+ }
+
+ /* Unlock the file */
+ if(H5FD_unlock(file->shared->lf) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to unlock the file")
+done:
+ if(ret_value < 0 && setup) {
+ HDassert(file);
+
+ /* Re-enable accumulator */
+ file->shared->feature_flags |= (unsigned)H5FD_FEAT_ACCUMULATE_METADATA;
+ if(H5FD_set_feature_flags(file->shared->lf, file->shared->feature_flags) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "can't set feature_flags in VFD")
+
+ /* Reset the # of read attempts */
+ file->shared->read_attempts = H5F_METADATA_READ_ATTEMPTS;
+ if(H5F_set_retries(file) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "can't set retries and retries_nbins")
+
+ /* Un-set H5F_ACC_SWMR_WRITE in shared open flags */
+ file->shared->flags &= ~H5F_ACC_SWMR_WRITE;
+
+ /* Unmark the file: not in SWMR writing mode */
+ file->shared->sblock->status_flags &= ~(uint8_t)H5F_SUPER_SWMR_WRITE_ACCESS;
+
+ /* Mark superblock as dirty */
+ if(H5F_super_dirty(file) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
+
+ /* Flush the superblock */
+ if(H5F_flush_tagged_metadata(file, (haddr_t)0, H5AC_dxpl_id) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush superblock")
+ }
+ /* Free memory */
+ if(obj_ids)
+ H5MM_xfree(obj_ids);
+ if(obj_glocs)
+ H5MM_xfree(obj_glocs);
+ if(obj_olocs)
+ H5MM_xfree(obj_olocs);
+ if(obj_paths)
+ H5MM_xfree(obj_paths);
+
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Fstart_swmr_write() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Fstart_mdc_logging
+ *
+ * Purpose: Start metadata cache logging operations for a file.
+ * - Logging must have been set up via the fapl.
+ *
+ * Return: Non-negative on success/Negative on errors
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Fstart_mdc_logging(hid_t file_id)
+{
+ H5F_t *file; /* File info */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", file_id);
+
+ /* Sanity check */
+ if(NULL == (file = (H5F_t *)H5I_object_verify(file_id, H5I_FILE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "hid_t identifier is not a file ID")
+
+ /* Call mdc logging function */
+ if(H5C_start_logging(file->shared->cache) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_LOGFAIL, FAIL, "unable to start mdc logging")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+
+} /* H5Fstart_mdc_logging() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Fstop_mdc_logging
+ *
+ * Purpose: Stop metadata cache logging operations for a file.
+ * - Does not close the log file.
+ * - Logging must have been set up via the fapl.
+ *
+ * Return: Non-negative on success/Negative on errors
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Fstop_mdc_logging(hid_t file_id)
+{
+ H5F_t *file; /* File info */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", file_id);
+
+ /* Sanity check */
+ if(NULL == (file = (H5F_t *)H5I_object_verify(file_id, H5I_FILE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "hid_t identifier is not a file ID")
+
+ /* Call mdc logging function */
+ if(H5C_stop_logging(file->shared->cache) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_LOGFAIL, FAIL, "unable to stop mdc logging")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+
+} /* H5Fstop_mdc_logging() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Fget_mdc_logging_status
+ *
+ * Purpose: Get the logging flags. is_enabled determines if logging was
+ * set up via the fapl. is_currently_logging determines if
+ * log messages are being recorded at this time.
+ *
+ * Return: Non-negative on success/Negative on errors
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Fget_mdc_logging_status(hid_t file_id, hbool_t *is_enabled,
+ hbool_t *is_currently_logging)
+{
+ H5F_t *file; /* File info */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE3("e", "i*b*b", file_id, is_enabled, is_currently_logging);
+
+ /* Sanity check */
+ if(NULL == (file = (H5F_t *)H5I_object_verify(file_id, H5I_FILE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "hid_t identifier is not a file ID")
+
+ /* Call mdc logging function */
+ if(H5C_get_logging_status(file->shared->cache, is_enabled, is_currently_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_LOGFAIL, FAIL, "unable to get logging status")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+
+} /* H5Fstop_mdc_logging() */
diff --git a/src/H5FA.c b/src/H5FA.c
index 1be3f2e..a1b4fbc 100644
--- a/src/H5FA.c
+++ b/src/H5FA.c
@@ -79,7 +79,9 @@ hbool_t H5_PKG_INIT_VAR = FALSE;
* client class..
*/
const H5FA_class_t *const H5FA_client_class_g[] = {
- H5FA_CLS_TEST, /* ? - H5FA_CLS_TEST_ID */
+ H5FA_CLS_CHUNK, /* 0 - H5FA_CLS_CHUNK_ID */
+ H5FA_CLS_FILT_CHUNK, /* 1 - H5FA_CLS_FILT_CHUNK_ID */
+ H5FA_CLS_TEST, /* ? - H5FA_CLS_TEST_ID */
};
@@ -739,3 +741,112 @@ CATCH
END_FUNC(PRIV) /* end H5FA_iterate() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5FA_depend
+ *
+ * Purpose: Make a child flush dependency between the fixed array's
+ * header and another piece of metadata in the file.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(PRIV, ERR,
+herr_t, SUCCEED, FAIL,
+H5FA_depend(H5AC_info_t *parent_entry, H5FA_t *fa))
+
+ /* Local variables */
+ H5FA_hdr_t *hdr = fa->hdr; /* Header for FA */
+
+#ifdef QAK
+HDfprintf(stderr, "%s: Called\n", FUNC);
+#endif /* QAK */
+
+ /*
+ * Check arguments.
+ *
+ * At present, this function is only used to set up a flush dependency
+ * between an object header proxy and the fixed array header when
+ * the fixed array is being used to index a chunked data set.
+ *
+ * Make sure that the parameters are congruent with this.
+ */
+ HDassert(fa);
+ HDassert(hdr);
+ HDassert(parent_entry);
+ HDassert(parent_entry->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(parent_entry->type);
+ HDassert(parent_entry->type->id == H5AC_OHDR_PROXY_ID);
+ HDassert((hdr->fd_parent_addr == HADDR_UNDEF) ||
+ (hdr->fd_parent_addr == parent_entry->addr));
+ HDassert((hdr->fd_parent_ptr == NULL) ||
+ (hdr->fd_parent_ptr == parent_entry));
+
+ /*
+ * Check to see if the flush dependency between the object header proxy
+ * and the fixed array header has already been set up. If it hasn't,
+ * set it up.
+ */
+ if(!H5F_addr_defined(hdr->fd_parent_addr)) {
+ /* Set the shared array header's file context for this operation */
+ hdr->f = fa->f;
+
+ /* Set up flush dependency between parent entry and fixed
+ * array header
+ */
+ if(H5FA__create_flush_depend(parent_entry, (H5AC_info_t *)hdr) < 0)
+ H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency on file metadata")
+
+ hdr->fd_parent_addr = parent_entry->addr;
+ hdr->fd_parent_ptr = parent_entry;
+ } /* end if */
+
+CATCH
+
+END_FUNC(PRIV) /* end H5FA_depend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FA_undepend
+ *
+ * Purpose: Remove a child flush dependency between the fixed array's
+ * header and another piece of metadata in the file.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(PRIV, ERR,
+herr_t, SUCCEED, FAIL,
+H5FA_undepend(H5AC_info_t *parent_entry, H5FA_t *fa))
+
+ /* Local variables */
+ H5FA_hdr_t *hdr = fa->hdr; /* Header for FA */
+
+#ifdef QAK
+HDfprintf(stderr, "%s: Called\n", FUNC);
+#endif /* QAK */
+
+ /*
+ * Check arguments.
+ */
+ HDassert(fa);
+ HDassert(hdr);
+
+ /* Set the shared array header's file context for this operation */
+ hdr->f = fa->f;
+
+ /* Remove flush dependency between parent entry and fixed array header */
+ if(H5FA__destroy_flush_depend(parent_entry, (H5AC_info_t *)hdr) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency on file metadata")
+
+CATCH
+
+END_FUNC(PRIV) /* end H5FA_undepend() */
diff --git a/src/H5FAcache.c b/src/H5FAcache.c
index 1d4afa4..b4e9dfc 100644
--- a/src/H5FAcache.c
+++ b/src/H5FAcache.c
@@ -71,16 +71,23 @@
/********************/
/* Metadata cache (H5AC) callbacks */
-static herr_t H5FA__cache_hdr_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5FA__cache_hdr_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5FA__cache_hdr_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5FA__cache_hdr_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5FA__cache_hdr_image_len(const void *thing, size_t *image_len,
hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static herr_t H5FA__cache_hdr_serialize(const H5F_t *f, void *image, size_t len,
void *thing);
+static herr_t H5FA__cache_hdr_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5FA__cache_hdr_free_icr(void *thing);
-static herr_t H5FA__cache_dblock_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5FA__cache_dblock_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5FA__cache_dblock_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5FA__cache_dblock_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5FA__cache_dblock_image_len(const void *thing,
@@ -88,10 +95,14 @@ static herr_t H5FA__cache_dblock_image_len(const void *thing,
size_t *compressed_image_len_ptr);
static herr_t H5FA__cache_dblock_serialize(const H5F_t *f, void *image, size_t len,
void *thing);
+static herr_t H5FA__cache_dblock_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5FA__cache_dblock_free_icr(void *thing);
static herr_t H5FA__cache_dblock_fsf_size(const void *thing, size_t *fsf_size);
-static herr_t H5FA__cache_dblk_page_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5FA__cache_dblk_page_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5FA__cache_dblk_page_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5FA__cache_dblk_page_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5FA__cache_dblk_page_image_len(const void *thing,
@@ -113,11 +124,12 @@ const H5AC_class_t H5AC_FARRAY_HDR[1] = {{
H5FD_MEM_FARRAY_HDR, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5FA__cache_hdr_get_load_size, /* 'get_load_size' callback */
+ H5FA__cache_hdr_verify_chksum, /* 'verify_chksum' callback */
H5FA__cache_hdr_deserialize, /* 'deserialize' callback */
H5FA__cache_hdr_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
H5FA__cache_hdr_serialize, /* 'serialize' callback */
- NULL, /* 'notify' callback */
+ H5FA__cache_hdr_notify, /* 'notify' callback */
H5FA__cache_hdr_free_icr, /* 'free_icr' callback */
NULL, /* 'clear' callback */
NULL, /* 'fsf_size' callback */
@@ -130,11 +142,12 @@ const H5AC_class_t H5AC_FARRAY_DBLOCK[1] = {{
H5FD_MEM_FARRAY_DBLOCK, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5FA__cache_dblock_get_load_size, /* 'get_load_size' callback */
+ H5FA__cache_dblock_verify_chksum, /* 'verify_chksum' callback */
H5FA__cache_dblock_deserialize, /* 'deserialize' callback */
H5FA__cache_dblock_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
H5FA__cache_dblock_serialize, /* 'serialize' callback */
- NULL, /* 'notify' callback */
+ H5FA__cache_dblock_notify, /* 'notify' callback */
H5FA__cache_dblock_free_icr, /* 'free_icr' callback */
NULL, /* 'clear' callback */
H5FA__cache_dblock_fsf_size, /* 'fsf_size' callback */
@@ -147,6 +160,7 @@ const H5AC_class_t H5AC_FARRAY_DBLK_PAGE[1] = {{
H5FD_MEM_FARRAY_DBLK_PAGE, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5FA__cache_dblk_page_get_load_size, /* 'get_load_size' callback */
+ H5FA__cache_dblk_page_verify_chksum, /* 'verify_chksum' callback */
H5FA__cache_dblk_page_deserialize, /* 'deserialize' callback */
H5FA__cache_dblk_page_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -184,23 +198,65 @@ const H5AC_class_t H5AC_FARRAY_DBLK_PAGE[1] = {{
*/
BEGIN_FUNC(STATIC, NOERR,
herr_t, SUCCEED, -,
-H5FA__cache_hdr_get_load_size(const void *_udata, size_t *image_len))
+H5FA__cache_hdr_get_load_size(const void *_image, void *_udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- const H5FA_hdr_cache_ud_t *udata = (const H5FA_hdr_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5FA_hdr_cache_ud_t *udata = (H5FA_hdr_cache_ud_t *)_udata; /* User data for callback */
/* Check arguments */
HDassert(udata);
HDassert(udata->f);
HDassert(image_len);
- /* Set the image length size */
- *image_len = (size_t)H5FA_HEADER_SIZE_FILE(udata->f);
+ if(image == NULL) {
+ /* Set the image length size */
+ *image_len = (size_t)H5FA_HEADER_SIZE_FILE(udata->f);
+ } else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
END_FUNC(STATIC) /* end H5FA__cache_hdr_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5FA__cache_hdr_verify_chksum
+ *
+ * Purpose: Verify that the computed checksum of the data structure is the
+ * same as the stored checksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+htri_t, TRUE, -,
+H5FA__cache_hdr_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata))
+
+ /* Local variables */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+END_FUNC(STATIC) /* end H5FA__cache_hdr_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FA__cache_hdr_deserialize
*
* Purpose: Loads a data structure from the disk.
@@ -225,7 +281,6 @@ H5FA__cache_hdr_deserialize(const void *_image, size_t len,
H5FA_hdr_cache_ud_t *udata = (H5FA_hdr_cache_ud_t *)_udata;
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
/* Check arguments */
HDassert(udata);
@@ -268,30 +323,28 @@ H5FA__cache_hdr_deserialize(const void *_image, size_t len,
/* Check for data block */
if(H5F_addr_defined(hdr->dblk_addr)) {
- H5FA_dblock_t dblock; /* Fake data block for computing size */
- size_t dblk_page_nelmts; /* # of elements per data block page */
-
- /* Set up fake data block for computing size on disk */
- dblock.hdr = hdr;
- dblock.dblk_page_init_size = 0;
- dblock.npages = 0;
- dblk_page_nelmts = (size_t)1 << hdr->cparam.max_dblk_page_nelmts_bits;
- if(hdr->cparam.nelmts > dblk_page_nelmts) {
- dblock.npages = (size_t)(((hdr->cparam.nelmts + dblk_page_nelmts) - 1) / dblk_page_nelmts);
- dblock.dblk_page_init_size = (dblock.npages + 7) / 8;
- } /* end if */
+ H5FA_dblock_t dblock; /* Fake data block for computing size */
+ size_t dblk_page_nelmts; /* # of elements per data block page */
+
+ /* Set up fake data block for computing size on disk */
+ dblock.hdr = hdr;
+ dblock.dblk_page_init_size = 0;
+ dblock.npages = 0;
+ dblk_page_nelmts = (size_t)1 << hdr->cparam.max_dblk_page_nelmts_bits;
+ if(hdr->cparam.nelmts > dblk_page_nelmts) {
+ dblock.npages = (size_t)(((hdr->cparam.nelmts + dblk_page_nelmts) - 1) / dblk_page_nelmts);
+ dblock.dblk_page_init_size = (dblock.npages + 7) / 8;
+ } /* end if */
/* Compute Fixed Array data block size for hdr statistics */
- hdr->stats.dblk_size = (size_t)H5FA_DBLOCK_SIZE(&dblock);
+ hdr->stats.dblk_size = (size_t)H5FA_DBLOCK_SIZE(&dblock);
} /* end if */
/* Sanity check */
/* (allow for checksum not decoded yet) */
HDassert((size_t)(image - (const uint8_t *)_image) == (len - H5FA_SIZEOF_CHKSUM));
- /* Compute checksum on entire header */
- /* (including the filter information, if present) */
- computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
@@ -299,13 +352,9 @@ H5FA__cache_hdr_deserialize(const void *_image, size_t len,
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == len);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- H5E_THROW(H5E_BADVALUE, "incorrect metadata checksum for fixed array header")
-
/* Finish initializing fixed array header */
if(H5FA__hdr_init(hdr, udata->ctx_udata) < 0)
- H5E_THROW(H5E_CANTINIT, "initialization failed for fixed array header")
+ H5E_THROW(H5E_CANTINIT, "initialization failed for fixed array header")
HDassert(hdr->size == len);
/* Set return value */
@@ -413,6 +462,82 @@ END_FUNC(STATIC) /* end H5FA__cache_hdr_serialize() */
/*-------------------------------------------------------------------------
+ * Function: H5FA__cache_hdr_notify
+ *
+ * Purpose: Handle cache action notifications
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Dana Robinson
+ * December 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, ERR,
+herr_t, SUCCEED, FAIL,
+H5FA__cache_hdr_notify(H5AC_notify_action_t action, void *_thing))
+
+ /* Local variables */
+ H5FA_hdr_t *hdr = (H5FA_hdr_t *)_thing; /* Pointer to the object */
+
+ /* Sanity check */
+ HDassert(hdr);
+
+ /* Check if the file was opened with SWMR-write access */
+ if(hdr->swmr_write) {
+ /* Determine which action to take */
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* If hdr->fd_parent_addr != HADDR_UNDEF, the fixed
+ * array header must be employed as the index for a chunked
+ * data set which has been modified by the SWMR writer.
+ *
+ * In this case, hdr->fd_parent_addr must contain the
+ * address of object header proxy which is the flush
+ * dependency parent of the fixed array header.
+ *
+ * hdr->fd_parent_addr (and hdr->fd_parent_ptr) are used to
+ * destroy the flush dependency before the fixed array
+ * header is evicted.
+ */
+ if(hdr->fd_parent_addr != HADDR_UNDEF) {
+ HDassert(hdr->fd_parent_ptr);
+ HDassert(hdr->fd_parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(hdr->fd_parent_ptr->addr == hdr->fd_parent_addr);
+ HDassert(hdr->fd_parent_ptr->type);
+ HDassert(hdr->fd_parent_ptr->type->id == H5AC_OHDR_PROXY_ID);
+
+ /* Destroy flush dependency on object header proxy */
+ if(H5FA__destroy_flush_depend((H5AC_info_t *)hdr->fd_parent_ptr, (H5AC_info_t *)hdr) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency between fa header and object header proxy, address = %llu", (unsigned long long)hdr->fd_parent_addr)
+ } /* end if */
+ break;
+
+ default:
+#ifdef NDEBUG
+ H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
+ else {
+ HDassert(hdr->fd_parent_addr == HADDR_UNDEF);
+ HDassert(hdr->fd_parent_ptr == NULL);
+ } /* end else */
+
+CATCH
+
+END_FUNC(STATIC) /* end H5FA__cache_hdr_notify() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FA__cache_hdr_free_icr
*
* Purpose: Destroy/release an "in core representation" of a data
@@ -457,46 +582,89 @@ END_FUNC(STATIC) /* end H5FA__cache_hdr_free_icr() */
*/
BEGIN_FUNC(STATIC, NOERR,
herr_t, SUCCEED, -,
-H5FA__cache_dblock_get_load_size(const void *_udata, size_t *image_len))
+H5FA__cache_dblock_get_load_size(const void *_image, void *_udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- const H5FA_dblock_cache_ud_t *udata = (const H5FA_dblock_cache_ud_t *)_udata; /* User data */
- H5FA_dblock_t dblock; /* Fake data block for computing size */
- size_t dblk_page_nelmts; /* # of elements per data block page */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5FA_dblock_cache_ud_t *udata = (H5FA_dblock_cache_ud_t *)_udata; /* User data */
+ H5FA_dblock_t dblock; /* Fake data block for computing size */
+ size_t dblk_page_nelmts; /* # of elements per data block page */
/* Check arguments */
HDassert(udata);
HDassert(udata->hdr);
HDassert(image_len);
- /* Set up fake data block for computing size on disk */
- /* (Note: extracted from H5FA__dblock_alloc) */
- HDmemset(&dblock, 0, sizeof(dblock));
-
- /* Set up fake data block for computing size on disk
- *
- * need: dblock->hdr
- * dblock->npages
- * dblock->dblk_page_init_size
- */
-
- dblock.hdr = udata->hdr;
- dblk_page_nelmts = (size_t)1 << udata->hdr->cparam.max_dblk_page_nelmts_bits;
- if(udata->hdr->cparam.nelmts > dblk_page_nelmts) {
- dblock.npages = (size_t)(((udata->hdr->cparam.nelmts + dblk_page_nelmts) - 1) / dblk_page_nelmts);
- dblock.dblk_page_init_size = (dblock.npages + 7) / 8;
- } /* end if */
+ if(image == NULL) {
- /* Set the image length size */
- if(!dblock.npages)
- *image_len = (size_t)H5FA_DBLOCK_SIZE(&dblock);
- else
- *image_len = (size_t)H5FA_DBLOCK_PREFIX_SIZE(&dblock);
+ /* Set up fake data block for computing size on disk */
+ /* (Note: extracted from H5FA__dblock_alloc) */
+ HDmemset(&dblock, 0, sizeof(dblock));
+
+ /* Set up fake data block for computing size on disk
+ *
+ * need: dblock->hdr
+ * dblock->npages
+ * dblock->dblk_page_init_size
+ */
+
+ dblock.hdr = udata->hdr;
+ dblk_page_nelmts = (size_t)1 << udata->hdr->cparam.max_dblk_page_nelmts_bits;
+ if(udata->hdr->cparam.nelmts > dblk_page_nelmts) {
+ dblock.npages = (size_t)(((udata->hdr->cparam.nelmts + dblk_page_nelmts) - 1) / dblk_page_nelmts);
+ dblock.dblk_page_init_size = (dblock.npages + 7) / 8;
+ } /* end if */
+
+ /* Set the image length size */
+ if(!dblock.npages)
+ *image_len = (size_t)H5FA_DBLOCK_SIZE(&dblock);
+ else
+ *image_len = (size_t)H5FA_DBLOCK_PREFIX_SIZE(&dblock);
+ } else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
END_FUNC(STATIC) /* end H5FA__cache_dblock_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5FA__cache_dblock_verify_chksum
+ *
+ * Purpose: Verify that the computed checksum of the data structure is the
+ * same as the stored checksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+htri_t, TRUE, -,
+H5FA__cache_dblock_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata))
+
+ /* Local variables */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+END_FUNC(STATIC) /* end H5FA__cache_dblock_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FA__cache_dblock_deserialize
*
* Purpose: Loads a data structure from the disk.
@@ -520,7 +688,6 @@ H5FA__cache_dblock_deserialize(const void *_image, size_t len,
H5FA_dblock_cache_ud_t *udata = (H5FA_dblock_cache_ud_t *)_udata; /* User data for loading data block */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
haddr_t arr_addr; /* Address of array header in the file */
/* Sanity check */
@@ -577,8 +744,7 @@ H5FA__cache_dblock_deserialize(const void *_image, size_t len,
/* Set the data block's size */
dblock->size = H5FA_DBLOCK_SIZE(dblock);
- /* Compute checksum on data block */
- computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
@@ -586,10 +752,6 @@ H5FA__cache_dblock_deserialize(const void *_image, size_t len,
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == len);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- H5E_THROW(H5E_BADVALUE, "incorrect metadata checksum for fixed array data block")
-
/* Set return value */
ret_value = dblock;
@@ -710,6 +872,66 @@ CATCH
END_FUNC(STATIC) /* end H5FA__cache_dblock_serialize() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FA__cache_dblock_notify
+ *
+ * Purpose: Handle cache action notifications
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, ERR,
+herr_t, SUCCEED, FAIL,
+H5FA__cache_dblock_notify(H5AC_notify_action_t action, void *_thing))
+
+ /* Local variables */
+ H5FA_dblock_t *dblock = (H5FA_dblock_t *)_thing;
+
+ /* Sanity check */
+ HDassert(dblock);
+
+ /* Check if the file was opened with SWMR-write access */
+ if(dblock->hdr->swmr_write) {
+ /* Determine which action to take */
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ /* Create flush dependency on parent */
+ if(H5FA__create_flush_depend((H5AC_info_t *)dblock->hdr, (H5AC_info_t *)dblock) < 0)
+ H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency between data block and header, address = %llu", (unsigned long long)dblock->addr)
+ break;
+
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* Destroy flush dependency on parent */
+ if(H5FA__destroy_flush_depend((H5AC_info_t *)dblock->hdr, (H5AC_info_t *)dblock) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency")
+
+ break;
+
+ default:
+#ifdef NDEBUG
+ H5E_THROW(H5E_BADVALUE, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
+
+CATCH
+
+END_FUNC(STATIC) /* end H5FA__cache_dblock_notify() */
+
+
/*-------------------------------------------------------------------------
* Function: H5FA__cache_dblock_free_icr
@@ -804,10 +1026,13 @@ END_FUNC(STATIC) /* end H5FA__cache_dblock_fsf_size() */
*/
BEGIN_FUNC(STATIC, NOERR,
herr_t, SUCCEED, -,
-H5FA__cache_dblk_page_get_load_size(const void *_udata, size_t *image_len))
+H5FA__cache_dblk_page_get_load_size(const void *_image, void *_udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr))
/* Local variables */
- const H5FA_dblk_page_cache_ud_t *udata = (const H5FA_dblk_page_cache_ud_t *)_udata; /* User data */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5FA_dblk_page_cache_ud_t *udata = (H5FA_dblk_page_cache_ud_t *)_udata; /* User data */
/* Check arguments */
HDassert(udata);
@@ -815,12 +1040,51 @@ H5FA__cache_dblk_page_get_load_size(const void *_udata, size_t *image_len))
HDassert(udata->nelmts > 0);
HDassert(image_len);
- *image_len = (size_t)H5FA_DBLK_PAGE_SIZE(udata->hdr, udata->nelmts);
+ if(image == NULL)
+ *image_len = (size_t)H5FA_DBLK_PAGE_SIZE(udata->hdr, udata->nelmts);
+ else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
END_FUNC(STATIC) /* end H5FA__cache_dblk_page_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5FA__cache_dblk_page_verify_chksum
+ *
+ * Purpose: Verify that the computed checksum of the data structure is the
+ * same as the stored checksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(STATIC, NOERR,
+htri_t, TRUE, -,
+H5FA__cache_dblk_page_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata))
+
+ /* Local variables */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+END_FUNC(STATIC) /* end H5FA__cache_dblk_page_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FA__cache_dblk_page_deserialize
*
* Purpose: Loads a data structure from the disk.
@@ -840,11 +1104,10 @@ H5FA__cache_dblk_page_deserialize(const void *_image, size_t len,
void *_udata, hbool_t H5_ATTR_UNUSED *dirty))
/* Local variables */
- H5FA_dblk_page_t *dblk_page = NULL; /* Data block page info */
+ H5FA_dblk_page_t *dblk_page = NULL; /* Data block page info */
H5FA_dblk_page_cache_ud_t *udata = (H5FA_dblk_page_cache_ud_t *)_udata; /* User data for loading data block page */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
/* Sanity check */
HDassert(udata);
@@ -854,7 +1117,7 @@ H5FA__cache_dblk_page_deserialize(const void *_image, size_t len,
/* Allocate the fixed array data block page */
if(NULL == (dblk_page = H5FA__dblk_page_alloc(udata->hdr, udata->nelmts)))
- H5E_THROW(H5E_CANTALLOC, "memory allocation failed for fixed array data block page")
+ H5E_THROW(H5E_CANTALLOC, "memory allocation failed for fixed array data block page")
/* Set the fixed array data block's information */
dblk_page->addr = udata->dblk_page_addr;
@@ -874,8 +1137,7 @@ H5FA__cache_dblk_page_deserialize(const void *_image, size_t len,
/* Set the data block page's size */
dblk_page->size = len;
- /* Compute checksum on data block */
- computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
@@ -883,10 +1145,6 @@ H5FA__cache_dblk_page_deserialize(const void *_image, size_t len,
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == dblk_page->size);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- H5E_THROW(H5E_BADVALUE, "incorrect metadata checksum for fixed array data block page")
-
/* Set return value */
ret_value = dblk_page;
@@ -1011,4 +1269,3 @@ H5FA__cache_dblk_page_free_icr(void *thing))
CATCH
END_FUNC(STATIC) /* end H5FA__cache_dblk_page_free_icr() */
-
diff --git a/src/H5FAhdr.c b/src/H5FAhdr.c
index 62b1837..3acf807 100644
--- a/src/H5FAhdr.c
+++ b/src/H5FAhdr.c
@@ -109,9 +109,12 @@ H5FA__hdr_alloc(H5F_t *f))
/* Set non-zero internal fields */
hdr->addr = HADDR_UNDEF;
+ hdr->fd_parent_addr = HADDR_UNDEF;
+ hdr->fd_parent_ptr = NULL;
/* Set the internal parameters for the array */
hdr->f = f;
+ hdr->swmr_write = (H5F_INTENT(f) & H5F_ACC_SWMR_WRITE) > 0;
hdr->sizeof_addr = H5F_SIZEOF_ADDR(f);
hdr->sizeof_size = H5F_SIZEOF_SIZE(f);
diff --git a/src/H5FAint.c b/src/H5FAint.c
new file mode 100644
index 0000000..331227b
--- /dev/null
+++ b/src/H5FAint.c
@@ -0,0 +1,139 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5FAint.c
+ * Fall 2012
+ * Dana Robinson <derobins@hdfgroup.org>
+ *
+ * Purpose: Internal routines for fixed arrays.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/**********************/
+/* Module Declaration */
+/**********************/
+
+#include "H5FAmodule.h" /* This source code file is part of the H5FA module */
+
+
+/***********************/
+/* Other Packages Used */
+/***********************/
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Eprivate.h" /* Error Handling */
+#include "H5FApkg.h" /* Fixed Arrays */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Package Typedefs */
+/********************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FA__create_flush_depend
+ *
+ * Purpose: Create a flush dependency between two data structure components
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(PKG, ERR,
+herr_t, SUCCEED, FAIL,
+H5FA__create_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry))
+
+ /* Sanity check */
+ HDassert(parent_entry);
+ HDassert(child_entry);
+
+ /* Create a flush dependency between parent and child entry */
+ if(H5AC_create_flush_dependency(parent_entry, child_entry) < 0)
+ H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency")
+
+CATCH
+
+END_FUNC(PKG) /* end H5FA__create_flush_depend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FA__destroy_flush_depend
+ *
+ * Purpose: Destroy a flush dependency between two data structure components
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+BEGIN_FUNC(PKG, ERR,
+herr_t, SUCCEED, FAIL,
+H5FA__destroy_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry))
+
+ /* Sanity check */
+ HDassert(parent_entry);
+ HDassert(child_entry);
+
+ /* Destroy a flush dependency between parent and child entry */
+ if(H5AC_destroy_flush_dependency(parent_entry, child_entry) < 0)
+ H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency")
+
+CATCH
+
+END_FUNC(PKG) /* end H5FA__destroy_flush_depend() */
+
diff --git a/src/H5FApkg.h b/src/H5FApkg.h
index c29322a..890bf39 100644
--- a/src/H5FApkg.h
+++ b/src/H5FApkg.h
@@ -137,6 +137,7 @@ typedef struct H5FA_hdr_t {
haddr_t addr; /* Address of header in file */
size_t size; /* Size of header in file */
H5F_t *f; /* Pointer to file for fixed array */
+ hbool_t swmr_write; /* Flag indicating the file is opened with SWMR-write access */
size_t file_rc; /* Reference count of files using array header */
hbool_t pending_delete; /* Array is pending deletion */
size_t sizeof_addr; /* Size of file addresses */
@@ -144,6 +145,33 @@ typedef struct H5FA_hdr_t {
/* Client information (not stored) */
void *cb_ctx; /* Callback context */
+
+ /* Flush depencency parent information (not stored) */
+ haddr_t fd_parent_addr; /* Address of flush dependency parent,
+ * if any. This field is initialized
+ * to HADDR_UNDEF. If the fixed
+ * array is being used to index a
+ * chunked data set and the dataset
+ * metadata is modified by a SWMR
+ * writer, this field will be set equal
+ * to the object header proxy that is
+ * the flush dependency parent of the
+ * fixed array header.
+ *
+ * The field is used to avoid duplicate
+ * setups of the flush dependency
+ * relationship, and to allow the
+ * fixed array header to destroy
+ * the flush dependency on receipt of
+ * an eviction notification from the
+ * metadata cache.
+ */
+
+ H5AC_info_t *fd_parent_ptr; /* Pointer to flush dependency parent,
+ * if it exists, otherwise NULL. (See
+ * comment for fd_parent_addr above for
+ * further details)
+ */
} H5FA_hdr_t;
/* The fixed array data block information */
@@ -241,6 +269,12 @@ H5_DLLVAR const H5FA_class_t *const H5FA_client_class_g[H5FA_NUM_CLS_ID];
/* Package Private Prototypes */
/******************************/
+/* Generic routines */
+H5_DLL herr_t H5FA__create_flush_depend(H5AC_info_t *parent_entry,
+ H5AC_info_t *child_entry);
+H5_DLL herr_t H5FA__destroy_flush_depend(H5AC_info_t *parent_entry,
+ H5AC_info_t *child_entry);
+
/* Header routines */
H5_DLL H5FA_hdr_t *H5FA__hdr_alloc(H5F_t *f);
H5_DLL herr_t H5FA__hdr_init(H5FA_hdr_t *hdr, void *ctx_udata);
diff --git a/src/H5FAprivate.h b/src/H5FAprivate.h
index aa88003..2ce4a1d 100644
--- a/src/H5FAprivate.h
+++ b/src/H5FAprivate.h
@@ -32,7 +32,8 @@
#endif /* NOT_YET */
/* Private headers needed by this file */
-#include "H5Fprivate.h" /* File access */
+#include "H5ACprivate.h" /* Metadata cache */
+#include "H5Fprivate.h" /* File access */
/**************************/
@@ -46,6 +47,9 @@
/* Fixed Array class IDs */
typedef enum H5FA_cls_id_t {
+ H5FA_CLS_CHUNK_ID = 0, /* Fixed array is for indexing dataset chunks w/o filters */
+ H5FA_CLS_FILT_CHUNK_ID, /* Fixed array is for indexing dataset chunks w/filters */
+
/* Start real class IDs at 0 -QAK */
/* (keep these last) */
H5FA_CLS_TEST_ID, /* Fixed array is for testing (do not use for actual data) */
@@ -104,6 +108,12 @@ typedef int (*H5FA_operator_t)(hsize_t idx, const void *_elmt, void *_udata);
/* Library-private Variables */
/*****************************/
+/* The Fixed Array class for dataset chunks w/o filters*/
+H5_DLLVAR const H5FA_class_t H5FA_CLS_CHUNK[1];
+
+/* The Fixed Array class for dataset chunks w/ filters*/
+H5_DLLVAR const H5FA_class_t H5FA_CLS_FILT_CHUNK[1];
+
/***************************************/
/* Library-private Function Prototypes */
@@ -117,6 +127,8 @@ H5_DLL herr_t H5FA_get_nelmts(const H5FA_t *fa, hsize_t *nelmts);
H5_DLL herr_t H5FA_get_addr(const H5FA_t *fa, haddr_t *addr);
H5_DLL herr_t H5FA_set(const H5FA_t *fa, hid_t dxpl_id, hsize_t idx, const void *elmt);
H5_DLL herr_t H5FA_get(const H5FA_t *fa, hid_t dxpl_id, hsize_t idx, void *elmt);
+H5_DLL herr_t H5FA_depend(H5AC_info_t *parent_entry, H5FA_t *fa);
+H5_DLL herr_t H5FA_undepend(H5AC_info_t *parent_entry, H5FA_t *fa);
H5_DLL herr_t H5FA_iterate(H5FA_t *fa, hid_t dxpl_id, H5FA_operator_t op, void *udata);
H5_DLL herr_t H5FA_close(H5FA_t *fa, hid_t dxpl_id);
H5_DLL herr_t H5FA_delete(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata);
diff --git a/src/H5FD.c b/src/H5FD.c
index 2a15fe8..1a63795 100644
--- a/src/H5FD.c
+++ b/src/H5FD.c
@@ -840,6 +840,10 @@ H5FD_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr)
/* (This will be changed later, when the superblock is located) */
file->base_addr = 0;
+ /* Check for SWMR reader access */
+ if(flags & H5F_ACC_SWMR_READ)
+ file->swmr_read = TRUE;
+
/* Set return value */
ret_value = file;
@@ -1441,6 +1445,32 @@ H5FD_get_feature_flags(const H5FD_t *file, unsigned long *feature_flags)
/*-------------------------------------------------------------------------
+ * Function: H5FD_set_feature_flags
+ *
+ * Purpose: Set the feature flags for the VFD
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Oct 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FD_set_feature_flags(H5FD_t *file, unsigned long feature_flags)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(file);
+
+ /* Set the file's feature flags */
+ file->feature_flags = feature_flags;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5FD_set_feature_flags() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FD_get_fs_type_map
*
* Purpose: Retrieve the free space type mapping for the VFD
diff --git a/src/H5FDdirect.c b/src/H5FDdirect.c
index f88fb1e..27088a4 100644
--- a/src/H5FDdirect.c
+++ b/src/H5FDdirect.c
@@ -697,6 +697,7 @@ H5FD_direct_query(const H5FD_t H5_ATTR_UNUSED * _f, unsigned long *flags /* out
*flags |= H5FD_FEAT_ACCUMULATE_METADATA; /* OK to accumulate metadata for faster writes */
*flags |= H5FD_FEAT_DATA_SIEVE; /* OK to perform data sieving for faster raw data reads & writes */
*flags |= H5FD_FEAT_AGGREGATE_SMALLDATA; /* OK to aggregate "small" raw data allocations */
+ *flags |= H5FD_FEAT_SUPPORTS_SWMR_IO; /* VFD supports the single-writer/multiple-readers (SWMR) pattern */
}
FUNC_LEAVE_NOAPI(SUCCEED)
diff --git a/src/H5FDint.c b/src/H5FDint.c
index e7cb2c0..933752c 100644
--- a/src/H5FDint.c
+++ b/src/H5FDint.c
@@ -180,7 +180,15 @@ H5FD_read(H5FD_t *file, const H5P_genplist_t *dxpl, H5FD_mem_t type, haddr_t add
if(HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type)))
HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed")
- if((addr + file->base_addr + size) > eoa)
+
+ /*
+ * If the file is open for SWMR read access, allow access to data past
+ * the end of the allocated space (the 'eoa'). This is done because the
+ * eoa stored in the file's superblock might be out of sync with the
+ * objects being written within the file by the application performing
+ * SWMR write operations.
+ */
+ if(!file->swmr_read && ((addr + file->base_addr + size) > eoa))
HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, addr = %llu, size=%llu, eoa=%llu",
(unsigned long long)(addr+ file->base_addr), (unsigned long long)size, (unsigned long long)eoa)
diff --git a/src/H5FDlog.c b/src/H5FDlog.c
index a8228e0..1273dba 100644
--- a/src/H5FDlog.c
+++ b/src/H5FDlog.c
@@ -895,6 +895,7 @@ H5FD_log_query(const H5FD_t *_file, unsigned long *flags /* out */)
*flags |= H5FD_FEAT_DATA_SIEVE; /* OK to perform data sieving for faster raw data reads & writes */
*flags |= H5FD_FEAT_AGGREGATE_SMALLDATA; /* OK to aggregate "small" raw data allocations */
*flags |= H5FD_FEAT_POSIX_COMPAT_HANDLE; /* VFD handle is POSIX I/O call compatible */
+ *flags |= H5FD_FEAT_SUPPORTS_SWMR_IO; /* VFD supports the single-writer/multiple-readers (SWMR) pattern */
/* Check for flags that are set by h5repart */
if(file && file->fam_to_sec2)
diff --git a/src/H5FDpkg.h b/src/H5FDpkg.h
index a0c1b3a..45bcfd8 100644
--- a/src/H5FDpkg.h
+++ b/src/H5FDpkg.h
@@ -59,6 +59,7 @@ H5_DLL herr_t H5FD_free_real(H5FD_t *file, hid_t dxpl_id, H5FD_mem_t type,
/* Testing functions */
#ifdef H5FD_TESTING
+H5_DLL hbool_t H5FD_supports_swmr_test(const char *vfd_name);
#endif /* H5FD_TESTING */
#endif /* _H5FDpkg_H */
diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h
index 0f195ce..2c18659 100644
--- a/src/H5FDprivate.h
+++ b/src/H5FDprivate.h
@@ -134,6 +134,7 @@ H5_DLL herr_t H5FD_set_eoa(H5FD_t *file, H5FD_mem_t type, haddr_t addr);
H5_DLL haddr_t H5FD_get_eof(const H5FD_t *file, H5FD_mem_t type);
H5_DLL haddr_t H5FD_get_maxaddr(const H5FD_t *file);
H5_DLL herr_t H5FD_get_feature_flags(const H5FD_t *file, unsigned long *feature_flags);
+H5_DLL herr_t H5FD_set_feature_flags(H5FD_t *file, unsigned long feature_flags);
H5_DLL herr_t H5FD_get_fs_type_map(const H5FD_t *file, H5FD_mem_t *type_map);
H5_DLL herr_t H5FD_read(H5FD_t *file, const H5P_genplist_t *dxpl, H5FD_mem_t type,
haddr_t addr, size_t size, void *buf/*out*/);
diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h
index 4183d14..cabca06 100644
--- a/src/H5FDpublic.h
+++ b/src/H5FDpublic.h
@@ -234,6 +234,11 @@ typedef enum H5F_mem_t H5FD_mem_t;
* image to store in memory.
*/
#define H5FD_FEAT_CAN_USE_FILE_IMAGE_CALLBACKS 0x00000800
+ /*
+ * Defining H5FD_FEAT_SUPPORTS_SWMR_IO for a VFL driver means that the
+ * driver supports the single-writer/multiple-readers I/O pattern.
+ */
+#define H5FD_FEAT_SUPPORTS_SWMR_IO 0x00001000
/* Forward declaration */
typedef struct H5FD_t H5FD_t;
@@ -297,6 +302,8 @@ struct H5FD_t {
unsigned long feature_flags; /* VFL Driver feature Flags */
haddr_t maxaddr; /* For this file, overrides class */
haddr_t base_addr; /* Base address for HDF5 data w/in file */
+ hbool_t swmr_read; /* Whether the file is open for SWMR read access */
+ /* Information from file open flags, for SWMR access */
/* Space allocation management fields */
hsize_t threshold; /* Threshold for alignment */
diff --git a/src/H5FDsec2.c b/src/H5FDsec2.c
index bb8f004..149715e 100644
--- a/src/H5FDsec2.c
+++ b/src/H5FDsec2.c
@@ -530,6 +530,7 @@ H5FD_sec2_query(const H5FD_t *_file, unsigned long *flags /* out */)
*flags |= H5FD_FEAT_DATA_SIEVE; /* OK to perform data sieving for faster raw data reads & writes */
*flags |= H5FD_FEAT_AGGREGATE_SMALLDATA; /* OK to aggregate "small" raw data allocations */
*flags |= H5FD_FEAT_POSIX_COMPAT_HANDLE; /* VFD handle is POSIX I/O call compatible */
+ *flags |= H5FD_FEAT_SUPPORTS_SWMR_IO; /* VFD supports the single-writer/multiple-readers (SWMR) pattern */
/* Check for flags that are set by h5repart */
if(file && file->fam_to_sec2)
diff --git a/src/H5FDstdio.c b/src/H5FDstdio.c
index d5b3d40..bc0b342 100644
--- a/src/H5FDstdio.c
+++ b/src/H5FDstdio.c
@@ -114,6 +114,7 @@ typedef struct H5FD_stdio_t {
HANDLE hFile; /* Native windows file handle */
#endif /* H5_HAVE_WIN32_API */
+
} H5FD_stdio_t;
/* Use similar structure as in H5private.h by defining Windows stuff first. */
@@ -558,7 +559,11 @@ H5FD_stdio_query(const H5FD_t *_f, unsigned long /*OUT*/ *flags)
/* Quiet the compiler */
_f=_f;
- /* Set the VFL feature flags that this driver supports */
+ /* Set the VFL feature flags that this driver supports.
+ *
+ * Note that this VFD does not support SWMR due to the unpredictable
+ * nature of the buffering layer.
+ */
if(flags) {
*flags = 0;
*flags|=H5FD_FEAT_AGGREGATE_METADATA; /* OK to aggregate metadata allocations */
diff --git a/src/H5FDtest.c b/src/H5FDtest.c
new file mode 100644
index 0000000..9d2f2e9
--- /dev/null
+++ b/src/H5FDtest.c
@@ -0,0 +1,119 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5FDtest.c
+ * Fall 2014
+ *
+ * Purpose: File driver testing routines.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5FDmodule.h" /* This source code file is part of the H5FD module */
+#define H5FD_TESTING /* Suppress warning about H5FD testing funcs */
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5FDpkg.h" /* File Drivers */
+
+/****************/
+/* Local Macros */
+/****************/
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Package Typedefs */
+/********************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FD_supports_swmr_test()
+ *
+ * Purpose: Determines if a VFD supports SWMR.
+ *
+ * The function determines SWMR support by inspecting the
+ * HDF5_DRIVER environment variable, not by checking the
+ * VFD feature flags (which do not exist until the driver
+ * is instantiated).
+ *
+ * See test/Makefile.am for a list of the VFD strings.
+ *
+ * This function is only intended for use in the test code.
+ *
+ * Return: TRUE (1) if the VFD supports SWMR I/O or vfd_name is
+ * NULL or the empty string (which implies the default VFD).
+ *
+ * FALSE (0) if it does not
+ *
+ * This function cannot fail at this time so there is no
+ * error return value.
+ *
+ * Programmer: Dana Robinson
+ * Fall 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+hbool_t
+H5FD_supports_swmr_test(const char *vfd_name)
+{
+ hbool_t ret_value = FALSE;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ if(!vfd_name || !HDstrcmp(vfd_name, ""))
+ ret_value = TRUE;
+ else
+ ret_value = !HDstrcmp(vfd_name, "direct")
+ || !HDstrcmp(vfd_name, "log")
+ || !HDstrcmp(vfd_name, "sec2");
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD_supports_swmr_test() */
+
diff --git a/src/H5FS.c b/src/H5FS.c
index 90f04ac..30ea330 100644
--- a/src/H5FS.c
+++ b/src/H5FS.c
@@ -132,6 +132,7 @@ HDfprintf(stderr, "%s: Creating free space manager, nclasses = %Zu\n", FUNC, ncl
fspace->expand_percent = fs_create->expand_percent;
fspace->max_sect_addr = fs_create->max_sect_addr;
fspace->max_sect_size = fs_create->max_sect_size;
+ fspace->swmr_write = (H5F_INTENT(f) & H5F_ACC_SWMR_WRITE) > 0;
fspace->alignment = alignment;
fspace->threshold = threshold;
@@ -1005,6 +1006,84 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5FS_depend
+ *
+ * Purpose: Make a child flush dependency between the free space
+ * manager and another piece of metadata in the file.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FS_depend(H5AC_info_t *parent_entry, H5FS_t *fs)
+{
+ /* Local variables */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+#ifdef QAK
+HDfprintf(stderr, "%s: Called\n", FUNC);
+#endif /* QAK */
+
+ /*
+ * Check arguments.
+ */
+ HDassert(fs);
+
+ /* Set up flush dependency between parent entry and free space manager */
+ if(H5FS__create_flush_depend(parent_entry, (H5AC_info_t *)fs) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on file metadata")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FS_depend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FS_undepend
+ *
+ * Purpose: Remove a child flush dependency between the free space
+ * manager and another piece of metadata in the file.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FS_undepend(H5AC_info_t *parent_entry, H5FS_t *fs)
+{
+ /* Local variables */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+#ifdef QAK
+HDfprintf(stderr, "%s: Called\n", FUNC);
+#endif /* QAK */
+
+ /*
+ * Check arguments.
+ */
+ HDassert(fs);
+
+ /* Remove flush dependency between parent entry and free space manager */
+ if(H5FS__destroy_flush_depend(parent_entry, (H5AC_info_t *)fs) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency on file metadata")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FS_undepend() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FS__hdr_dest
*
* Purpose: Destroys a free space header in memory.
diff --git a/src/H5FScache.c b/src/H5FScache.c
index 2f7dda8..657c5c4 100644
--- a/src/H5FScache.c
+++ b/src/H5FScache.c
@@ -78,7 +78,10 @@ static herr_t H5FS__sinfo_serialize_sect_cb(void *_item, void H5_ATTR_UNUSED *ke
static herr_t H5FS__sinfo_serialize_node_cb(void *_item, void H5_ATTR_UNUSED *key, void *_udata);
/* Metadata cache callbacks */
-static herr_t H5FS__cache_hdr_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5FS__cache_hdr_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5FS__cache_hdr_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5FS__cache_hdr_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5FS__cache_hdr_image_len(const void *thing, size_t *image_len,
@@ -91,7 +94,10 @@ static herr_t H5FS__cache_hdr_serialize(const H5F_t *f, void *image,
size_t len, void *thing);
static herr_t H5FS__cache_hdr_free_icr(void *thing);
-static herr_t H5FS__cache_sinfo_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5FS__cache_sinfo_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5FS__cache_sinfo_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5FS__cache_sinfo_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5FS__cache_sinfo_image_len(const void *thing, size_t *image_len,
@@ -102,6 +108,7 @@ static herr_t H5FS__cache_sinfo_pre_serialize(const H5F_t *f, hid_t dxpl_id,
unsigned *flags);
static herr_t H5FS__cache_sinfo_serialize(const H5F_t *f, void *image,
size_t len, void *thing);
+static herr_t H5FS__cache_sinfo_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5FS__cache_sinfo_free_icr(void *thing);
@@ -116,6 +123,7 @@ const H5AC_class_t H5AC_FSPACE_HDR[1] = {{
H5FD_MEM_FSPACE_HDR, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5FS__cache_hdr_get_load_size, /* 'get_load_size' callback */
+ H5FS__cache_hdr_verify_chksum, /* 'verify_chksum' callback */
H5FS__cache_hdr_deserialize, /* 'deserialize' callback */
H5FS__cache_hdr_image_len, /* 'image_len' callback */
H5FS__cache_hdr_pre_serialize, /* 'pre_serialize' callback */
@@ -133,11 +141,12 @@ const H5AC_class_t H5AC_FSPACE_SINFO[1] = {{
H5FD_MEM_FSPACE_SINFO, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5FS__cache_sinfo_get_load_size, /* 'get_load_size' callback */
+ H5FS__cache_sinfo_verify_chksum, /* 'verify_chksum' callback */
H5FS__cache_sinfo_deserialize, /* 'deserialize' callback */
H5FS__cache_sinfo_image_len, /* 'image_len' callback */
H5FS__cache_sinfo_pre_serialize, /* 'pre_serialize' callback */
H5FS__cache_sinfo_serialize, /* 'serialize' callback */
- NULL, /* 'notify' callback */
+ H5FS__cache_sinfo_notify, /* 'notify' callback */
H5FS__cache_sinfo_free_icr, /* 'free_icr' callback */
NULL, /* 'clear' callback */
NULL, /* 'fsf_size' callback */
@@ -169,9 +178,11 @@ const H5AC_class_t H5AC_FSPACE_SINFO[1] = {{
*-------------------------------------------------------------------------
*/
static herr_t
-H5FS__cache_hdr_get_load_size(const void *_udata, size_t *image_len)
+H5FS__cache_hdr_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5FS_hdr_cache_ud_t *udata = (const H5FS_hdr_cache_ud_t *)_udata; /* User-data for metadata cache callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5FS_hdr_cache_ud_t *udata = (H5FS_hdr_cache_ud_t *)_udata; /* User-data for metadata cache callback */
FUNC_ENTER_STATIC_NOERR
@@ -180,14 +191,55 @@ H5FS__cache_hdr_get_load_size(const void *_udata, size_t *image_len)
HDassert(udata->f);
HDassert(image_len);
- /* Set the image length size */
- *image_len = (size_t)H5FS_HEADER_SIZE(udata->f);
+ if(image == NULL) {
+ /* Set the image length size */
+ *image_len = (size_t)H5FS_HEADER_SIZE(udata->f);
+ } else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5FS__cache_hdr_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5FS__cache_hdr_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+htri_t
+H5FS__cache_hdr_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FS__cache_hdr_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FS__cache_hdr_deserialize
*
* Purpose: Given a buffer containing the on disk image of the free space
@@ -212,7 +264,6 @@ H5FS__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
H5FS_hdr_cache_ud_t *udata = (H5FS_hdr_cache_ud_t *)_udata; /* User data for callback */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
unsigned nclasses; /* Number of section classes */
H5FS_t *ret_value = NULL; /* Return value */
@@ -285,8 +336,7 @@ H5FS__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
/* Allocated size of serialized free space sections */
H5F_DECODE_LENGTH(udata->f, image, fspace->alloc_sect_size);
- /* Compute checksum on indirect block */
- computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
@@ -294,10 +344,6 @@ H5FS__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) <= len);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_FSPACE, H5E_BADVALUE, NULL, "incorrect metadata checksum for fractal heap indirect block")
-
/* Set return value */
ret_value = fspace;
@@ -796,10 +842,12 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5FS__cache_sinfo_get_load_size(const void *_udata, size_t *image_len)
+H5FS__cache_sinfo_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5FS_t *fspace; /* free space manager */
- const H5FS_sinfo_cache_ud_t *udata = (const H5FS_sinfo_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ const H5FS_t *fspace; /* free space manager */
+ H5FS_sinfo_cache_ud_t *udata = (H5FS_sinfo_cache_ud_t *)_udata; /* User data for callback */
FUNC_ENTER_STATIC_NOERR
@@ -810,13 +858,54 @@ H5FS__cache_sinfo_get_load_size(const void *_udata, size_t *image_len)
HDassert(fspace->sect_size > 0);
HDassert(image_len);
- *image_len = (size_t)(fspace->sect_size);
+ if(image == NULL)
+ *image_len = (size_t)(fspace->sect_size);
+ else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5FS__cache_sinfo_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5FS__cache_sinfo_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+htri_t
+H5FS__cache_sinfo_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FS__cache_sinfo_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FS__cache_sinfo_deserialize
*
* Purpose: Given a buffer containing the on disk image of the free space
@@ -843,7 +932,6 @@ H5FS__cache_sinfo_deserialize(const void *_image, size_t len, void *_udata,
size_t old_sect_size; /* Old section size */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
uint32_t stored_chksum; /* Stored metadata checksum */
- uint32_t computed_chksum; /* Computed metadata checksum */
void * ret_value = NULL; /* Return value */
FUNC_ENTER_STATIC
@@ -950,16 +1038,11 @@ H5FS__cache_sinfo_deserialize(const void *_image, size_t len, void *_udata,
HDassert(old_tot_space == fspace->tot_space);
} /* end if */
- /* Compute checksum on indirect block */
- computed_chksum = H5_checksum_metadata((const uint8_t *)_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_FSPACE, H5E_BADVALUE, NULL, "incorrect metadata checksum for fractal heap indirect block")
-
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == old_sect_size);
@@ -1186,6 +1269,64 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5FS__cache_sinfo_notify
+ *
+ * Purpose: Handle cache action notifications
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FS__cache_sinfo_notify(H5AC_notify_action_t action, void *_thing)
+{
+ H5FS_sinfo_t *sinfo = (H5FS_sinfo_t *)_thing;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Sanity check */
+ HDassert(sinfo);
+
+ /* Check if the file was opened with SWMR-write access */
+ if(sinfo->fspace->swmr_write) {
+ /* Determine which action to take */
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ /* Create flush dependency on parent */
+ if(H5FS__create_flush_depend((H5AC_info_t *)sinfo->fspace, (H5AC_info_t *)sinfo) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency between data block and header, address = %llu", (unsigned long long)sinfo->fspace->sect_addr)
+ break;
+
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* Destroy flush dependency on parent */
+ if(H5FS__destroy_flush_depend((H5AC_info_t *)sinfo->fspace, (H5AC_info_t *)sinfo) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ break;
+
+ default:
+#ifdef NDEBUG
+ HGOTO_ERROR(H5E_FSPACE, H5E_BADVALUE, FAIL, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FS__cache_sinfo_notify() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FS__cache_sinfo_free_icr
*
* Purpose: Free the memory used for the in core representation of the
diff --git a/src/H5FSint.c b/src/H5FSint.c
new file mode 100644
index 0000000..60cedd5
--- /dev/null
+++ b/src/H5FSint.c
@@ -0,0 +1,145 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5FSint.c
+ * Fall 2012
+ * Dana Robinson <derobins@hdfgroup.org>
+ *
+ * Purpose: Internal routines for free space managers.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/**********************/
+/* Module Declaration */
+/**********************/
+
+#include "H5FSmodule.h" /* This source code file is part of the H5FS module */
+
+
+/***********************/
+/* Other Packages Used */
+/***********************/
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Eprivate.h" /* Error Handling */
+#include "H5FSpkg.h" /* Free Space Managers */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Package Typedefs */
+/********************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FS__create_flush_depend
+ *
+ * Purpose: Create a flush dependency between two data structure components
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FS__create_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Sanity check */
+ HDassert(parent_entry);
+ HDassert(child_entry);
+
+ /* Create a flush dependency between parent and child entry */
+ if(H5AC_create_flush_dependency(parent_entry, child_entry) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FS__create_flush_depend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5FS__destroy_flush_depend
+ *
+ * Purpose: Destroy a flush dependency between two data structure components
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FS__destroy_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Sanity check */
+ HDassert(parent_entry);
+ HDassert(child_entry);
+
+ /* Destroy a flush dependency between parent and child entry */
+ if(H5AC_destroy_flush_dependency(parent_entry, child_entry) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FS__destroy_flush_depend() */
+
diff --git a/src/H5FSpkg.h b/src/H5FSpkg.h
index 08c2529..bb5ba7b 100644
--- a/src/H5FSpkg.h
+++ b/src/H5FSpkg.h
@@ -179,6 +179,7 @@ struct H5FS_t {
haddr_t addr; /* Address of free space header on disk */
size_t hdr_size; /* Size of free space header on disk */
H5FS_sinfo_t *sinfo; /* Section information */
+ hbool_t swmr_write; /* Flag indicating the file is opened with SWMR-write access */
unsigned sinfo_lock_count; /* # of times the section info has been locked */
hbool_t sinfo_protected; /* Whether the section info was protected when locked */
hbool_t sinfo_modified; /* Whether the section info has been modified while locked */
@@ -222,6 +223,12 @@ H5FL_EXTERN(H5FS_t);
/* Package Private Prototypes */
/******************************/
+/* Generic routines */
+H5_DLL herr_t H5FS__create_flush_depend(H5AC_info_t *parent_entry,
+ H5AC_info_t *child_entry);
+H5_DLL herr_t H5FS__destroy_flush_depend(H5AC_info_t *parent_entry,
+ H5AC_info_t *child_entry);
+
/* Free space manager header routines */
H5_DLL H5FS_t *H5FS__new(const H5F_t *f, uint16_t nclasses,
const H5FS_section_class_t *classes[], void *cls_init_udata);
diff --git a/src/H5FSprivate.h b/src/H5FSprivate.h
index c5ad38f..2dacce6 100644
--- a/src/H5FSprivate.h
+++ b/src/H5FSprivate.h
@@ -31,6 +31,7 @@
#include "H5FSpublic.h"
/* Private headers needed by this file */
+#include "H5ACprivate.h" /* Metadata cache */
#include "H5Fprivate.h" /* File access */
#include "H5FLprivate.h" /* Free Lists */
@@ -183,6 +184,8 @@ H5_DLL herr_t H5FS_close(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace);
H5_DLL herr_t H5FS_alloc_hdr(H5F_t *f, H5FS_t *fspace, haddr_t *fs_addr, hid_t dxpl_id);
H5_DLL herr_t H5FS_alloc_sect(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id);
H5_DLL herr_t H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id);
+H5_DLL herr_t H5FS_depend(H5AC_info_t *parent_entry, H5FS_t *fs);
+H5_DLL herr_t H5FS_undepend(H5AC_info_t *parent_entry, H5FS_t *fs);
/* Free space section routines */
H5_DLL herr_t H5FS_sect_add(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
diff --git a/src/H5Faccum.c b/src/H5Faccum.c
index 53f51ad..d8cd614 100644
--- a/src/H5Faccum.c
+++ b/src/H5Faccum.c
@@ -418,6 +418,11 @@ done:
* koziol@hdfgroup.org
* Jan 10 2008
*
+ * Modifications:
+ * Vailin Choi; June 2013
+ * This is a fix for SWMR:
+ * For a large write that is >= H5F_ACCUM_MAX_SIZE,
+ * flush the metadata in the accumulator first before the write.
*-------------------------------------------------------------------------
*/
herr_t
@@ -735,6 +740,12 @@ HDmemset(accum->buf + size, 0, (accum->alloc_size - size));
} /* end else */
} /* end if */
else {
+ if((H5F_INTENT(fio_info->f) & H5F_ACC_SWMR_WRITE) > 0) {
+ /* Flush if dirty and reset accumulator */
+ if(H5F__accum_reset(fio_info, TRUE) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_CANTRESET, FAIL, "can't reset accumulator")
+ }
+
/* Write the data */
if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
diff --git a/src/H5Fint.c b/src/H5Fint.c
index 94a5488..cc693ca 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -127,6 +127,7 @@ H5F_get_access_plist(H5F_t *f, hbool_t app_ref)
{
H5P_genplist_t *new_plist; /* New property list */
H5P_genplist_t *old_plist; /* Old property list */
+ H5F_object_flush_t flush_info; /* Object flush property values */
H5FD_driver_prop_t driver_prop; /* Property for driver ID & info */
hbool_t driver_prop_copied = FALSE; /* Whether the driver property has been set up */
unsigned efc_size = 0;
@@ -168,6 +169,17 @@ H5F_get_access_plist(H5F_t *f, hbool_t app_ref)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set 'small data' cache size")
if(H5P_set(new_plist, H5F_ACS_LATEST_FORMAT_NAME, &(f->shared->latest_format)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set 'latest format' flag")
+ if(H5P_set(new_plist, H5F_ACS_METADATA_READ_ATTEMPTS_NAME, &(f->shared->read_attempts)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set 'read attempts ' flag")
+
+ /* Obtain object flush property values */
+ flush_info.func = f->shared->object_flush.func;
+ flush_info.udata = f->shared->object_flush.udata;
+
+ /* Set values */
+ if(H5P_set(new_plist, H5F_ACS_OBJECT_FLUSH_CB_NAME, &flush_info) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set object flush callback")
+
if(f->shared->efc)
efc_size = H5F_efc_max_nfiles(f->shared->efc);
if(H5P_set(new_plist, H5F_ACS_EFC_SIZE_NAME, &efc_size) < 0)
@@ -621,6 +633,19 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get sieve buffer size")
if(H5P_get(plist, H5F_ACS_LATEST_FORMAT_NAME, &(f->shared->latest_format)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get 'latest format' flag")
+
+ /* For latest format or SWMR_WRITE, activate all latest version support */
+ if((f->shared->latest_format) || (H5F_INTENT(f) & H5F_ACC_SWMR_WRITE))
+ f->shared->latest_flags |= H5F_LATEST_ALL_FLAGS;
+
+ if(H5P_get(plist, H5F_ACS_USE_MDC_LOGGING_NAME, &(f->shared->use_mdc_logging)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get 'use mdc logging' flag")
+ if(H5P_get(plist, H5F_ACS_START_MDC_LOG_ON_ACCESS_NAME, &(f->shared->start_mdc_log_on_access)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get 'start mdc log on access' flag")
+ /* Require the latest format to use SWMR */
+ /* (Need to revisit this when the 1.10 release is made, and require
+ * 1.10 or later -QAK)
+ */
if(H5P_get(plist, H5F_ACS_META_BLOCK_SIZE_NAME, &(f->shared->meta_aggr.alloc_size)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get metadata cache size")
f->shared->meta_aggr.feature_flag = H5FD_FEAT_AGGREGATE_METADATA;
@@ -639,6 +664,15 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t
HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad maximum address from VFD")
if(H5FD_get_feature_flags(lf, &f->shared->feature_flags) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "can't get feature flags from VFD")
+
+ /* Require the SWMR feature flag if SWMR I/O is desired */
+ if(!H5F_HAS_FEATURE(f, H5FD_FEAT_SUPPORTS_SWMR_IO) && (H5F_INTENT(f) & (H5F_ACC_SWMR_WRITE | H5F_ACC_SWMR_READ)))
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "must use a SWMR-compatible VFD when SWMR is specified")
+
+ /* Require a POSIX compatible VFD to use SWMR feature */
+ /* (It's reasonable to try to expand this to other VFDs eventually -QAK) */
+ if(!H5F_HAS_FEATURE(f, H5FD_FEAT_POSIX_COMPAT_HANDLE) && (H5F_INTENT(f) & (H5F_ACC_SWMR_WRITE | H5F_ACC_SWMR_READ)))
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "must use POSIX compatible VFD with SWMR write access")
if(H5FD_get_fs_type_map(lf, f->shared->fs_type_map) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "can't get free space type mapping from VFD")
if(H5MF_init_merge_flags(f) < 0)
@@ -655,6 +689,49 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t
*/
f->shared->use_tmp_space = !H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI);
+ /* Retrieve the # of read attempts here so that sohm in superblock will get the correct # of attempts */
+ if(H5P_get(plist, H5F_ACS_METADATA_READ_ATTEMPTS_NAME, &f->shared->read_attempts) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get the # of read attempts")
+
+ /* When opening file with SWMR access, the # of read attempts is H5F_SWMR_METADATA_READ_ATTEMPTS if not set */
+ /* When opening file without SWMR access, the # of read attempts is always H5F_METADATA_READ_ATTEMPTS (set or not set) */
+ if(H5F_INTENT(f) & (H5F_ACC_SWMR_READ | H5F_ACC_SWMR_WRITE)) {
+ /* If no value for read attempts has been set, use the default */
+ if(!f->shared->read_attempts)
+ f->shared->read_attempts = H5F_SWMR_METADATA_READ_ATTEMPTS;
+
+ /* Turn off accumulator with SWMR */
+ f->shared->feature_flags &= ~(unsigned)H5FD_FEAT_ACCUMULATE_METADATA;
+ if(H5FD_set_feature_flags(f->shared->lf, f->shared->feature_flags) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, NULL, "can't set feature_flags in VFD")
+ } /* end if */
+ else
+ f->shared->read_attempts = H5F_METADATA_READ_ATTEMPTS;
+
+ /* Determine the # of bins for metadata read retries */
+ if(H5F_set_retries(f) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "can't set retries and retries_nbins")
+
+ /* Get the metadata cache log location (if we're logging) */
+ {
+ char *mdc_log_location = NULL; /* location of metadata cache log location */
+
+ if(H5P_get(plist, H5F_ACS_MDC_LOG_LOCATION_NAME, &mdc_log_location) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get mdc log location")
+ if(mdc_log_location != NULL) {
+ size_t len = HDstrlen(mdc_log_location);
+ if(NULL == (f->shared->mdc_log_location = (char *)HDcalloc(len + 1, sizeof(char))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "can't allocate memory for mdc log file name")
+ HDstrncpy(f->shared->mdc_log_location, mdc_log_location, len);
+ }
+ else
+ f->shared->mdc_log_location = NULL;
+ }
+
+ /* Get object flush callback information */
+ if(H5P_get(plist, H5F_ACS_OBJECT_FLUSH_CB_NAME, &(f->shared->object_flush)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "can't get object flush cb info")
+
/*
* Create a metadata cache with the specified number of elements.
* The cache might be created with a different number of elements and
@@ -765,10 +842,19 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
/* Flush the file again (if requested), as shutting down the
* free space manager may dirty some data structures again.
*/
- if(flush)
+ if(flush) {
+ /* Clear status_flags */
+ f->shared->sblock->status_flags &= ~H5F_SUPER_WRITE_ACCESS;
+ f->shared->sblock->status_flags &= ~H5F_SUPER_SWMR_WRITE_ACCESS;
+ /* Mark superblock dirty in cache, so change will get encoded */
+ /* Push error, but keep going*/
+ if(H5F_super_dirty(f) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
+
if(H5F_flush(f, dxpl_id, TRUE) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
+ }
} /* end if */
/* if it exists, unpin the driver information block cache entry,
@@ -796,6 +882,10 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "problems closing file")
+ /* Clean up the metadata cache log location string */
+ if(f->shared->mdc_log_location)
+ HDfree(f->shared->mdc_log_location);
+
/*
* Do not close the root group since we didn't count it, but free
* the memory associated with it.
@@ -904,6 +994,37 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
* The ACCESS_PARMS argument is optional. A null pointer will
* cause the default file access parameters to be used.
*
+ * The following two tables show results of file opens for single and concurrent access:
+ *
+ * SINGLE PROCESS ACCESS CONCURRENT ACCESS
+ *
+ * #1st open# #1st open#
+ * -- SR SR -- -- SR SR -- -- SR SR -- -- SR SR --
+ * -- -- SW SW SW SW -- -- -- -- SW SW SW SW -- --
+ * W W W W R R R R W W W W R R R R
+ * #2nd open# #2nd open#
+ * -------------------------- --------------------------
+ * -- -- W | s x x s x x f f | -- -- W | f x x f x x f f |
+ * SR -- W | x x x x x x x x | SR -- W | x x x x x x x x |
+ * SR SW W | x x x x x x x x | SR SW W | x x x x x x x x |
+ * -- SW W | f x x s x x f f | -- SW W | f x x f x x f f |
+ * -- SW R | x x x x x x x x | -- SW R | x x x x x x x x |
+ * SR SW R | x x x x x x x x | SR SW R | x x x x x x x x |
+ * SR -- R | s x x s x x s f | SR -- R | f x x s x x s s |
+ * -- -- R | s x x s x x s s | -- -- R | f x x f x x s s |
+ * -------------------------- --------------------------
+ *
+ * Notations:
+ * W: H5F_ACC_RDWR
+ * R: H5F_ACC_RDONLY
+ * SW: H5F_ACC_SWMR_WRITE
+ * SR: H5F_ACC_SWMR_READ
+ *
+ * x: the first open or second open itself fails due to invalid flags combination
+ * f: the open fails with flags combination from both the first and second opens
+ * s: the open succeeds with flags combination from both the first and second opens
+ *
+ *
* Return: Success: A new file pointer.
* Failure: NULL
*
@@ -923,6 +1044,8 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
H5FD_class_t *drvr; /*file driver class info */
H5P_genplist_t *a_plist; /*file access property list */
H5F_close_degree_t fc_degree; /*file close degree */
+ hbool_t set_flag = FALSE; /*set the status_flags in the superblock */
+ hbool_t clear = FALSE; /*clear the status_flags */
H5F_t *ret_value = NULL; /*actual return value */
FUNC_ENTER_NOAPI(NULL)
@@ -986,7 +1109,8 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
* file (since we can't do that while the file is open), or if the
* request was to create a non-existent file (since the file already
* exists), or if the new request adds write access (since the
- * readers don't expect the file to change under them).
+ * readers don't expect the file to change under them), or if the
+ * SWMR write/read access flags don't agree.
*/
if(H5FD_close(lf) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to close low-level file info")
@@ -997,6 +1121,11 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
if((flags & H5F_ACC_RDWR) && 0 == (shared->flags & H5F_ACC_RDWR))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "file is already open for read-only")
+ if((flags & H5F_ACC_SWMR_WRITE) && 0 == (shared->flags & H5F_ACC_SWMR_WRITE))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "SWMR write access flag not the same for file that is already open")
+ if((flags & H5F_ACC_SWMR_READ) && !((shared->flags & H5F_ACC_SWMR_WRITE) || (shared->flags & H5F_ACC_SWMR_READ) || (shared->flags & H5F_ACC_RDWR)))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "SWMR read access flag not the same for file that is already open")
+
/* Allocate new "high-level" file struct */
if((file = H5F_new(shared, flags, fcpl_id, fapl_id, NULL)) == NULL)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to create new file object")
@@ -1009,18 +1138,25 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
* open it are different than the desired flags. Close the tentative
* file and open it for real.
*/
- if(H5FD_close(lf) < 0) {
- file = NULL; /*to prevent destruction of wrong file*/
+ if(H5FD_close(lf) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to close low-level file info")
- } /* end if */
- if(NULL == (lf = H5FD_open(name, flags, fapl_id, HADDR_UNDEF))) {
- file = NULL; /*to prevent destruction of wrong file*/
+
+ if(NULL == (lf = H5FD_open(name, flags, fapl_id, HADDR_UNDEF)))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to open file")
- } /* end if */
+
} /* end if */
- if(NULL == (file = H5F_new(NULL, flags, fcpl_id, fapl_id, lf)))
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to create new file object")
+ /* Place an advisory lock on the file */
+ if((H5FD_lock(lf, (hbool_t)((flags & H5F_ACC_RDWR) ? TRUE : FALSE)) < 0) ||
+ (NULL == (file = H5F_new(NULL, flags, fcpl_id, fapl_id, lf)))) {
+ if(H5FD_close(lf) < 0) /* Closing will remove the lock */
+ HDONE_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to close low-level file info")
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to lock the file or initialize file structure")
+ }
+
+ /* Need to set status_flags in the superblock if the driver has a 'lock' method */
+ if(drvr->lock)
+ set_flag = TRUE;
} /* end else */
/* Retain the name the file was opened with */
@@ -1054,7 +1190,7 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
} else if (1 == shared->nrefs) {
/* Read the superblock if it hasn't been read before. */
- if(H5F__super_read(file, dxpl_id) < 0)
+ if(H5F__super_read(file, dxpl_id, TRUE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_READERROR, NULL, "unable to read superblock")
/* Open the root group */
@@ -1075,6 +1211,16 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
if(H5P_get(a_plist, H5F_ACS_CLOSE_DEGREE_NAME, &fc_degree) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get file close degree")
+ /* This is a private property to clear the status_flags in the super block */
+ /* Used by h5clear and a routine in test/flush2.c to clear the test file's status_flags */
+ if(H5P_exist_plist(a_plist, H5F_ACS_CLEAR_STATUS_FLAGS_NAME) > 0) {
+ if(H5P_get(a_plist, H5F_ACS_CLEAR_STATUS_FLAGS_NAME, &clear) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get clearance for status_flags")
+ else if(clear) {
+ file->shared->sblock->status_flags = 0;
+ }
+ }
+
if(shared->nrefs == 1) {
if(fc_degree == H5F_CLOSE_DEFAULT)
shared->fc_degree = lf->cls->fc_degree;
@@ -1095,6 +1241,51 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
if(H5F_build_actual_name(file, a_plist, name, &file->actual_name) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to build actual name")
+ if(set_flag) {
+ if(H5F_INTENT(file) & H5F_ACC_RDWR) { /* Set and check consistency of status_flags */
+ /* Skip check of status_flags for file with < superblock version 3 */
+ if(file->shared->sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_3) {
+
+ if(file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS ||
+ file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "file is already open for write/SWMR write (may use <h5clear file> to clear file consistency flags)")
+ } /* version 3 superblock */
+
+ file->shared->sblock->status_flags |= H5F_SUPER_WRITE_ACCESS;
+ if(H5F_INTENT(file) & H5F_ACC_SWMR_WRITE)
+ file->shared->sblock->status_flags |= H5F_SUPER_SWMR_WRITE_ACCESS;
+
+ /* Flush the superblock */
+ if(H5F_super_dirty(file) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, NULL, "unable to mark superblock as dirty")
+ if(H5F_flush_tagged_metadata(file, (haddr_t)0, H5AC_dxpl_id) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, NULL, "unable to flush superblock")
+
+ /* Remove the file lock for SWMR_WRITE */
+ if(H5F_INTENT(file) & H5F_ACC_SWMR_WRITE) {
+ if(H5FD_unlock(file->shared->lf) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to unlock the file")
+ }
+ } else { /* H5F_ACC_RDONLY: check consistency of status_flags */
+ /* Skip check of status_flags for file with < superblock version 3 */
+ if(file->shared->sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_3) {
+
+ if(H5F_INTENT(file) & H5F_ACC_SWMR_READ) {
+ if((file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS &&
+ !(file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS))
+ ||
+ (!(file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS) &&
+ file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "file is not already open for SWMR writing")
+
+ } else if((file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS) ||
+ (file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "file is already open for write (may use <h5clear file> to clear file consistency flags)")
+
+ } /* version 3 superblock */
+ }
+ } /* end if set_flag */
+
/* Success */
ret_value = file;
@@ -2011,6 +2202,8 @@ H5F_get_file_image(H5F_t *file, void *buf_ptr, size_t buf_len)
/* test to see if a buffer was provided -- if not, we are done */
if(buf_ptr != NULL) {
size_t space_needed; /* size of file image */
+ hsize_t tmp;
+ size_t tmp_size;
/* Check for buffer too small */
if((haddr_t)buf_len < eoa)
@@ -2022,6 +2215,15 @@ H5F_get_file_image(H5F_t *file, void *buf_ptr, size_t buf_len)
/* (Note compensation for base address addition in internal routine) */
if(H5FD_read(fd_ptr, H5AC_ind_dxpl_g, H5FD_MEM_DEFAULT, 0, space_needed, buf_ptr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_READERROR, FAIL, "file image read request failed")
+
+ /* Offset to "status_flags" in the superblock */
+ tmp = H5F_SUPER_STATUS_FLAGS_OFF(file->shared->sblock->super_vers);
+ /* Size of "status_flags" depends on the superblock version */
+ tmp_size = H5F_SUPER_STATUS_FLAGS_SIZE(file->shared->sblock->super_vers);
+
+ /* Clear "status_flags" */
+ HDmemset((uint8_t *)(buf_ptr) + tmp, 0, tmp_size);
+
} /* end if */
done:
@@ -2030,6 +2232,129 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5F_track_metadata_read_retries
+ *
+ * Purpose: To track the # of a "retries" (log10) for a metadata item.
+ * This routine should be used only when:
+ * "retries" > 0
+ * f->shared->read_attempts > 1 (does not have retry when 1)
+ * f->shared->retries_nbins > 0 (calculated based on f->shared->read_attempts)
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Vailin Choi; October 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_track_metadata_read_retries(H5F_t *f, unsigned actype, unsigned retries)
+{
+ unsigned log_ind; /* Index to the array of retries based on log10 of retries */
+ double tmp; /* Temporary value, to keep compiler quiet */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(f->shared->read_attempts > 1);
+ HDassert(f->shared->retries_nbins > 0);
+ HDassert(retries > 0);
+ HDassert(retries < f->shared->read_attempts);
+ HDassert(actype < H5AC_NTYPES);
+
+ /* Allocate memory for retries */
+ if(NULL == f->shared->retries[actype])
+ if(NULL == (f->shared->retries[actype] = (uint32_t *)HDcalloc((size_t)f->shared->retries_nbins, sizeof(uint32_t))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
+
+ /* Index to retries based on log10 */
+ tmp = HDlog10((double)retries);
+ log_ind = (unsigned)tmp;
+ HDassert(log_ind < f->shared->retries_nbins);
+
+ /* Increment the # of the "retries" */
+ f->shared->retries[actype][log_ind]++;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5F_track_metadata_read_retries() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_set_retries
+ *
+ * Purpose: To initialize data structures for read retries:
+ * --zero out "retries"
+ * --set up "retries_nbins" based on read_attempts
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Vailin Choi; November 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_set_retries(H5F_t *f)
+{
+ double tmp; /* Temporary variable */
+
+ /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity check */
+ HDassert(f);
+
+ /* Initialize the tracking for metadata read retries */
+ HDmemset(f->shared->retries, 0, sizeof(f->shared->retries));
+
+ /* Initialize the # of bins for retries */
+ f->shared->retries_nbins = 0;
+ if(f->shared->read_attempts > 1) {
+ tmp = HDlog10((double)(f->shared->read_attempts - 1));
+ f->shared->retries_nbins = (unsigned)tmp + 1;
+ }
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5F_set_retries() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_object_flush_cb
+ *
+ * Purpose: To invoke the callback function for object flush that is set
+ * in the file's access property list.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Vailin Choi; October 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_object_flush_cb(H5F_t *f, hid_t obj_id)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(f->shared);
+
+ /* Invoke object flush callback if there is one */
+ if(f->shared->object_flush.func && f->shared->object_flush.func(obj_id, f->shared->object_flush.udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "object flush callback returns error")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5F_object_flush_cb() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5F__set_base_addr
*
* Purpose: Quick and dirty routine to set the file's 'base_addr' value
diff --git a/src/H5Fio.c b/src/H5Fio.c
index 04c4055..d312b47 100644
--- a/src/H5Fio.c
+++ b/src/H5Fio.c
@@ -177,3 +177,241 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F_block_write() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_flush_tagged_metadata
+ *
+ * Purpose: Flushes metadata with specified tag in the metadata cache
+ * to disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mike McGreevy
+ * September 9, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_flush_tagged_metadata(H5F_t * f, haddr_t tag, hid_t dxpl_id)
+{
+ H5F_io_info_t fio_info; /* I/O info for operation */
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Use tag to search for and flush associated metadata */
+ if(H5AC_flush_tagged_metadata(f, tag, dxpl_id)<0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush tagged metadata")
+
+ /* Set up I/O info for operation */
+ fio_info.f = f;
+
+ if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+
+
+ /* Flush and reset the accumulator */
+ if(H5F__accum_reset(&fio_info, TRUE) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_CANTRESET, FAIL, "can't reset accumulator")
+
+ /* Flush file buffers to disk. */
+ if(H5FD_flush(f->shared->lf, dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "low level flush failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value);
+} /* end H5F_flush_tagged_metadata */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_evict_tagged_metadata
+ *
+ * Purpose: Evicts metadata from the cache with specified tag.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mike McGreevy
+ * September 9, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_evict_tagged_metadata(H5F_t * f, haddr_t tag, hid_t dxpl_id)
+{
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Unpin the superblock, as this will be marked for eviction and it can't
+ be pinned. */
+ if(H5AC_unpin_entry(f->shared->sblock) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "unable to unpin superblock")
+ f->shared->sblock = NULL;
+
+ /* Evict the object's metadata */
+ if(H5AC_evict_tagged_metadata(f, tag, dxpl_id)<0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "unable to evict tagged metadata")
+
+ /* Re-read the superblock. */
+ if(H5F__super_read(f, dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_READERROR, FAIL, "unable to read superblock")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value);
+} /* end H5F_evict_tagged_metadata */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_evict_cache_entries
+ *
+ * Purpose: To evict all cache entries except the pinned superblock entry
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Dec 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_evict_cache_entries(H5F_t *f, hid_t dxpl_id)
+{
+ unsigned status = 0;
+ int32_t cur_num_entries;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ /* Evict all except pinned entries in the cache */
+ if(H5AC_evict(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "unable to evict all except pinned entries")
+
+ /* Retrieve status of the superblock */
+ if(H5AC_get_entry_status(f, (haddr_t)0, &status) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "unable to get entry status")
+
+ /* Verify status of the superblock entry in the cache */
+ if(!(status & H5AC_ES__IN_CACHE) || !(status & H5AC_ES__IS_PINNED))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "unable to get entry status")
+
+ /* Get the number of cache entries */
+ if(H5AC_get_cache_size(f->shared->cache, NULL, NULL, NULL, &cur_num_entries) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_get_cache_size() failed.")
+
+ /* Should be the only one left in the cache */
+ if(cur_num_entries != 1)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "number of cache entries is not correct")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value);
+} /* end H5F_evict_cache_entries() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_get_checksums
+ *
+ * Purpose: Decode checksum stored in the buffer
+ * Calculate checksum for the data in the buffer
+ *
+ * Note: Assumes that the checksum is the last data in the buffer
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Sept 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_get_checksums(const uint8_t *buf, size_t buf_size, uint32_t *s_chksum/*out*/, uint32_t *c_chksum/*out*/)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Check arguments */
+ HDassert(buf);
+ HDassert(buf_size);
+
+ /* Return the stored checksum */
+ if(s_chksum) {
+ const uint8_t *chk_p; /* Pointer into raw data buffer */
+
+ /* Offset to the checksum in the buffer */
+ chk_p = buf + buf_size - H5_SIZEOF_CHKSUM;
+
+ /* Decode the checksum stored in the buffer */
+ UINT32DECODE(chk_p, *s_chksum);
+ } /* end if */
+
+ /* Return the computed checksum for the buffer */
+ if(c_chksum)
+ *c_chksum = H5_checksum_metadata(buf, buf_size - H5_SIZEOF_CHKSUM, 0);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5F_get_checksums() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_read_check_metadata
+ *
+ * Purpose: Attempts to read and validate a piece of metadata that has
+ * checksum as follows:
+ * a) read the piece of metadata
+ * b) calculate checksum for the buffer of metadata
+ * c) decode the checksum stored in the buffer of metadata
+ * d) compare the computed checksum with its stored checksum
+ *
+ * The library will perform (a) to (d) above for "f->read_attempts"
+ * times or until the checksum comparison in (d) passes.
+ * This routine also records the # of retries via
+ * H5F_track_metadata_read_retries()
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Sept 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_read_check_metadata(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type,
+ unsigned actype, haddr_t addr, size_t read_size, size_t chk_size,
+ uint8_t *buf/*out*/)
+{
+ unsigned tries, max_tries; /* The # of read attempts */
+ unsigned retries; /* The # of retries */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Get the # of read attempts */
+ max_tries = tries = H5F_GET_READ_ATTEMPTS(f);
+
+ do {
+ /* Read header from disk */
+ if(H5F_block_read(f, type, addr, read_size, dxpl_id, buf) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read metadata")
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(buf, chk_size, &stored_chksum, &computed_chksum);
+
+ /* Verify checksum */
+ if(stored_chksum == computed_chksum)
+ break;
+ } while(--tries);
+
+ /* Check for too many tries */
+ if(tries == 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "incorrect metadatda checksum after all read attempts (%u) for %u bytes:c_chksum=%u, s_chkum=%u",
+ max_tries, chk_size, computed_chksum, stored_chksum)
+
+ /* Calculate and track the # of retries */
+ retries = max_tries - tries;
+ if(retries) /* Does not track 0 retry */
+ if(H5F_track_metadata_read_retries(f, actype, retries) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "cannot track read tries = %u ", retries)
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value);
+} /* end H5F_read_check_metadata */
diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h
index 2255085..f9cd89f 100644
--- a/src/H5Fpkg.h
+++ b/src/H5Fpkg.h
@@ -56,10 +56,11 @@
/* Superblock status flags */
#define H5F_SUPER_WRITE_ACCESS 0x01
#define H5F_SUPER_FILE_OK 0x02
-#define H5F_SUPER_ALL_FLAGS (H5F_SUPER_WRITE_ACCESS | H5F_SUPER_FILE_OK)
+#define H5F_SUPER_SWMR_WRITE_ACCESS 0x04
+#define H5F_SUPER_ALL_FLAGS (H5F_SUPER_WRITE_ACCESS | H5F_SUPER_FILE_OK | H5F_SUPER_SWMR_WRITE_ACCESS)
/* Mask for removing private file access flags */
-#define H5F_ACC_PUBLIC_FLAGS 0x001fu
+#define H5F_ACC_PUBLIC_FLAGS 0x007fu
/* Free space section+aggregator merge flags */
#define H5F_FS_MERGE_METADATA 0x01 /* Section can merge with metadata aggregator */
@@ -131,12 +132,39 @@
#define H5F_SUPERBLOCK_VARLEN_SIZE(v, sizeof_addr, sizeof_size) ( \
(v == 0 ? H5F_SUPERBLOCK_VARLEN_SIZE_V0(sizeof_addr, sizeof_size) : 0) \
+ (v == 1 ? H5F_SUPERBLOCK_VARLEN_SIZE_V1(sizeof_addr, sizeof_size) : 0) \
- + (v == 2 ? H5F_SUPERBLOCK_VARLEN_SIZE_V2(sizeof_addr) : 0))
+ + (v >= 2 ? H5F_SUPERBLOCK_VARLEN_SIZE_V2(sizeof_addr) : 0))
/* Total size of superblock, depends on superblock version */
#define H5F_SUPERBLOCK_SIZE(s) ( H5F_SUPERBLOCK_FIXED_SIZE \
+ H5F_SUPERBLOCK_VARLEN_SIZE((s)->super_vers, (s)->sizeof_addr, (s)->sizeof_size))
+/* For superblock version 0 & 1:
+ Offset to the file consistency flags (status_flags) in the superblock (excluding H5F_SUPERBLOCK_FIXED_SIZE) */
+#define H5F_SUPER_STATUS_OFF_V01 \
+ (2 /* freespace, and root group versions */ \
+ + 1 /* reserved */ \
+ + 3 /* shared header vers, size of address, size of lengths */ \
+ + 1 /* reserved */ \
+ + 4) /* group leaf k, group internal k */
+
+#define H5F_SUPER_STATUS_OFF(v) (v >= 2 ? 2 : H5F_SUPER_STATUS_OFF_V01)
+
+/* Offset to the file consistency flags (status_flags) in the superblock */
+#define H5F_SUPER_STATUS_FLAGS_OFF(v) (H5F_SUPERBLOCK_FIXED_SIZE + H5F_SUPER_STATUS_OFF(v))
+
+/* Size of file consistency flags (status_flags) in the superblock */
+#define H5F_SUPER_STATUS_FLAGS_SIZE(v) (v >= 2 ? 1 : 4)
+
+/*
+ * User data for superblock protect in H5F_super_read:
+ * dirtied: the superblock is modified or not
+ * initial read: superblock read upon the file's initial open--
+ * whether to skip the check for truncated file in H5F_sblock_load()
+ */
+typedef struct H5F_super_ud_t {
+ hbool_t dirtied;
+ hbool_t initial_read;
+} H5F_super_ud_t;
/* Forward declaration external file cache struct used below (defined in
* H5Fefc.c) */
@@ -152,9 +180,11 @@ typedef struct H5F_superblock_cache_ud_t {
unsigned btree_k[H5B_NUM_BTREE_ID]; /* B-tree key values for each type */
haddr_t stored_eof; /* End-of-file in file */
hbool_t drvrinfo_removed; /* Indicate if the driver info was removed */
+ unsigned super_vers; /* Superblock version obtained in get_load_size callback.
+ * It will be used later in verify_chksum callback
+ */
} H5F_superblock_cache_ud_t;
-/* Structure for passing 'user data' to driver info block cache callbacks */
typedef struct H5F_drvrinfo_cache_ud_t {
H5F_t *f; /* Pointer to file */
haddr_t driver_addr; /* address of driver info block */
@@ -259,6 +289,10 @@ struct H5F_file_t {
/* metadata cache. This structure is */
/* fixed at creation time and should */
/* not change thereafter. */
+ hbool_t use_mdc_logging; /* Set when metadata logging is desired */
+ hbool_t start_mdc_log_on_access; /* set when mdc logging should */
+ /* begin on file access/create */
+ char *mdc_log_location; /* location of mdc log */
hid_t fcpl_id; /* File creation property list ID */
H5F_close_degree_t fc_degree; /* File close behavior degree */
size_t rdcc_nslots; /* Size of raw data chunk cache (slots) */
@@ -269,6 +303,7 @@ struct H5F_file_t {
hsize_t alignment; /* Alignment */
unsigned gc_ref; /* Garbage-collect references? */
hbool_t latest_format; /* Always use the latest format? */
+ unsigned latest_flags; /* The latest version support */
hbool_t store_msg_crt_idx; /* Store creation index for object header messages? */
unsigned ncwfs; /* Num entries on cwfs list */
struct H5HG_heap_t **cwfs; /* Global heap cache */
@@ -293,6 +328,12 @@ struct H5F_file_t {
/* Metadata accumulator information */
H5F_meta_accum_t accum; /* Metadata accumulator info */
+
+ /* Metadata retry info */
+ unsigned read_attempts; /* The # of reads to try when reading metadata with checksum */
+ unsigned retries_nbins; /* # of bins for each retries[] */
+ uint32_t *retries[H5AC_NTYPES]; /* Track # of read retries for metadata items with checksum */
+ H5F_object_flush_t object_flush; /* Information for object flush callback */
};
/*
@@ -348,7 +389,7 @@ H5_DLL herr_t H5F_mount_count_ids(H5F_t *f, unsigned *nopen_files, unsigned *nop
/* Superblock related routines */
H5_DLL herr_t H5F__super_init(H5F_t *f, hid_t dxpl_id);
-H5_DLL herr_t H5F__super_read(H5F_t *f, hid_t dxpl_id);
+H5_DLL herr_t H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read);
H5_DLL herr_t H5F__super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext_size);
H5_DLL herr_t H5F__super_free(H5F_super_t *sblock);
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index fae8b99..6be0cff 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -283,6 +283,7 @@
#define H5F_FILE_ID(F) ((F)->file_id)
#define H5F_PARENT(F) ((F)->parent)
#define H5F_NMOUNTS(F) ((F)->nmounts)
+#define H5F_GET_READ_ATTEMPTS(F) ((F)->shared->read_attempts)
#define H5F_DRIVER_ID(F) ((F)->shared->lf->driver_id)
#define H5F_GET_FILENO(F,FILENUM) ((FILENUM) = (F)->shared->lf->fileno)
#define H5F_HAS_FEATURE(F,FL) ((F)->shared->lf->feature_flags & (FL))
@@ -306,6 +307,7 @@
#define H5F_SIEVE_BUF_SIZE(F) ((F)->shared->sieve_buf_size)
#define H5F_GC_REF(F) ((F)->shared->gc_ref)
#define H5F_USE_LATEST_FORMAT(F) ((F)->shared->latest_format)
+#define H5F_USE_LATEST_FLAGS(F,FL) ((F)->shared->latest_flags & (FL))
#define H5F_STORE_MSG_CRT_IDX(F) ((F)->shared->store_msg_crt_idx)
#define H5F_SET_STORE_MSG_CRT_IDX(F, FL) ((F)->shared->store_msg_crt_idx = (FL))
#define H5F_GRP_BTREE_SHARED(F) ((F)->shared->grp_btree_shared)
@@ -325,6 +327,7 @@
#define H5F_FILE_ID(F) (H5F_get_file_id(F))
#define H5F_PARENT(F) (H5F_get_parent(F))
#define H5F_NMOUNTS(F) (H5F_get_nmounts(F))
+#define H5F_GET_READ_ATTEMPTS(F) (H5F_get_read_attempts(F))
#define H5F_DRIVER_ID(F) (H5F_get_driver_id(F))
#define H5F_GET_FILENO(F,FILENUM) (H5F_get_fileno((F), &(FILENUM)))
#define H5F_HAS_FEATURE(F,FL) (H5F_has_feature(F,FL))
@@ -348,6 +351,7 @@
#define H5F_SIEVE_BUF_SIZE(F) (H5F_sieve_buf_size(F))
#define H5F_GC_REF(F) (H5F_gc_ref(F))
#define H5F_USE_LATEST_FORMAT(F) (H5F_use_latest_format(F))
+#define H5F_USE_LATEST_FLAGS(F,FL) (H5F_use_latest_flags(F,FL))
#define H5F_STORE_MSG_CRT_IDX(F) (H5F_store_msg_crt_idx(F))
#define H5F_SET_STORE_MSG_CRT_IDX(F, FL) (H5F_set_store_msg_crt_idx((F), (FL)))
#define H5F_GRP_BTREE_SHARED(F) (H5F_grp_btree_shared(F))
@@ -450,8 +454,15 @@
#define H5F_ACS_MULTI_TYPE_NAME "multi_type" /* Data type in multi file driver */
#define H5F_ACS_LATEST_FORMAT_NAME "latest_format" /* 'Use latest format version' flag */
#define H5F_ACS_WANT_POSIX_FD_NAME "want_posix_fd" /* Internal: query the file descriptor from the core VFD, instead of the memory address */
+#define H5F_ACS_METADATA_READ_ATTEMPTS_NAME "metadata_read_attempts" /* # of metadata read attempts */
+#define H5F_ACS_OBJECT_FLUSH_CB_NAME "object_flush_cb" /* Object flush callback */
#define H5F_ACS_EFC_SIZE_NAME "efc_size" /* Size of external file cache */
#define H5F_ACS_FILE_IMAGE_INFO_NAME "file_image_info" /* struct containing initial file image and callback info */
+#define H5F_ACS_CLEAR_STATUS_FLAGS_NAME "clear_status_flags" /* Whether to clear superblock status_flags (private property only used by h5clear) */
+#define H5F_ACS_USE_MDC_LOGGING_NAME "use_mdc_logging" /* Whether to use metadata cache logging */
+#define H5F_ACS_MDC_LOG_LOCATION_NAME "mdc_log_location" /* Name of metadata cache log location */
+#define H5F_ACS_START_MDC_LOG_ON_ACCESS_NAME "start_mdc_log_on_access" /* Whether logging starts on file create/open */
+
#define H5F_ACS_CORE_WRITE_TRACKING_FLAG_NAME "core_write_tracking_flag" /* Whether or not core VFD backing store write tracking is enabled */
#define H5F_ACS_CORE_WRITE_TRACKING_PAGE_SIZE_NAME "core_write_tracking_page_size" /* The page size in kiB when core VFD write tracking is enabled */
@@ -472,7 +483,8 @@
#define HDF5_SUPERBLOCK_VERSION_DEF 0 /* The default super block format */
#define HDF5_SUPERBLOCK_VERSION_1 1 /* Version with non-default B-tree 'K' value */
#define HDF5_SUPERBLOCK_VERSION_2 2 /* Revised version with superblock extension and checksum */
-#define HDF5_SUPERBLOCK_VERSION_LATEST HDF5_SUPERBLOCK_VERSION_2 /* The maximum super block format */
+#define HDF5_SUPERBLOCK_VERSION_3 3 /* With file locking and consistency flags (at least this version for SWMR support) */
+#define HDF5_SUPERBLOCK_VERSION_LATEST HDF5_SUPERBLOCK_VERSION_3 /* The maximum super block format */
#define HDF5_FREESPACE_VERSION 0 /* of the Free-Space Info */
#define HDF5_OBJECTDIR_VERSION 0 /* of the Object Directory format */
#define HDF5_SHAREDHEADER_VERSION 0 /* of the Shared-Header Info */
@@ -494,12 +506,20 @@
/* Default free space section threshold used by free-space managers */
#define H5F_FREE_SPACE_THRESHOLD_DEF 1
+/* Metadata read attempt values */
+#define H5F_METADATA_READ_ATTEMPTS 1 /* Default # of read attempts for non-SWMR access */
+#define H5F_SWMR_METADATA_READ_ATTEMPTS 100 /* Default # of read attempts for SWMR access */
+
/* Macros to define signatures of all objects in the file */
/* Size of signature information (on disk) */
/* (all on-disk signatures should be this length) */
#define H5_SIZEOF_MAGIC 4
+/* Size of checksum information (on disk) */
+/* (all on-disk checksums should be this length) */
+#define H5_SIZEOF_CHKSUM 4
+
/* v1 B-tree node signature */
#define H5B_MAGIC "TREE"
@@ -545,6 +565,22 @@
#define H5SM_LIST_MAGIC "SMLI" /* Shared Message List */
+/* Latest format will activate the following latest version support */
+/* "latest_flags" in H5F_file_t */
+#define H5F_LATEST_DATATYPE 0x0001
+#define H5F_LATEST_DATASPACE 0x0002
+#define H5F_LATEST_ATTRIBUTE 0x0004
+#define H5F_LATEST_FILL_MSG 0x0008
+#define H5F_LATEST_PLINE_MSG 0x0010
+#define H5F_LATEST_LAYOUT_MSG 0x0020
+#define H5F_LATEST_NO_MOD_TIME_MSG 0x0040
+#define H5F_LATEST_STYLE_GROUP 0x0080
+#define H5F_LATEST_OBJ_HEADER 0x0100
+#define H5F_LATEST_SUPERBLOCK 0x0200
+#define H5F_LATEST_ALL_FLAGS (H5F_LATEST_DATATYPE | H5F_LATEST_DATASPACE | H5F_LATEST_ATTRIBUTE | H5F_LATEST_FILL_MSG | H5F_LATEST_PLINE_MSG | H5F_LATEST_LAYOUT_MSG | H5F_LATEST_NO_MOD_TIME_MSG | H5F_LATEST_STYLE_GROUP | H5F_LATEST_OBJ_HEADER | H5F_LATEST_SUPERBLOCK)
+
+#define H5F_LATEST_DSET_MSG_FLAGS (H5F_LATEST_FILL_MSG | H5F_LATEST_PLINE_MSG | H5F_LATEST_LAYOUT_MSG)
+
/****************************/
/* Library Private Typedefs */
/****************************/
@@ -565,6 +601,13 @@ typedef struct H5F_file_t H5F_file_t;
/* Block aggregation structure */
typedef struct H5F_blk_aggr_t H5F_blk_aggr_t;
+/* Structure for object flush callback property (H5Pset_object_flush_cb)*/
+typedef struct H5F_object_flush_t {
+ H5F_flush_cb_t func; /* The callback function */
+ void *udata; /* User data */
+} H5F_object_flush_t;
+
+
/* I/O Info for an operation */
typedef struct H5F_io_info_t {
const H5F_t *f; /* File object */
@@ -607,10 +650,12 @@ H5_DLL hid_t H5F_get_file_id(const H5F_t *f);
H5_DLL ssize_t H5F_get_file_image(H5F_t *f, void *buf_ptr, size_t buf_len);
H5_DLL H5F_t *H5F_get_parent(const H5F_t *f);
H5_DLL unsigned H5F_get_nmounts(const H5F_t *f);
+H5_DLL unsigned H5F_get_read_attempts(const H5F_t *f);
H5_DLL hid_t H5F_get_access_plist(H5F_t *f, hbool_t app_ref);
H5_DLL hid_t H5F_get_id(H5F_t *file, hbool_t app_ref);
H5_DLL herr_t H5F_get_obj_count(const H5F_t *f, unsigned types, hbool_t app_ref, size_t *obj_id_count_ptr);
H5_DLL herr_t H5F_get_obj_ids(const H5F_t *f, unsigned types, size_t max_objs, hid_t *oid_list, hbool_t app_ref, size_t *obj_id_count_ptr);
+H5_DLL haddr_t H5F_get_next_proxy_addr(const H5F_t *f);
/* Functions than retrieve values set/cached from the superblock/FCPL */
H5_DLL haddr_t H5F_get_base_addr(const H5F_t *f);
@@ -633,6 +678,7 @@ H5_DLL double H5F_rdcc_w0(const H5F_t *f);
H5_DLL size_t H5F_sieve_buf_size(const H5F_t *f);
H5_DLL unsigned H5F_gc_ref(const H5F_t *f);
H5_DLL hbool_t H5F_use_latest_format(const H5F_t *f);
+H5_DLL unsigned H5F_use_latest_flags(const H5F_t *f, unsigned fl);
H5_DLL hbool_t H5F_store_msg_crt_idx(const H5F_t *f);
H5_DLL herr_t H5F_set_store_msg_crt_idx(H5F_t *f, hbool_t flag);
H5_DLL struct H5UC_t *H5F_grp_btree_shared(const H5F_t *f);
@@ -659,6 +705,26 @@ H5_DLL herr_t H5F_block_read(const H5F_t *f, H5FD_mem_t type, haddr_t addr,
H5_DLL herr_t H5F_block_write(const H5F_t *f, H5FD_mem_t type, haddr_t addr,
size_t size, hid_t dxpl_id, const void *buf);
+/* Functions that flush or evict */
+H5_DLL herr_t H5F_flush_tagged_metadata(H5F_t * f, haddr_t tag, hid_t dxpl_id);
+H5_DLL herr_t H5F_evict_tagged_metadata(H5F_t * f, haddr_t tag, hid_t dxpl_id);
+H5_DLL herr_t H5F_evict_cache_entries(H5F_t *f, hid_t dxpl_id);
+
+
+/* Functions that read & verify a piece of metadata with checksum */
+H5_DLL herr_t H5F_read_check_metadata(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type,
+ unsigned actype, haddr_t addr, size_t read_size, size_t chk_size,
+ uint8_t *buf/*out*/);
+H5_DLL herr_t H5F_get_checksums(const uint8_t *buf, size_t chk_size, uint32_t *s_chksum, uint32_t *c_chksum);
+
+/* Routine to track the # of retries */
+H5_DLL herr_t H5F_track_metadata_read_retries(H5F_t *f, unsigned actype, unsigned retries);
+H5_DLL herr_t H5F_set_retries(H5F_t *f);
+
+/* Routine to invoke callback function upon object flush */
+H5_DLL herr_t H5F_object_flush_cb(H5F_t *f, hid_t obj_id);
+
+
/* Address-related functions */
H5_DLL void H5F_addr_encode(const H5F_t *f, uint8_t **pp, haddr_t addr);
H5_DLL void H5F_addr_encode_len(size_t addr_len, uint8_t **pp, haddr_t addr);
diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h
index aa6cc2a..fa45d10 100644
--- a/src/H5Fpublic.h
+++ b/src/H5Fpublic.h
@@ -50,6 +50,23 @@
#define H5F_ACC_EXCL (H5CHECK 0x0004u) /*fail if file already exists*/
/* NOTE: 0x0008u was H5F_ACC_DEBUG, now deprecated */
#define H5F_ACC_CREAT (H5CHECK 0x0010u) /*create non-existing files */
+#define H5F_ACC_SWMR_WRITE (H5CHECK 0x0020u) /*indicate that this file is
+ * open for writing in a
+ * single-writer/multi-reader (SWMR)
+ * scenario. Note that the
+ * process(es) opening the file
+ * for reading must open the file
+ * with RDONLY access, and use
+ * the special "SWMR_READ" access
+ * flag. */
+#define H5F_ACC_SWMR_READ (H5CHECK 0x0040u) /*indicate that this file is
+ * open for reading in a
+ * single-writer/multi-reader (SWMR)
+ * scenario. Note that the
+ * process(es) opening the file
+ * for SWMR reading must also
+ * open the file with the RDONLY
+ * flag. */
/* Value passed to H5Pset_elink_acc_flags to cause flags to be taken from the
* parent file. */
@@ -170,6 +187,17 @@ typedef enum H5F_file_space_type_t {
H5F_FILE_SPACE_NTYPES /* must be last */
} H5F_file_space_type_t;
+/* Data structure to report the collection of read retries for metadata items with checksum */
+/* Used by public routine H5Fget_metadata_read_retry_info() */
+#define H5F_NUM_METADATA_READ_RETRY_TYPES 21
+typedef struct H5F_retry_info_t {
+ unsigned nbins;
+ uint32_t *retries[H5F_NUM_METADATA_READ_RETRY_TYPES];
+} H5F_retry_info_t;
+
+/* Callback for H5Pset_object_flush_cb() in a file access property list */
+typedef herr_t (*H5F_flush_cb_t)(hid_t object_id, void *udata);
+
#ifdef __cplusplus
extern "C" {
@@ -208,9 +236,16 @@ H5_DLL herr_t H5Fget_mdc_size(hid_t file_id,
H5_DLL herr_t H5Freset_mdc_hit_rate_stats(hid_t file_id);
H5_DLL ssize_t H5Fget_name(hid_t obj_id, char *name, size_t size);
H5_DLL herr_t H5Fget_info2(hid_t obj_id, H5F_info2_t *finfo);
+H5_DLL herr_t H5Fget_metadata_read_retry_info(hid_t file_id, H5F_retry_info_t *info);
+H5_DLL herr_t H5Fstart_swmr_write(hid_t file_id);
H5_DLL ssize_t H5Fget_free_sections(hid_t file_id, H5F_mem_t type,
size_t nsects, H5F_sect_info_t *sect_info/*out*/);
H5_DLL herr_t H5Fclear_elink_file_cache(hid_t file_id);
+H5_DLL herr_t H5Fstart_mdc_logging(hid_t file_id);
+H5_DLL herr_t H5Fstop_mdc_logging(hid_t file_id);
+H5_DLL herr_t H5Fget_mdc_logging_status(hid_t file_id,
+ /*OUT*/ hbool_t *is_enabled,
+ /*OUT*/ hbool_t *is_currently_logging);
#ifdef H5_HAVE_PARALLEL
H5_DLL herr_t H5Fset_mpi_atomicity(hid_t file_id, hbool_t flag);
H5_DLL herr_t H5Fget_mpi_atomicity(hid_t file_id, hbool_t *flag);
diff --git a/src/H5Fquery.c b/src/H5Fquery.c
index 05667ac..0baec78 100644
--- a/src/H5Fquery.c
+++ b/src/H5Fquery.c
@@ -322,6 +322,29 @@ H5F_get_nmounts(const H5F_t *f)
/*-------------------------------------------------------------------------
+ * Function: H5F_get_read_attempts
+ *
+ * Purpose: Retrieve the file's 'read_attempts' value
+ *
+ * Return: '# of read attempts' on success/abort on failure (shouldn't fail)
+ *
+ * Programmer: Vailin Choi; Sept 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+H5F_get_read_attempts(const H5F_t *f)
+{
+ /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(f);
+
+ FUNC_LEAVE_NOAPI(f->shared->read_attempts)
+} /* end H5F_get_read_attempts() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5F_get_fcpl
*
* Purpose: Retrieve the value of a file's FCPL.
@@ -808,6 +831,34 @@ H5F_use_latest_format(const H5F_t *f)
/*-------------------------------------------------------------------------
+ * Function: H5F_use_latest_flags
+ *
+ * Purpose: Retrieve the 'latest version support' for the file.
+ *
+ * Return: Success: Non-negative, the requested 'version support'
+ *
+ * Failure: (can't happen)
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * Mar 5 2007
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+H5F_use_latest_flags(const H5F_t *f, unsigned fl)
+{
+ /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ FUNC_LEAVE_NOAPI(f->shared->latest_flags & (fl))
+} /* end H5F_use_latest_flags() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5F_get_fc_degree
*
* Purpose: Retrieve the 'file close degree' for the file.
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
index a0f9536..da3bf3d 100644
--- a/src/H5Fsuper.c
+++ b/src/H5Fsuper.c
@@ -115,7 +115,7 @@ H5F_super_ext_create(H5F_t *f, hid_t dxpl_id, H5O_loc_t *ext_ptr)
* extension.
*/
H5O_loc_reset(ext_ptr);
- if(H5O_create(f, dxpl_id, 0, (size_t)1, H5P_GROUP_CREATE_DEFAULT, ext_ptr) < 0)
+ if(H5O_create(f, dxpl_id, (size_t)0, (size_t)1, H5P_GROUP_CREATE_DEFAULT, ext_ptr) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTCREATE, FAIL, "unable to create superblock extension")
/* Record the address of the superblock extension */
@@ -239,7 +239,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F__super_read(H5F_t *f, hid_t dxpl_id)
+H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
{
H5P_genplist_t *dxpl = NULL; /* DXPL object */
H5AC_ring_t ring, orig_ring = H5AC_RING_INV;
@@ -250,6 +250,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id)
haddr_t super_addr; /* Absolute address of superblock */
haddr_t eof; /* End of file address */
unsigned rw_flags; /* Read/write permissions for file */
+ hbool_t skip_eof_check = FALSE; /* Whether to skip checking the EOF value */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
@@ -317,6 +318,14 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id)
if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &udata, rw_flags)))
HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "unable to load superblock")
+ if(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)
+ if(sblock->super_vers < HDF5_SUPERBLOCK_VERSION_3)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "invalid superblock version for SWMR_WRITE")
+
+ /* Enable all latest version support when file has v3 superblock */
+ if(sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_3)
+ f->shared->latest_flags |= H5F_LATEST_ALL_FLAGS;
+
/* Pin the superblock in the cache */
if(H5AC_pin_protected_entry(sblock) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTPIN, FAIL, "unable to pin superblock")
@@ -400,12 +409,39 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id)
* possible is if the first file of a family of files was opened
* individually.
*/
- if(HADDR_UNDEF == (eof = H5FD_get_eof(f->shared->lf, H5FD_MEM_DEFAULT)))
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to determine file size")
+ /* Can skip this test when it is not the initial file open--
+ * H5F_super_read() call from H5F_evict_tagged_metadata() for
+ * refreshing object.
+ * When flushing file buffers and fractal heap is involved,
+ * the library will allocate actual space for tmp addresses
+ * via the file layer. The aggregator allocates a block,
+ * thus the eoa might be greater than eof.
+ * Note: the aggregator is changed again after being reset
+ * earlier before H5AC_flush due to allocation of tmp addresses.
+ */
+ /* The EOF check must be skipped when the file is opened for SWMR read,
+ * as the file can appear truncated if only part of it has been
+ * been flushed to disk by the SWMR writer process.
+ */
+ if(H5F_INTENT(f) & H5F_ACC_SWMR_READ) {
+ /*
+ * When the file is opened for SWMR read access, skip the check if:
+ * --the file is already marked for SWMR writing and
+ * --the file has version 3 superblock for SWMR support
+ */
+ if((sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS) &&
+ (sblock->status_flags & H5F_SUPER_WRITE_ACCESS) &&
+ sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_3)
+ skip_eof_check = TRUE;
+ } /* end if */
+ if(!skip_eof_check && initial_read) {
+ if(HADDR_UNDEF == (eof = H5FD_get_eof(f->shared->lf, H5FD_MEM_DEFAULT)))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to determine file size")
- /* (Account for the stored EOA being absolute offset -QAK) */
- if((eof + sblock->base_addr) < udata.stored_eof)
- HGOTO_ERROR(H5E_FILE, H5E_TRUNCATED, FAIL, "truncated file: eof = %llu, sblock->base_addr = %llu, stored_eoa = %llu", (unsigned long long)eof, (unsigned long long)sblock->base_addr, (unsigned long long)udata.stored_eof)
+ /* (Account for the stored EOA being absolute offset -QAK) */
+ if((eof + sblock->base_addr) < udata.stored_eof)
+ HGOTO_ERROR(H5E_FILE, H5E_TRUNCATED, FAIL, "truncated file: eof = %llu, sblock->base_addr = %llu, stored_eof = %llu", (unsigned long long)eof, (unsigned long long)sblock->base_addr, (unsigned long long)udata.stored_eof)
+ } /* end if */
/*
* Tell the file driver how much address space has already been
@@ -742,8 +778,8 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, &sblock->btree_k[0]) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get rank for btree internal nodes")
- /* Bump superblock version if we are to use the latest version of the format */
- if(f->shared->latest_format)
+ /* Bump superblock version if latest superblock version support is enabled */
+ if(H5F_USE_LATEST_FLAGS(f, H5F_LATEST_SUPERBLOCK))
super_vers = HDF5_SUPERBLOCK_VERSION_LATEST;
/* Bump superblock version to create superblock extension for SOHM info */
else if(f->shared->sohm_nindexes > 0)
diff --git a/src/H5Fsuper_cache.c b/src/H5Fsuper_cache.c
index c2c7bc9..6f37ff6 100644
--- a/src/H5Fsuper_cache.c
+++ b/src/H5Fsuper_cache.c
@@ -67,7 +67,10 @@
/********************/
/* Metadata cache (H5AC) callbacks */
-static herr_t H5F__cache_superblock_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5F__cache_superblock_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5F__cache_superblock_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5F__cache_superblock_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5F__cache_superblock_image_len(const void *thing,
@@ -81,7 +84,9 @@ static herr_t H5F__cache_superblock_serialize(const H5F_t *f, void *image, size_
void *thing);
static herr_t H5F__cache_superblock_free_icr(void *thing);
-static herr_t H5F__cache_drvrinfo_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5F__cache_drvrinfo_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static void *H5F__cache_drvrinfo_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5F__cache_drvrinfo_image_len(const void *thing,
@@ -103,11 +108,12 @@ const H5AC_class_t H5AC_SUPERBLOCK[1] = {{
H5FD_MEM_SUPER, /* File space memory type for client */
H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
H5F__cache_superblock_get_load_size,/* 'get_load_size' callback */
+ H5F__cache_superblock_verify_chksum, /* 'verify_chksum' callback */
H5F__cache_superblock_deserialize, /* 'deserialize' callback */
H5F__cache_superblock_image_len, /* 'image_len' callback */
H5F__cache_superblock_pre_serialize,/* 'pre_serialize' callback */
H5F__cache_superblock_serialize, /* 'serialize' callback */
- NULL, /* 'notify' callback */
+ NULL, /* 'notify' callback */
H5F__cache_superblock_free_icr, /* 'free_icr' callback */
NULL, /* 'clear' callback */
NULL, /* 'fsf_size' callback */
@@ -120,6 +126,7 @@ const H5AC_class_t H5AC_DRVRINFO[1] = {{
H5FD_MEM_SUPER, /* File space memory type for client */
H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
H5F__cache_drvrinfo_get_load_size, /* 'get_load_size' callback */
+ NULL, /* 'verify_chksum' callback */
H5F__cache_drvrinfo_deserialize, /* 'deserialize' callback */
H5F__cache_drvrinfo_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -159,22 +166,133 @@ H5FL_EXTERN(H5F_super_t);
*-------------------------------------------------------------------------
*/
static herr_t
-H5F__cache_superblock_get_load_size(const void H5_ATTR_UNUSED *udata, size_t *image_len)
+H5F__cache_superblock_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- FUNC_ENTER_STATIC_NOERR
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5F_superblock_cache_ud_t *udata = (H5F_superblock_cache_ud_t *)_udata; /* User data */
+ unsigned super_vers; /* Superblock version */
+ uint8_t sizeof_addr; /* Size of offsets in the file (in bytes) */
+ uint8_t sizeof_size; /* Size of lengths in the file (in bytes) */
+ size_t variable_size; /* Variable size of superblock */
+ htri_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(image_len);
- /* Set the initial image length size */
- *image_len = H5F_SUPERBLOCK_FIXED_SIZE + /* Fixed size of superblock */
- H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE;
+ if(image == NULL) {
+ HDassert(actual_len == NULL);
+
+ /* Set the initial image length size */
+ *image_len = H5F_SUPERBLOCK_FIXED_SIZE + /* Fixed size of superblock */
+ H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE;
+ } else { /* compute actual_len */
+ HDassert(udata);
+ HDassert(udata->f);
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+
+ image += H5F_SIGNATURE_LEN;
+
+ /* Superblock version */
+ super_vers = *image++;
+ if(super_vers > HDF5_SUPERBLOCK_VERSION_LATEST)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad superblock version number")
+
+ /* Save the version to be used in verify_chksum callback */
+ udata->super_vers = super_vers;
+
+ /* Sanity check */
+ HDassert(((size_t)(image - (const uint8_t *)_image)) == H5F_SUPERBLOCK_FIXED_SIZE);
+ HDassert(*image_len >= H5F_SUPERBLOCK_FIXED_SIZE + 6);
+
+ /* Determine the size of addresses & size of offsets, for computing the
+ * variable-sized portion of the superblock.
+ */
+ if(super_vers < HDF5_SUPERBLOCK_VERSION_2) {
+ sizeof_addr = image[4];
+ sizeof_size = image[5];
+ } /* end if */
+ else {
+ sizeof_addr = image[0];
+ sizeof_size = image[1];
+ } /* end else */
+ if(sizeof_addr != 2 && sizeof_addr != 4 &&
+ sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad byte number in an address")
+ if(sizeof_size != 2 && sizeof_size != 4 &&
+ sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad byte number for object size")
+
+ /* Determine the size of the variable-length part of the superblock */
+ variable_size = (size_t)H5F_SUPERBLOCK_VARLEN_SIZE(super_vers, sizeof_addr, sizeof_size);
+ HDassert(variable_size > 0);
+
+ /* Handle metadata cache retry for variable-sized portion of the superblock */
+ if(*image_len != (H5F_SUPERBLOCK_FIXED_SIZE + variable_size)) {
+
+ /* Sanity check */
+ HDassert(*image_len == (H5F_SUPERBLOCK_FIXED_SIZE + H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE));
+
+ /* Make certain we can read the variable-sized portion of the superblock */
+ if(H5F__set_eoa(udata->f, H5FD_MEM_SUPER, (haddr_t)(H5F_SUPERBLOCK_FIXED_SIZE + variable_size)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "set end of space allocation request failed")
+
+ *actual_len = H5F_SUPERBLOCK_FIXED_SIZE + variable_size;
+
+ } /* end if */
+ }
- FUNC_LEAVE_NOAPI(SUCCEED)
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F__cache_superblock_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5F__cache_superblock_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5F__cache_superblock_verify_chksum(const void *_image, size_t len, void *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5F_superblock_cache_ud_t *udata = (H5F_superblock_cache_ud_t *)_udata; /* User data */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image);
+ HDassert(udata);
+
+ /* No checksum for version 0 & 1 */
+ if(udata->super_vers >= HDF5_SUPERBLOCK_VERSION_2) {
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+ }
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F__cache_superblock_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5F__cache_superblock_deserialize
*
* Purpose: Loads an object from the disk.
@@ -195,7 +313,7 @@ H5F__cache_superblock_deserialize(const void *_image, size_t len, void *_udata,
H5F_super_t *sblock = NULL; /* File's superblock */
H5F_superblock_cache_ud_t *udata = (H5F_superblock_cache_ud_t *)_udata; /* User data */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
- size_t variable_size; /* Sariable size of superblock */
+ size_t variable_size; /* Variable size of superblock */
unsigned super_vers; /* Superblock version */
uint8_t sizeof_addr; /* Size of offsets in the file (in bytes) */
uint8_t sizeof_size; /* Size of lengths in the file (in bytes) */
@@ -212,7 +330,6 @@ H5F__cache_superblock_deserialize(const void *_image, size_t len, void *_udata,
if(NULL == (sblock = H5FL_CALLOC(H5F_super_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- /* Skip over signature (already checked when locating the superblock) */
image += H5F_SIGNATURE_LEN;
/* Superblock version */
@@ -251,176 +368,161 @@ H5F__cache_superblock_deserialize(const void *_image, size_t len, void *_udata,
variable_size = (size_t)H5F_SUPERBLOCK_VARLEN_SIZE(super_vers, sizeof_addr, sizeof_size);
HDassert(variable_size > 0);
- /* Handle metadata cache retry for variable-sized portion of the superblock */
- if(len != (H5F_SUPERBLOCK_FIXED_SIZE + variable_size)) {
- /* Sanity check */
- HDassert(len == (H5F_SUPERBLOCK_FIXED_SIZE + H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE));
+ HDassert(len == (H5F_SUPERBLOCK_FIXED_SIZE + variable_size));
- /* Make certain we can read the variabled-sized portion of the superblock */
- if(H5F__set_eoa(udata->f, H5FD_MEM_SUPER, (haddr_t)(H5F_SUPERBLOCK_FIXED_SIZE + variable_size)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "set end of space allocation request failed")
- } /* end if */
- else {
- /* Check for older version of superblock format */
- if(super_vers < HDF5_SUPERBLOCK_VERSION_2) {
- uint32_t status_flags; /* File status flags */
- unsigned sym_leaf_k; /* Symbol table leaf node's 'K' value */
- unsigned snode_btree_k; /* B-tree symbol table internal node 'K' value */
- unsigned chunk_btree_k; /* B-tree chunk internal node 'K' value */
-
- /* Freespace version (hard-wired) */
- if(HDF5_FREESPACE_VERSION != *image++)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad free space version number")
-
- /* Root group version number (hard-wired) */
- if(HDF5_OBJECTDIR_VERSION != *image++)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad object directory version number")
-
- /* Skip over reserved byte */
- image++;
-
- /* Shared header version number (hard-wired) */
- if(HDF5_SHAREDHEADER_VERSION != *image++)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad shared-header format version number")
-
- /* Size of file addresses */
- sizeof_addr = *image++;
- if(sizeof_addr != 2 && sizeof_addr != 4 &&
- sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number in an address")
- sblock->sizeof_addr = sizeof_addr;
- udata->f->shared->sizeof_addr = sizeof_addr; /* Keep a local copy also */
-
- /* Size of file sizes */
- sizeof_size = *image++;
- if(sizeof_size != 2 && sizeof_size != 4 &&
- sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number for object size")
- sblock->sizeof_size = sizeof_size;
- udata->f->shared->sizeof_size = sizeof_size; /* Keep a local copy also */
-
- /* Skip over reserved byte */
- image++;
-
- /* Various B-tree sizes */
- UINT16DECODE(image, sym_leaf_k);
- if(sym_leaf_k == 0)
- HGOTO_ERROR(H5E_FILE, H5E_BADRANGE, NULL, "bad symbol table leaf node 1/2 rank")
- udata->sym_leaf_k = sym_leaf_k; /* Keep a local copy also */
-
- /* Need 'get' call to set other array values */
- UINT16DECODE(image, snode_btree_k);
- if(snode_btree_k == 0)
- HGOTO_ERROR(H5E_FILE, H5E_BADRANGE, NULL, "bad 1/2 rank for btree internal nodes")
- udata->btree_k[H5B_SNODE_ID] = snode_btree_k;
-
- /*
- * Delay setting the value in the property list until we've checked
- * for the indexed storage B-tree internal 'K' value later.
- */
-
- /* File status flags (not really used yet) */
- UINT32DECODE(image, status_flags);
- HDassert(status_flags <= 255);
- sblock->status_flags = (uint8_t)status_flags;
- if(sblock->status_flags & ~H5F_SUPER_ALL_FLAGS)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad flag value for superblock")
-
- /*
- * If the superblock version # is greater than 0, read in the indexed
- * storage B-tree internal 'K' value
- */
- if(super_vers > HDF5_SUPERBLOCK_VERSION_DEF) {
- UINT16DECODE(image, chunk_btree_k);
-
- /* Reserved bytes are present only in version 1 */
- if(super_vers == HDF5_SUPERBLOCK_VERSION_1)
- image += 2; /* reserved */
- } /* end if */
- else
- chunk_btree_k = HDF5_BTREE_CHUNK_IK_DEF;
- udata->btree_k[H5B_CHUNK_ID] = chunk_btree_k;
-
- /* Remainder of "variable-sized" portion of superblock */
- H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->base_addr/*out*/);
- H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->ext_addr/*out*/);
- H5F_addr_decode(udata->f, (const uint8_t **)&image, &udata->stored_eof/*out*/);
- H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->driver_addr/*out*/);
-
- /* Allocate space for the root group symbol table entry */
- HDassert(!sblock->root_ent);
- if(NULL == (sblock->root_ent = (H5G_entry_t *)H5MM_calloc(sizeof(H5G_entry_t))))
- HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, NULL, "can't allocate space for root group symbol table entry")
-
- /* decode the root group symbol table entry */
- if(H5G_ent_decode(udata->f, (const uint8_t **)&image, sblock->root_ent) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "can't decode root group symbol table entry")
-
- /* Set the root group address to the correct value */
- sblock->root_addr = sblock->root_ent->header;
-
- /* This step is for h5repart tool only. If user wants to change file driver
- * from family to sec2 while using h5repart, set the driver address to
- * undefined to let the library ignore the family driver information saved
- * in the superblock.
- */
- if(udata->ignore_drvrinfo && H5F_addr_defined(sblock->driver_addr)) {
- /* Eliminate the driver info */
- sblock->driver_addr = HADDR_UNDEF;
- udata->drvrinfo_removed = TRUE;
- } /* end if */
+ /* Check for older version of superblock format */
+ if(super_vers < HDF5_SUPERBLOCK_VERSION_2) {
+ uint32_t status_flags; /* File status flags */
+ unsigned sym_leaf_k; /* Symbol table leaf node's 'K' value */
+ unsigned snode_btree_k; /* B-tree symbol table internal node 'K' value */
+ unsigned chunk_btree_k; /* B-tree chunk internal node 'K' value */
+
+ /* Freespace version (hard-wired) */
+ if(HDF5_FREESPACE_VERSION != *image++)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad free space version number")
+
+ /* Root group version number (hard-wired) */
+ if(HDF5_OBJECTDIR_VERSION != *image++)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad object directory version number")
+
+ /* Skip over reserved byte */
+ image++;
+
+ /* Shared header version number (hard-wired) */
+ if(HDF5_SHAREDHEADER_VERSION != *image++)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad shared-header format version number")
+
+ /* Size of file addresses */
+ sizeof_addr = *image++;
+ if(sizeof_addr != 2 && sizeof_addr != 4 &&
+ sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number in an address")
+ sblock->sizeof_addr = sizeof_addr;
+ udata->f->shared->sizeof_addr = sizeof_addr; /* Keep a local copy also */
+
+ /* Size of file sizes */
+ sizeof_size = *image++;
+ if(sizeof_size != 2 && sizeof_size != 4 &&
+ sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number for object size")
+ sblock->sizeof_size = sizeof_size;
+ udata->f->shared->sizeof_size = sizeof_size; /* Keep a local copy also */
+
+ /* Skip over reserved byte */
+ image++;
+
+ /* Various B-tree sizes */
+ UINT16DECODE(image, sym_leaf_k);
+ if(sym_leaf_k == 0)
+ HGOTO_ERROR(H5E_FILE, H5E_BADRANGE, NULL, "bad symbol table leaf node 1/2 rank")
+ udata->sym_leaf_k = sym_leaf_k; /* Keep a local copy also */
+
+ /* Need 'get' call to set other array values */
+ UINT16DECODE(image, snode_btree_k);
+ if(snode_btree_k == 0)
+ HGOTO_ERROR(H5E_FILE, H5E_BADRANGE, NULL, "bad 1/2 rank for btree internal nodes")
+ udata->btree_k[H5B_SNODE_ID] = snode_btree_k;
+
+ /*
+ * Delay setting the value in the property list until we've checked
+ * for the indexed storage B-tree internal 'K' value later.
+ */
- /* NOTE: Driver info block is decoded separately, later */
+ /* File status flags (not really used yet) */
+ UINT32DECODE(image, status_flags);
+ HDassert(status_flags <= 255);
+ sblock->status_flags = (uint8_t)status_flags;
+ if(sblock->status_flags & ~H5F_SUPER_ALL_FLAGS)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad flag value for superblock")
- } /* end if */
- else {
- uint32_t computed_chksum; /* Computed checksum */
- uint32_t read_chksum; /* Checksum read from file */
-
- /* Size of file addresses */
- sizeof_addr = *image++;
- if(sizeof_addr != 2 && sizeof_addr != 4 &&
- sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number in an address")
- sblock->sizeof_addr = sizeof_addr;
- udata->f->shared->sizeof_addr = sizeof_addr; /* Keep a local copy also */
-
- /* Size of file sizes */
- sizeof_size = *image++;
- if(sizeof_size != 2 && sizeof_size != 4 &&
- sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number for object size")
- sblock->sizeof_size = sizeof_size;
- udata->f->shared->sizeof_size = sizeof_size; /* Keep a local copy also */
-
- /* File status flags (not really used yet) */
- sblock->status_flags = *image++;
- if(sblock->status_flags & ~H5F_SUPER_ALL_FLAGS)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad flag value for superblock")
-
- /* Base, superblock extension, end of file & root group object header addresses */
- H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->base_addr/*out*/);
- H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->ext_addr/*out*/);
- H5F_addr_decode(udata->f, (const uint8_t **)&image, &udata->stored_eof/*out*/);
- H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->root_addr/*out*/);
-
- /* Compute checksum for superblock */
- computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
-
- /* Decode checksum */
- UINT32DECODE(image, read_chksum);
-
- /* Verify correct checksum */
- if(read_chksum != computed_chksum)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "bad checksum on driver information block")
-
- /* The Driver Information Block may not appear with the version
- * 2 super block. Thus we set the driver_addr field of the in
- * core representation of the super block HADDR_UNDEF to prevent
- * any attempt to load the Driver Information Block.
- */
- sblock->driver_addr = HADDR_UNDEF;
- } /* end else */
+ /*
+ * If the superblock version # is greater than 0, read in the indexed
+ * storage B-tree internal 'K' value
+ */
+ if(super_vers > HDF5_SUPERBLOCK_VERSION_DEF) {
+ UINT16DECODE(image, chunk_btree_k);
+
+ /* Reserved bytes are present only in version 1 */
+ if(super_vers == HDF5_SUPERBLOCK_VERSION_1)
+ image += 2; /* reserved */
+ } /* end if */
+ else
+ chunk_btree_k = HDF5_BTREE_CHUNK_IK_DEF;
+ udata->btree_k[H5B_CHUNK_ID] = chunk_btree_k;
+
+ /* Remainder of "variable-sized" portion of superblock */
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->base_addr/*out*/);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->ext_addr/*out*/);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &udata->stored_eof/*out*/);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->driver_addr/*out*/);
+
+ /* Allocate space for the root group symbol table entry */
+ HDassert(!sblock->root_ent);
+ if(NULL == (sblock->root_ent = (H5G_entry_t *)H5MM_calloc(sizeof(H5G_entry_t))))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, NULL, "can't allocate space for root group symbol table entry")
+
+ /* decode the root group symbol table entry */
+ if(H5G_ent_decode(udata->f, (const uint8_t **)&image, sblock->root_ent) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "can't decode root group symbol table entry")
+
+ /* Set the root group address to the correct value */
+ sblock->root_addr = sblock->root_ent->header;
+
+ /* This step is for h5repart tool only. If user wants to change file driver
+ * from family to sec2 while using h5repart, set the driver address to
+ * undefined to let the library ignore the family driver information saved
+ * in the superblock.
+ */
+ if(udata->ignore_drvrinfo && H5F_addr_defined(sblock->driver_addr)) {
+ /* Eliminate the driver info */
+ sblock->driver_addr = HADDR_UNDEF;
+ udata->drvrinfo_removed = TRUE;
+ } /* end if */
+
+ /* NOTE: Driver info block is decoded separately, later */
+
+ } /* end if */
+ else {
+ uint32_t read_chksum; /* Checksum read from file */
+
+ /* Size of file addresses */
+ sizeof_addr = *image++;
+ if(sizeof_addr != 2 && sizeof_addr != 4 &&
+ sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number in an address")
+ sblock->sizeof_addr = sizeof_addr;
+ udata->f->shared->sizeof_addr = sizeof_addr; /* Keep a local copy also */
+
+ /* Size of file sizes */
+ sizeof_size = *image++;
+ if(sizeof_size != 2 && sizeof_size != 4 &&
+ sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number for object size")
+ sblock->sizeof_size = sizeof_size;
+ udata->f->shared->sizeof_size = sizeof_size; /* Keep a local copy also */
+
+ /* File status flags (not really used yet) */
+ sblock->status_flags = *image++;
+ if(sblock->status_flags & ~H5F_SUPER_ALL_FLAGS)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad flag value for superblock")
+
+ /* Base, superblock extension, end of file & root group object header addresses */
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->base_addr/*out*/);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->ext_addr/*out*/);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &udata->stored_eof/*out*/);
+ H5F_addr_decode(udata->f, (const uint8_t **)&image, &sblock->root_addr/*out*/);
+
+ /* checksum verification already done in verify_chksum cb */
+
+ /* Decode checksum */
+ UINT32DECODE(image, read_chksum);
+
+ /* The Driver Information Block may not appear with the version
+ * 2 super block. Thus we set the driver_addr field of the in
+ * core representation of the super block HADDR_UNDEF to prevent
+ * any attempt to load the Driver Information Block.
+ */
+ sblock->driver_addr = HADDR_UNDEF;
} /* end else */
/* Sanity check */
@@ -787,17 +889,72 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5F__cache_drvrinfo_get_load_size(const void H5_ATTR_UNUSED *udata, size_t *image_len)
+H5F__cache_drvrinfo_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- FUNC_ENTER_STATIC_NOERR
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5F_drvrinfo_cache_ud_t *udata = (H5F_drvrinfo_cache_ud_t *)_udata; /* User data */
+ unsigned drv_vers; /* Version of driver info block */
+ size_t drvinfo_len; /* Length of encoded buffer */
+ htri_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
/* Check arguments */
HDassert(image_len);
- /* Set the initial image length size */
- *image_len = H5F_DRVINFOBLOCK_HDR_SIZE; /* Fixed size portion of driver info block */
+ if(image == NULL) {
+ HDassert(actual_len == NULL);
- FUNC_LEAVE_NOAPI(SUCCEED)
+ /* Set the initial image length size */
+ *image_len = H5F_DRVINFOBLOCK_HDR_SIZE; /* Fixed size portion of driver info block */
+
+ } else { /* compute actual_len */
+
+ HDassert(udata);
+ HDassert(udata->f);
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+
+ /* Version number */
+ drv_vers = *image++;
+ if(drv_vers != HDF5_DRIVERINFO_VERSION_0)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad driver information block version number")
+
+ image += 3; /* reserved bytes */
+
+ /* Driver info size */
+ UINT32DECODE(image, drvinfo_len);
+
+ /* Handle metadata cache retry for variable-sized portion of the driver info block */
+ if(*image_len != (H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo_len)) {
+ /* Sanity check */
+ HDassert(*image_len == H5F_DRVINFOBLOCK_HDR_SIZE);
+
+ /* extend the eoa if required so that we can read the complete driver info block */
+ {
+ haddr_t eoa;
+ haddr_t min_eoa;
+
+ /* get current eoa... */
+ if ((eoa = H5FD_get_eoa(udata->f->shared->lf, H5FD_MEM_SUPER)) == HADDR_UNDEF)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+
+ /* ... if it is too small, extend it. */
+ min_eoa = udata->driver_addr + H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo_len;
+
+ if ( H5F_addr_gt(min_eoa, eoa) )
+ if(H5FD_set_eoa(udata->f->shared->lf, H5FD_MEM_SUPER, min_eoa) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, \
+ "set end of space allocation request failed")
+ }
+ *actual_len = H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo_len;
+ }
+ } /* compute actual_len */
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F__cache_drvrinfo_get_load_size() */
@@ -853,35 +1010,11 @@ H5F__cache_drvrinfo_deserialize(const void *_image, size_t len, void *_udata,
drv_name[8] = '\0';
image += 8; /* advance past name/version */
- /* Handle metadata cache retry for variable-sized portion of the driver info block */
- if(len != (H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo->len)) {
- /* Sanity check */
- HDassert(len == H5F_DRVINFOBLOCK_HDR_SIZE);
-
- /* extend the eoa if required so that we can read the complete driver info block */
- {
- haddr_t eoa;
- haddr_t min_eoa;
-
- /* get current eoa... */
- if ((eoa = H5FD_get_eoa(udata->f->shared->lf, H5FD_MEM_SUPER)) == HADDR_UNDEF)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, NULL, "driver get_eoa request failed")
-
- /* ... if it is too small, extend it. */
- min_eoa = udata->driver_addr + H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo->len;
+ HDassert(len == (H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo->len));
- if ( H5F_addr_gt(min_eoa, eoa) )
- if(H5FD_set_eoa(udata->f->shared->lf, H5FD_MEM_SUPER, min_eoa) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, \
- "set end of space allocation request failed")
- }
-
- } /* end if */
- else {
- /* Validate and decode driver information */
- if(H5FD_sb_load(udata->f->shared->lf, drv_name, image) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "unable to decode driver information")
- } /* end if */
+ /* Validate and decode driver information */
+ if(H5FD_sb_load(udata->f->shared->lf, drv_name, image) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "unable to decode driver information")
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) <= len);
diff --git a/src/H5G.c b/src/H5G.c
index 03fe50a..21957d8 100644
--- a/src/H5G.c
+++ b/src/H5G.c
@@ -740,3 +740,71 @@ done:
FUNC_LEAVE_API(ret_value)
} /* end H5Gclose() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5Gflush
+ *
+ * Purpose: Flushes all buffers associated with a group to disk.
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Mike McGreevy
+ * May 19, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Gflush(hid_t group_id)
+{
+ H5G_t *grp; /* Group for this operation */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", group_id);
+
+ /* Check args */
+ if(NULL == (grp = (H5G_t *)H5I_object_verify(group_id, H5I_GROUP)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a group")
+
+ /* Flush object's metadata to file */
+ if(H5O_flush_common(&grp->oloc, group_id, H5AC_dxpl_id) < 0)
+ HGOTO_ERROR(H5E_SYM, H5E_CANTFLUSH, FAIL, "unable to flush group and object flush callback")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Gflush */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Grefresh
+ *
+ * Purpose: Refreshes all buffers associated with a group.
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Mike McGreevy
+ * July 21, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Grefresh(hid_t group_id)
+{
+ H5G_t * grp = NULL;
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", group_id);
+
+ /* Check args */
+ if(NULL == (grp = (H5G_t *)H5I_object_verify(group_id, H5I_GROUP)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a group")
+
+ /* Call private function to refresh group object */
+ if ((H5O_refresh_metadata(group_id, grp->oloc, H5AC_dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, FAIL, "unable to refresh group")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Grefresh */
+
diff --git a/src/H5Gcache.c b/src/H5Gcache.c
index e7d44b5..a4f9530 100644
--- a/src/H5Gcache.c
+++ b/src/H5Gcache.c
@@ -63,7 +63,9 @@
/********************/
/* Metadata cache (H5AC) callbacks */
-static herr_t H5G__cache_node_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5G__cache_node_get_load_size(const void *image, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static void *H5G__cache_node_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5G__cache_node_image_len(const void *thing, size_t *image_len,
@@ -94,6 +96,7 @@ const H5AC_class_t H5AC_SNODE[1] = {{
H5FD_MEM_BTREE, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5G__cache_node_get_load_size, /* 'get_load_size' callback */
+ NULL, /* 'verify_chksum' callback */
H5G__cache_node_deserialize, /* 'deserialize' callback */
H5G__cache_node_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -133,9 +136,11 @@ H5FL_SEQ_EXTERN(H5G_entry_t);
*-------------------------------------------------------------------------
*/
static herr_t
-H5G__cache_node_get_load_size(const void *_udata, size_t *image_len)
+H5G__cache_node_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5F_t *f = (const H5F_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer to image to deserialize */
+ H5F_t *f = (H5F_t *)_udata; /* User data for callback */
FUNC_ENTER_STATIC_NOERR
@@ -143,8 +148,15 @@ H5G__cache_node_get_load_size(const void *_udata, size_t *image_len)
HDassert(f);
HDassert(image_len);
- /* report image length */
- *image_len = (size_t)(H5G_NODE_SIZE(f));
+ if(image == NULL) {
+ /* report image length */
+ *image_len = (size_t)(H5G_NODE_SIZE(f));
+ } else {
+ HDassert(actual_len);
+ HDassert(*image_len == *actual_len);
+ }
+
+ /* Nothing to do for non-NULL image : no need to compute actual_len */
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5G__cache_node_get_load_size() */
diff --git a/src/H5Gdense.c b/src/H5Gdense.c
index e8fa237..9f3a556 100644
--- a/src/H5Gdense.c
+++ b/src/H5Gdense.c
@@ -327,7 +327,7 @@ HDfprintf(stderr, "%s: fheap_id_len = %Zu\n", FUNC, fheap_id_len);
(uint32_t)fheap_id_len; /* Fractal heap ID */
bt2_cparam.split_percent = H5G_NAME_BT2_SPLIT_PERC;
bt2_cparam.merge_percent = H5G_NAME_BT2_MERGE_PERC;
- if(NULL == (bt2_name = H5B2_create(f, dxpl_id, &bt2_cparam, NULL)))
+ if(NULL == (bt2_name = H5B2_create(f, dxpl_id, &bt2_cparam, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to create v2 B-tree for name index")
/* Retrieve the v2 B-tree's address in the file */
@@ -348,7 +348,7 @@ HDfprintf(stderr, "%s: linfo->name_bt2_addr = %a\n", FUNC, linfo->name_bt2_addr)
(uint32_t)fheap_id_len; /* Fractal heap ID */
bt2_cparam.split_percent = H5G_CORDER_BT2_SPLIT_PERC;
bt2_cparam.merge_percent = H5G_CORDER_BT2_MERGE_PERC;
- if(NULL == (bt2_corder = H5B2_create(f, dxpl_id, &bt2_cparam, NULL)))
+ if(NULL == (bt2_corder = H5B2_create(f, dxpl_id, &bt2_cparam, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to create v2 B-tree for creation order index")
/* Retrieve the v2 B-tree's address in the file */
@@ -440,7 +440,7 @@ HDfprintf(stderr, "%s: HDstrlen(lnk->name) = %Zu, link_size = %Zu\n", FUNC, HDst
HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, FAIL, "unable to insert link into fractal heap")
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(f, dxpl_id, linfo->name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(f, dxpl_id, linfo->name_bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Create the callback information for v2 B-tree record insertion */
@@ -462,7 +462,7 @@ HDfprintf(stderr, "%s: HDstrlen(lnk->name) = %Zu, link_size = %Zu\n", FUNC, HDst
if(linfo->index_corder) {
/* Open the creation order index v2 B-tree */
HDassert(H5F_addr_defined(linfo->corder_bt2_addr));
- if(NULL == (bt2_corder = H5B2_open(f, dxpl_id, linfo->corder_bt2_addr, NULL)))
+ if(NULL == (bt2_corder = H5B2_open(f, dxpl_id, linfo->corder_bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for creation order index")
/* Insert the record into the creation order index v2 B-tree */
@@ -559,7 +559,7 @@ H5G__dense_lookup(H5F_t *f, hid_t dxpl_id, const H5O_linfo_t *linfo,
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open fractal heap")
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(f, dxpl_id, linfo->name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(f, dxpl_id, linfo->name_bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Construct the user data for v2 B-tree callback */
@@ -735,7 +735,7 @@ H5G__dense_lookup_by_idx(H5F_t *f, hid_t dxpl_id, const H5O_linfo_t *linfo,
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open fractal heap")
/* Open the index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, bt2_addr, NULL)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for index")
/* Construct the user data for v2 B-tree callback */
@@ -1044,7 +1044,7 @@ H5G__dense_iterate(H5F_t *f, hid_t dxpl_id, const H5O_linfo_t *linfo,
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open fractal heap")
/* Open the index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, bt2_addr, NULL)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for index")
/* Construct the user data for v2 B-tree iterator callback */
@@ -1246,7 +1246,7 @@ H5G__dense_get_name_by_idx(H5F_t *f, hid_t dxpl_id, H5O_linfo_t *linfo,
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open fractal heap")
/* Open the index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, bt2_addr, NULL)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for index")
/* Set up the user data for the v2 B-tree 'record remove' callback */
@@ -1328,7 +1328,7 @@ H5G_dense_remove_fh_cb(const void *obj, size_t H5_ATTR_UNUSED obj_len, void *_ud
H5G_bt2_ud_common_t bt2_udata; /* Info for B-tree callbacks */
/* Open the creation order index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(udata->f, udata->dxpl_id, udata->corder_bt2_addr, NULL)))
+ if(NULL == (bt2 = H5B2_open(udata->f, udata->dxpl_id, udata->corder_bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for creation order index")
/* Set up the user data for the v2 B-tree 'record remove' callback */
@@ -1442,7 +1442,7 @@ H5G__dense_remove(H5F_t *f, hid_t dxpl_id, const H5O_linfo_t *linfo,
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open fractal heap")
/* Open the name index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, linfo->name_bt2_addr, NULL)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, linfo->name_bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Set up the user data for the v2 B-tree 'record remove' callback */
@@ -1579,7 +1579,7 @@ H5G_dense_remove_by_idx_bt2_cb(const void *_record, void *_bt2_udata)
} /* end else */
/* Open the index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(bt2_udata->f, bt2_udata->dxpl_id, bt2_udata->other_bt2_addr, NULL)))
+ if(NULL == (bt2 = H5B2_open(bt2_udata->f, bt2_udata->dxpl_id, bt2_udata->other_bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for 'other' index")
/* Set the common information for the v2 B-tree remove operation */
@@ -1684,7 +1684,7 @@ H5G__dense_remove_by_idx(H5F_t *f, hid_t dxpl_id, const H5O_linfo_t *linfo,
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open fractal heap")
/* Open the index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, bt2_addr, NULL)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for index")
/* Set up the user data for the v2 B-tree 'remove by index' callback */
@@ -1696,7 +1696,7 @@ H5G__dense_remove_by_idx(H5F_t *f, hid_t dxpl_id, const H5O_linfo_t *linfo,
udata.grp_full_path_r = grp_full_path_r;
/* Remove the record from the name index v2 B-tree */
- if(H5B2_remove_by_idx(bt2, dxpl_id, order, n, H5G_dense_remove_by_idx_bt2_cb, &udata) < 0)
+ if(H5B2_remove_by_idx(bt2, dxpl_id, order, n, NULL, H5G_dense_remove_by_idx_bt2_cb, &udata) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTREMOVE, FAIL, "unable to remove link from indexed v2 B-tree")
} /* end if */
else { /* Otherwise, we need to build a table of the links and sort it */
@@ -1778,7 +1778,7 @@ H5G__dense_delete(H5F_t *f, hid_t dxpl_id, H5O_linfo_t *linfo, hbool_t adj_link)
udata.replace_names = FALSE;
/* Delete the name index, adjusting the ref. count on links removed */
- if(H5B2_delete(f, dxpl_id, linfo->name_bt2_addr, NULL, H5G_dense_remove_bt2_cb, &udata) < 0)
+ if(H5B2_delete(f, dxpl_id, linfo->name_bt2_addr, NULL, NULL, H5G_dense_remove_bt2_cb, &udata) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTDELETE, FAIL, "unable to delete v2 B-tree for name index")
/* Close the fractal heap */
@@ -1787,7 +1787,7 @@ H5G__dense_delete(H5F_t *f, hid_t dxpl_id, H5O_linfo_t *linfo, hbool_t adj_link)
} /* end if */
else {
/* Delete the name index, without adjusting the ref. count on the links */
- if(H5B2_delete(f, dxpl_id, linfo->name_bt2_addr, NULL, NULL, NULL) < 0)
+ if(H5B2_delete(f, dxpl_id, linfo->name_bt2_addr, NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTDELETE, FAIL, "unable to delete v2 B-tree for name index")
} /* end else */
linfo->name_bt2_addr = HADDR_UNDEF;
@@ -1796,7 +1796,7 @@ H5G__dense_delete(H5F_t *f, hid_t dxpl_id, H5O_linfo_t *linfo, hbool_t adj_link)
if(linfo->index_corder) {
/* Delete the creation order index, without adjusting the ref. count on the links */
HDassert(H5F_addr_defined(linfo->corder_bt2_addr));
- if(H5B2_delete(f, dxpl_id, linfo->corder_bt2_addr, NULL, NULL, NULL) < 0)
+ if(H5B2_delete(f, dxpl_id, linfo->corder_bt2_addr, NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTDELETE, FAIL, "unable to delete v2 B-tree for creation order index")
linfo->corder_bt2_addr = HADDR_UNDEF;
} /* end if */
diff --git a/src/H5Gint.c b/src/H5Gint.c
index cba4806..f7f4afe 100644
--- a/src/H5Gint.c
+++ b/src/H5Gint.c
@@ -472,7 +472,8 @@ done:
herr_t
H5G_close(H5G_t *grp)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ hbool_t corked; /* Whether the group is corked or not */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -485,6 +486,14 @@ H5G_close(H5G_t *grp)
if(0 == grp->shared->fo_count) {
HDassert(grp != H5G_rootof(H5G_fileof(grp)));
+ /* Uncork cache entries with object address tag */
+ if(H5AC_cork(grp->oloc.file, grp->oloc.addr, H5AC__GET_CORKED, &corked) < 0)
+ HGOTO_ERROR(H5E_ATOM, H5E_SYSTEM, FAIL, "unable to retrieve an object's cork status")
+ else if(corked) {
+ if(H5AC_cork(grp->oloc.file, grp->oloc.addr, H5AC__UNCORK, NULL) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_SYSTEM, FAIL, "unable to uncork an object")
+ }
+
/* Remove the group from the list of opened objects in the file */
if(H5FO_top_decr(grp->oloc.file, grp->oloc.addr) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTRELEASE, FAIL, "can't decrement count for object")
diff --git a/src/H5Gobj.c b/src/H5Gobj.c
index f7782a6..4991967 100644
--- a/src/H5Gobj.c
+++ b/src/H5Gobj.c
@@ -208,7 +208,7 @@ H5G__obj_create_real(H5F_t *f, hid_t dxpl_id, const H5O_ginfo_t *ginfo,
/* Check for using the latest version of the group format */
/* (add more checks for creating "new format" groups when needed) */
- if(H5F_USE_LATEST_FORMAT(f) || linfo->track_corder
+ if(H5F_USE_LATEST_FLAGS(f, H5F_LATEST_STYLE_GROUP) || linfo->track_corder
|| (pline && pline->nused))
use_latest_format = TRUE;
else
@@ -342,7 +342,7 @@ H5G__obj_get_linfo(const H5O_loc_t *grp_oloc, H5O_linfo_t *linfo, hid_t dxpl_id)
/* Check if we are using "dense" link storage */
if(H5F_addr_defined(linfo->fheap_addr)) {
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(grp_oloc->file, dxpl_id, linfo->name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(grp_oloc->file, dxpl_id, linfo->name_bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Retrieve # of records in "name" B-tree */
diff --git a/src/H5Goh.c b/src/H5Goh.c
index bde540c..dc6083c 100644
--- a/src/H5Goh.c
+++ b/src/H5Goh.c
@@ -363,7 +363,7 @@ H5O_group_bh_info(const H5O_loc_t *loc, hid_t dxpl_id, H5O_t *oh, H5_ih_info_t *
/* Check if name index available */
if(H5F_addr_defined(linfo.name_bt2_addr)) {
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(loc->file, dxpl_id, linfo.name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(loc->file, dxpl_id, linfo.name_bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Get name index B-tree size */
@@ -374,7 +374,7 @@ H5O_group_bh_info(const H5O_loc_t *loc, hid_t dxpl_id, H5O_t *oh, H5_ih_info_t *
/* Check if creation order index available */
if(H5F_addr_defined(linfo.corder_bt2_addr)) {
/* Open the creation order index v2 B-tree */
- if(NULL == (bt2_corder = H5B2_open(loc->file, dxpl_id, linfo.corder_bt2_addr, NULL)))
+ if(NULL == (bt2_corder = H5B2_open(loc->file, dxpl_id, linfo.corder_bt2_addr, NULL, NULL)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for creation order index")
/* Get creation order index B-tree size */
diff --git a/src/H5Gpublic.h b/src/H5Gpublic.h
index 5b8b054..10b07c0 100644
--- a/src/H5Gpublic.h
+++ b/src/H5Gpublic.h
@@ -167,6 +167,8 @@ H5_DLL herr_t H5Gget_objinfo(hid_t loc_id, const char *name,
hbool_t follow_link, H5G_stat_t *statbuf/*out*/);
H5_DLL ssize_t H5Gget_objname_by_idx(hid_t loc_id, hsize_t idx, char* name,
size_t size);
+H5_DLL herr_t H5Gflush(hid_t group_id);
+H5_DLL herr_t H5Grefresh(hid_t group_id);
H5_DLL H5G_obj_t H5Gget_objtype_by_idx(hid_t loc_id, hsize_t idx);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
diff --git a/src/H5Gstab.c b/src/H5Gstab.c
index 3a7cd9e..c8e5bc8 100644
--- a/src/H5Gstab.c
+++ b/src/H5Gstab.c
@@ -152,8 +152,8 @@ H5G__stab_create_components(H5F_t *f, H5O_stab_t *stab, size_t size_hint, hid_t
HDassert(size_hint > 0);
/* Create the B-tree */
- if(H5B_create(f, dxpl_id, H5B_SNODE, NULL, &(stab->btree_addr)/*out*/) < 0)
- HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "can't create B-tree")
+ if(FAIL == H5B_create(f, dxpl_id, H5B_SNODE, NULL, &(stab->btree_addr)/*out*/))
+ HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "can't create B-tree")
/* Create symbol table private heap */
if(FAIL == H5HL_create(f, dxpl_id, size_hint, &(stab->heap_addr)/*out*/))
@@ -1067,10 +1067,10 @@ H5G__stab_valid(H5O_loc_t *grp_oloc, hid_t dxpl_id, H5O_stab_t *alt_stab)
HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "unable to read symbol table message");
/* Check if the symbol table message's b-tree address is valid */
- if(H5B_valid(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr) < 0) {
+ if(H5B_valid(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr, NULL) < 0) {
/* Address is invalid, try the b-tree address in the alternate symbol
* table message */
- if(!alt_stab || H5B_valid(grp_oloc->file, dxpl_id, H5B_SNODE, alt_stab->btree_addr) < 0)
+ if(!alt_stab || H5B_valid(grp_oloc->file, dxpl_id, H5B_SNODE, alt_stab->btree_addr, NULL) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "unable to locate b-tree")
else {
/* The alternate symbol table's b-tree address is valid. Adjust the
diff --git a/src/H5Gtest.c b/src/H5Gtest.c
index a8796fb..b021f9d 100644
--- a/src/H5Gtest.c
+++ b/src/H5Gtest.c
@@ -413,7 +413,7 @@ H5G__new_dense_info_test(hid_t gid, hsize_t *name_count, hsize_t *corder_count)
HGOTO_DONE_TAG(FAIL, FAIL)
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(grp->oloc.file, dxpl_id, linfo.name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(grp->oloc.file, dxpl_id, linfo.name_bt2_addr, NULL, NULL)))
HGOTO_ERROR_TAG(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Retrieve # of records in name index */
@@ -423,7 +423,7 @@ H5G__new_dense_info_test(hid_t gid, hsize_t *name_count, hsize_t *corder_count)
/* Check if there is a creation order index */
if(H5F_addr_defined(linfo.corder_bt2_addr)) {
/* Open the creation order index v2 B-tree */
- if(NULL == (bt2_corder = H5B2_open(grp->oloc.file, dxpl_id, linfo.corder_bt2_addr, NULL)))
+ if(NULL == (bt2_corder = H5B2_open(grp->oloc.file, dxpl_id, linfo.corder_bt2_addr, NULL, NULL)))
HGOTO_ERROR_TAG(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for creation order index")
/* Retrieve # of records in creation order index */
@@ -633,7 +633,7 @@ H5G__verify_cached_stab_test(H5O_loc_t *grp_oloc, H5G_entry_t *ent)
HGOTO_ERROR(H5E_SYM, H5E_BADVALUE, FAIL, "cached stab info does not match object header")
/* Verify that the btree address is valid */
- if(H5B_valid(grp_oloc->file, dxpl_id, H5B_SNODE, stab.btree_addr) < 0)
+ if(H5B_valid(grp_oloc->file, H5AC_ind_dxpl_id, H5B_SNODE, stab.btree_addr, NULL) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "b-tree address is invalid")
/* Verify that the heap address is valid */
diff --git a/src/H5HF.c b/src/H5HF.c
index efd57ae..3a6d4ea 100644
--- a/src/H5HF.c
+++ b/src/H5HF.c
@@ -952,3 +952,90 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HF_delete() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_depend
+ *
+ * Purpose:     Make a child flush dependency between the fractal heap's
+ * header and another piece of metadata in the file.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_depend(H5AC_info_t *parent_entry, H5HF_t *fh)
+{
+ /* Local variables */
+ H5HF_hdr_t *hdr = fh->hdr; /* Header for fractal heap */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+#ifdef QAK
+HDfprintf(stderr, "%s: Called\n", FUNC);
+#endif /* QAK */
+
+ /*
+ * Check arguments.
+ */
+ HDassert(fh);
+ HDassert(hdr);
+
+ /* Set the shared heap header's file context for this operation */
+ hdr->f = fh->f;
+
+ /* Set up flush dependency between parent entry and fractal heap header */
+ if(H5HF__create_flush_depend(parent_entry, (H5AC_info_t *)hdr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDEPEND, FAIL, "unable to create flush dependency on file metadata")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF_depend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF_undepend
+ *
+ * Purpose: Remove a child flush dependency between the fractal heap's
+ * header and another piece of metadata in the file.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF_undepend(H5AC_info_t *parent_entry, H5HF_t *fh)
+{
+ /* Local variables */
+ H5HF_hdr_t *hdr = fh->hdr; /* Header for fractal heap */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+#ifdef QAK
+HDfprintf(stderr, "%s: Called\n", FUNC);
+#endif /* QAK */
+
+ /*
+ * Check arguments.
+ */
+ HDassert(fh);
+ HDassert(hdr);
+
+ /* Set the shared heap header's file context for this operation */
+ hdr->f = fh->f;
+
+ /* Remove flush dependency between parent entry and fractal heap header */
+ if(H5HF__destroy_flush_depend(parent_entry, (H5AC_info_t *)hdr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency on file metadata")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF_undepend() */
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index 4f3dfc3..7337f39 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -73,7 +73,10 @@ static herr_t H5HF__dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *d
static herr_t H5HF__dtable_decode(H5F_t *f, const uint8_t **pp, H5HF_dtable_t *dtable);
/* Metadata cache (H5AC) callbacks */
-static herr_t H5HF__cache_hdr_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5HF__cache_hdr_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5HF__cache_hdr_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5HF__cache_hdr_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5HF__cache_hdr_image_len(const void *thing, size_t *image_len,
@@ -86,7 +89,10 @@ static herr_t H5HF__cache_hdr_serialize(const H5F_t *f, void *image,
size_t len, void *thing);
static herr_t H5HF__cache_hdr_free_icr(void *thing);
-static herr_t H5HF__cache_iblock_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5HF__cache_iblock_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5HF__cache_iblock_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5HF__cache_iblock_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5HF__cache_iblock_image_len(const void *thing,
@@ -101,7 +107,10 @@ static herr_t H5HF__cache_iblock_serialize(const H5F_t *f, void *image,
static herr_t H5HF__cache_iblock_notify(H5C_notify_action_t action, void *thing);
static herr_t H5HF__cache_iblock_free_icr(void *thing);
-static herr_t H5HF__cache_dblock_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5HF__cache_dblock_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5HF__cache_dblock_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5HF__cache_dblock_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5HF__cache_dblock_image_len(const void *thing,
@@ -140,6 +149,7 @@ const H5AC_class_t H5AC_FHEAP_HDR[1] = {{
H5FD_MEM_FHEAP_HDR, /* File space memory type for client */
H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
H5HF__cache_hdr_get_load_size, /* 'get_load_size' callback */
+ H5HF__cache_hdr_verify_chksum, /* 'verify_chksum' callback */
H5HF__cache_hdr_deserialize, /* 'deserialize' callback */
H5HF__cache_hdr_image_len, /* 'image_len' callback */
H5HF__cache_hdr_pre_serialize, /* 'pre_serialize' callback */
@@ -157,6 +167,7 @@ const H5AC_class_t H5AC_FHEAP_IBLOCK[1] = {{
H5FD_MEM_FHEAP_IBLOCK, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5HF__cache_iblock_get_load_size, /* 'get_load_size' callback */
+ H5HF__cache_iblock_verify_chksum, /* 'verify_chksum' callback */
H5HF__cache_iblock_deserialize, /* 'deserialize' callback */
H5HF__cache_iblock_image_len, /* 'image_len' callback */
H5HF__cache_iblock_pre_serialize, /* 'pre_serialize' callback */
@@ -174,6 +185,7 @@ const H5AC_class_t H5AC_FHEAP_DBLOCK[1] = {{
H5FD_MEM_FHEAP_DBLOCK, /* File space memory type for client */
H5C__CLASS_COMPRESSED_FLAG, /* Client class behavior flags */
H5HF__cache_dblock_get_load_size, /* 'get_load_size' callback */
+ H5HF__cache_dblock_verify_chksum, /* 'verify_chksum' callback */
H5HF__cache_dblock_deserialize, /* 'deserialize' callback */
H5HF__cache_dblock_image_len, /* 'image_len' callback */
H5HF__cache_dblock_pre_serialize, /* 'pre_serialize' callback */
@@ -327,10 +339,16 @@ H5HF__dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *dtable)
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF__cache_hdr_get_load_size(const void *_udata, size_t *image_len)
+H5HF__cache_hdr_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5HF_hdr_cache_ud_t *udata = (const H5HF_hdr_cache_ud_t *)_udata; /* pointer to user data */
- H5HF_hdr_t dummy_hdr; /* dummy header -- to compute size */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5HF_hdr_cache_ud_t *udata = (H5HF_hdr_cache_ud_t *)_udata; /* pointer to user data */
+ H5HF_hdr_t dummy_hdr; /* dummy header -- to compute size */
+ unsigned id_len; /* Size of heap IDs (in bytes) */
+ unsigned filter_len; /* Size of I/O filter information (in bytes) */
+ size_t filter_info_size; /* Size of filter information */
+ htri_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC_NOERR
@@ -338,19 +356,88 @@ H5HF__cache_hdr_get_load_size(const void *_udata, size_t *image_len)
HDassert(udata);
HDassert(image_len);
- /* Set the internal parameters for the heap */
- dummy_hdr.f = udata->f;
- dummy_hdr.sizeof_size = H5F_SIZEOF_SIZE(udata->f);
- dummy_hdr.sizeof_addr = H5F_SIZEOF_ADDR(udata->f);
+ if(image == NULL) {
+ /* Set the internal parameters for the heap */
+ dummy_hdr.f = udata->f;
+ dummy_hdr.sizeof_size = H5F_SIZEOF_SIZE(udata->f);
+ dummy_hdr.sizeof_addr = H5F_SIZEOF_ADDR(udata->f);
- /* Compute the 'base' size of the fractal heap header on disk */
- *image_len = (size_t)H5HF_HEADER_SIZE(&dummy_hdr);
+ /* Compute the 'base' size of the fractal heap header on disk */
+ *image_len = (size_t)H5HF_HEADER_SIZE(&dummy_hdr);
- FUNC_LEAVE_NOAPI(SUCCEED)
+ } else { /* compute actual_len */
+
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+
+ /* Magic number */
+ if(HDmemcmp(image, H5HF_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_DONE(FAIL)
+ image += H5_SIZEOF_MAGIC;
+
+ /* Version */
+ if(*image++ != H5HF_HDR_VERSION)
+ HGOTO_DONE(FAIL)
+
+ /* General heap information */
+ UINT16DECODE(image, id_len); /* Heap ID length */
+ UINT16DECODE(image, filter_len); /* I/O filters' encoded length */
+
+ if(filter_len > 0) {
+
+ /* Compute the size of the extra filter information */
+ filter_info_size = (size_t)(H5F_SIZEOF_SIZE(udata->f) /* Size of size for filtered root direct block */
+ + (unsigned)4 /* Size of filter mask for filtered root direct block */
+ + filter_len); /* Size of encoded I/O filter info */
+
+ /* Compute the heap header's size */
+ *actual_len += filter_info_size;
+ }
+
+ } /* compute actual_len */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HF__cache_hdr_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5HF__cache_hdr_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5HF__cache_hdr_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF__cache_hdr_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5HF__cache_hdr_deserialize
*
* Purpose: Given a buffer containing an on disk image of a fractal heap
@@ -399,7 +486,6 @@ H5HF__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
const uint8_t *image = (const uint8_t *)_image; /* Pointer into into supplied image */
size_t size; /* Header size */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
uint8_t heap_flags; /* Status flags for heap */
void * ret_value = NULL; /* Return value */
@@ -530,9 +616,7 @@ H5HF__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
/* Set the heap header's size */
hdr->heap_size = size;
- /* Compute checksum on entire header */
- /* (including the filter information, if present) */
- computed_chksum = H5_checksum_metadata(_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
@@ -540,10 +624,6 @@ H5HF__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == hdr->heap_size);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "incorrect metadata checksum for fractal heap header")
-
/* Finish initialization of heap header */
if(H5HF_hdr_finish_init(hdr) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, NULL, "can't finish initializing shared fractal heap header")
@@ -853,17 +933,26 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF__cache_iblock_get_load_size(const void *_udata, size_t *image_len)
+H5HF__cache_iblock_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5HF_iblock_cache_ud_t *udata = (const H5HF_iblock_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5HF_iblock_cache_ud_t *udata = (H5HF_iblock_cache_ud_t *)_udata; /* User data for callback */
FUNC_ENTER_STATIC_NOERR
/* Sanity checks */
HDassert(udata);
+ HDassert(udata->par_info);
+ HDassert(udata->par_info->hdr);
HDassert(image_len);
- *image_len = (size_t)H5HF_MAN_INDIRECT_SIZE(udata->par_info->hdr, *udata->nrows);
+ if(image == NULL)
+ *image_len = (size_t)H5HF_MAN_INDIRECT_SIZE(udata->par_info->hdr, *udata->nrows);
+ else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ } /* end else */
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5HF__cache_iblock_get_load_size() */
@@ -871,6 +960,42 @@ H5HF__cache_iblock_get_load_size(const void *_udata, size_t *image_len)
/***********************************************************/
/* metadata cache callback definitions for indirect blocks */
/***********************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF__cache_iblock_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5HF__cache_iblock_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF__cache_iblock_verify_chksum() */
+
/*-------------------------------------------------------------------------
@@ -903,7 +1028,6 @@ H5HF__cache_iblock_deserialize(const void *_image, size_t len, void *_udata,
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
haddr_t heap_addr; /* Address of heap header in the file */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
unsigned u; /* Local index variable */
void * ret_value = NULL; /* Return value */
@@ -1030,8 +1154,7 @@ H5HF__cache_iblock_deserialize(const void *_image, size_t len, void *_udata,
/* Sanity check */
HDassert(iblock->nchildren); /* indirect blocks w/no children should have been deleted */
- /* Compute checksum on indirect block */
- computed_chksum = H5_checksum_metadata((const uint8_t *)_image, (size_t)(image - (const uint8_t *)_image), 0);
+ /* checksum verification already done by verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
@@ -1039,10 +1162,6 @@ H5HF__cache_iblock_deserialize(const void *_image, size_t len, void *_udata,
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == iblock->size);
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "incorrect metadata checksum for fractal heap indirect block")
-
/* Check if we have any indirect block children */
if(iblock->nrows > hdr->man_dtable.max_direct_rows) {
unsigned indir_rows;/* Number of indirect rows in this indirect block */
@@ -1520,12 +1639,15 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF__cache_dblock_get_load_size(const void *_udata, size_t *image_len)
+H5HF__cache_dblock_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr)
{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
const H5HF_dblock_cache_ud_t *udata = (const H5HF_dblock_cache_ud_t *)_udata; /* User data for callback */
- const H5HF_parent_t *par_info; /* Pointer to parent information */
- const H5HF_hdr_t *hdr; /* Shared fractal heap information */
- size_t size;
+ const H5HF_parent_t *par_info; /* Pointer to parent information */
+ const H5HF_hdr_t *hdr; /* Shared fractal heap information */
+ size_t compressed_size=0;
+ hbool_t compressed;
FUNC_ENTER_STATIC_NOERR
@@ -1541,17 +1663,45 @@ H5HF__cache_dblock_get_load_size(const void *_udata, size_t *image_len)
/* Check for I/O filters on this heap */
if(hdr->filter_len > 0) {
- /* Check for root direct block */
- if(par_info->iblock == NULL)
- size = hdr->pline_root_direct_size;
- else
- size = par_info->iblock->filt_ents[par_info->entry].size;
- } /* end if */
- else
- size = udata->dblock_size;
-
- *image_len = size;
+ /* Check for root direct block */
+ if(par_info->iblock == NULL) {
+ /* filtered direct block */
+ compressed_size = hdr->pline_root_direct_size;
+ } /* end if */
+ else {
+ /* filtered direct block */
+ compressed_size = par_info->iblock->filt_ents[par_info->entry].size;
+ } /* end else */
+ }
+
+ if(image == NULL) {
+
+ /* depend on I/O filters on this heap */
+ *image_len = (hdr->filter_len > 0) ? compressed_size:udata->dblock_size;
+
+ } else {
+
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ HDassert(compressed_ptr);
+ HDassert(compressed_image_len_ptr);
+
+ if(hdr->filter_len > 0) {
+ HDassert(*image_len == compressed_size);
+ compressed = TRUE;
+ } else {
+ HDassert(*image_len == udata->dblock_size);
+ compressed = FALSE;
+ compressed_size = 0;
+ }
+
+ /* decompressed size */
+ *actual_len = udata->dblock_size;
+ *compressed_ptr = compressed;
+ *compressed_image_len_ptr = compressed_size;
+ }
+
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5HF__cache_dblock_get_load_size() */
@@ -1559,6 +1709,139 @@ H5HF__cache_dblock_get_load_size(const void *_udata, size_t *image_len)
/* metadata cache callback definitions for direct blocks */
/*********************************************************/
+/*-------------------------------------------------------------------------
+ * Function: H5HF__cache_dblock_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5HF__cache_dblock_verify_chksum(const void *_image, size_t len, void *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5HF_dblock_cache_ud_t *udata = (H5HF_dblock_cache_ud_t *)_udata; /* User data for callback */
+ void *read_buf = NULL; /* Pointer to buffer to read in */
+ size_t read_size; /* Size of filtered direct block to read */
+ H5HF_hdr_t *hdr; /* Shared fractal heap information */
+ H5HF_parent_t *par_info; /* Pointer to parent information */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ size_t chk_size; /* The size for validating checksum */
+ uint8_t *chk_p; /* Pointer to the area for validating checksum */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(image);
+ HDassert(udata);
+ par_info = (H5HF_parent_t *)(&(udata->par_info));
+ HDassert(par_info);
+ hdr = par_info->hdr;
+ HDassert(hdr);
+
+ /* len is the decompressed size of the direct block */
+
+ udata->decompressed = FALSE;
+ udata->dblk = NULL;
+
+ /* Get out if data block is not checksummed */
+ if(!(hdr->checksum_dblocks))
+ HGOTO_DONE(TRUE);
+
+ /* Determine the size on disk */
+ if(hdr->filter_len > 0) {
+
+ /* Check for root direct block */
+ if(par_info->iblock == NULL) {
+ /* Set up parameters to read filtered direct block */
+ read_size = hdr->pline_root_direct_size;
+ } /* end if */
+ else {
+ /* Set up parameters to read filtered direct block */
+ read_size = par_info->iblock->filt_ents[par_info->entry].size;
+ } /* end else */
+ } else
+ read_size = len;
+
+ /* Allocate buffer to perform I/O filtering on and copy image into
+ * it. Must do this as H5Z_pipeline() may re-sized the buffer
+ * provided to it.
+ */
+ if(NULL == (read_buf = H5MM_malloc(read_size)))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "memory allocation failed for pipeline buffer")
+
+ if(hdr->filter_len > 0) {
+ size_t nbytes; /* Number of bytes used in buffer, after applying reverse filters */
+ unsigned filter_mask; /* Excluded filters for direct block */
+ H5Z_cb_t filter_cb = {NULL, NULL}; /* Filter callback structure */
+
+ /* Push direct block data through I/O filter pipeline */
+ nbytes = read_size;
+ filter_mask = udata->filter_mask;
+ HDmemcpy(read_buf, image, read_size);
+
+ if(H5Z_pipeline(&(hdr->pline), H5Z_FLAG_REVERSE, &filter_mask, H5Z_ENABLE_EDC, filter_cb, &nbytes, &read_size, &read_buf) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFILTER, FAIL, "output pipeline failed")
+
+ /* Sanity check */
+ HDassert(nbytes == len);
+ udata->decompressed = TRUE;
+
+ } else
+ HDmemcpy(read_buf, image, read_size);
+
+ /* Decode checksum */
+ chk_size = (size_t)(H5HF_MAN_ABS_DIRECT_OVERHEAD(hdr) - H5HF_SIZEOF_CHKSUM);
+ chk_p = (uint8_t *)read_buf + chk_size;
+
+ /* Metadata checksum */
+ UINT32DECODE(chk_p, stored_chksum);
+
+ chk_p -= H5HF_SIZEOF_CHKSUM;
+
+ /* Reset checksum field, for computing the checksum */
+ /* (Casting away const OK - QAK) */
+ HDmemset(chk_p, 0, (size_t)H5HF_SIZEOF_CHKSUM);
+
+ /* Compute checksum on entire direct block */
+ computed_chksum = H5_checksum_metadata(read_buf, len, 0);
+
+ /* Restore the checksum */
+ UINT32ENCODE(chk_p, stored_chksum)
+
+ /* Verify checksum */
+ if(stored_chksum != computed_chksum)
+ HGOTO_DONE(FALSE);
+
+ /* Save the decompressed data to be used later in deserialize callback */
+ if(hdr->filter_len > 0) {
+
+ HDassert(udata->decompressed);
+ HDassert(len == udata->dblock_size);
+ /* Allocate block buffer */
+ if(NULL == (udata->dblk = H5FL_BLK_MALLOC(direct_block, (size_t)len)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
+
+ /* Copy un-filtered data into block's buffer */
+ HDmemcpy(udata->dblk, read_buf, len);
+ }
+
+done:
+ /* Release the read buffer */
+ if(read_buf)
+ H5MM_xfree(read_buf);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF__cache_dblock_verify_chksum() */
+
/*-------------------------------------------------------------------------
* Function: H5HF__cache_dblock_deserialize
@@ -1626,50 +1909,66 @@ H5HF__cache_dblock_deserialize(const void *_image, size_t len, void *_udata,
dblock->write_buf = NULL;
dblock->write_size = 0;
- /* Allocate block buffer */
+ if(udata->dblk && udata->decompressed) {
+ /* direct block is already decompressed in verify_chksum callback */
+ HDassert(hdr->filter_len > 0);
+ HDassert(len == dblock->size);
+ dblock->blk = udata->dblk;
+ } else {
+ HDassert(udata->dblk == NULL);
+ HDassert(!udata->decompressed);
+
+ /* Allocate block buffer */
/* XXX: Change to using free-list factories */
- if(NULL == (dblock->blk = H5FL_BLK_MALLOC(direct_block, (size_t)dblock->size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ if(NULL == (dblock->blk = H5FL_BLK_MALLOC(direct_block, (size_t)dblock->size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ }
/* Check for I/O filters on this heap */
if(hdr->filter_len > 0) {
- H5Z_cb_t filter_cb = {NULL, NULL}; /* Filter callback structure */
- size_t nbytes; /* Number of bytes used in buffer, after applying reverse filters */
- void *read_buf; /* Pointer to buffer to read in */
- size_t read_size; /* Size of filtered direct block to read */
- unsigned filter_mask; /* Excluded filters for direct block */
-
- /* Check for root direct block */
- if(par_info->iblock == NULL)
- /* Set up parameters to read filtered direct block */
- read_size = hdr->pline_root_direct_size;
- else
- /* Set up parameters to read filtered direct block */
- read_size = par_info->iblock->filt_ents[par_info->entry].size;
- HDassert(len == read_size);
+ H5Z_cb_t filter_cb = {NULL, NULL}; /* Filter callback structure */
+ size_t nbytes; /* Number of bytes used in buffer, after applying reverse filters */
+ void *read_buf; /* Pointer to buffer to read in */
+ size_t read_size; /* Size of filtered direct block to read */
+ unsigned filter_mask; /* Excluded filters for direct block */
+
+ if(!udata->decompressed) {
+ HDassert(udata->dblk == NULL);
+
+ /* Check for root direct block */
+ if(par_info->iblock == NULL) {
+ /* Set up parameters to read filtered direct block */
+ read_size = hdr->pline_root_direct_size;
+ } /* end if */
+ else {
+ /* Set up parameters to read filtered direct block */
+ read_size = par_info->iblock->filt_ents[par_info->entry].size;
+ } /* end else */
+ HDassert(len == read_size);
- /* Allocate buffer to perform I/O filtering on and copy image into
- * it. Must do this as H5Z_pipeline() may re-sized the buffer
- * provided to it.
- */
- if(NULL == (read_buf = H5MM_malloc(read_size)))
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, NULL, "memory allocation failed for pipeline buffer")
- HDmemcpy(read_buf, _image, len);
+ /* Allocate buffer to perform I/O filtering on and copy image into
+ * it. Must do this as H5Z_pipeline() may re-sized the buffer
+ * provided to it.
+ */
+ if(NULL == (read_buf = H5MM_malloc(read_size)))
+ HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, NULL, "memory allocation failed for pipeline buffer")
+ HDmemcpy(read_buf, _image, len);
- /* Push direct block data through I/O filter pipeline */
- nbytes = read_size;
- filter_mask = udata->filter_mask;
- if(H5Z_pipeline(&(hdr->pline), H5Z_FLAG_REVERSE, &filter_mask, H5Z_ENABLE_EDC, filter_cb, &nbytes, &read_size, &read_buf) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTFILTER, NULL, "output pipeline failed")
+ /* Push direct block data through I/O filter pipeline */
+ nbytes = read_size;
+ filter_mask = udata->filter_mask;
+ if(H5Z_pipeline(&(hdr->pline), H5Z_FLAG_REVERSE, &filter_mask, H5Z_ENABLE_EDC, filter_cb, &nbytes, &read_size, &read_buf) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTFILTER, NULL, "output pipeline failed")
- /* Sanity check */
- HDassert(nbytes == dblock->size);
+ /* Sanity check */
+ HDassert(nbytes == dblock->size);
- /* Copy un-filtered data into block's buffer */
- HDmemcpy(dblock->blk, read_buf, dblock->size);
+ /* Copy un-filtered data into block's buffer */
+ HDmemcpy(dblock->blk, read_buf, dblock->size);
- /* Release the read buffer */
- H5MM_xfree(read_buf);
+ /* Release the read buffer */
+ H5MM_xfree(read_buf);
+ }
} /* end if */
else {
/* copy image to dblock->blk */
@@ -1709,22 +2008,13 @@ H5HF__cache_dblock_deserialize(const void *_image, size_t len, void *_udata,
/* Decode checksum on direct block, if requested */
if(hdr->checksum_dblocks) {
- uint32_t stored_chksum; /* Metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
+ uint32_t stored_chksum; /* Metadata checksum value */
+
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(image, stored_chksum);
- /* Reset checksum field, for computing the checksum */
- /* (Casting away const OK - QAK) */
- HDmemset((uint8_t *)image - H5HF_SIZEOF_CHKSUM, 0, (size_t)H5HF_SIZEOF_CHKSUM);
-
- /* Compute checksum on entire direct block */
- computed_chksum = H5_checksum_metadata(dblock->blk, dblock->size, 0);
-
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "incorrect metadata checksum for fractal heap direct block")
} /* end if */
/* Sanity check */
diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c
index 19ea14a..d1426c6 100644
--- a/src/H5HFhdr.c
+++ b/src/H5HFhdr.c
@@ -126,6 +126,7 @@ H5HF_hdr_alloc(H5F_t *f)
/* Set the internal parameters for the heap */
hdr->f = f;
+ hdr->swmr_write = (H5F_INTENT(f) & H5F_ACC_SWMR_WRITE) > 0;
hdr->sizeof_size = H5F_SIZEOF_SIZE(f);
hdr->sizeof_addr = H5F_SIZEOF_ADDR(f);
@@ -427,7 +428,7 @@ H5HF_hdr_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCOPY, HADDR_UNDEF, "can't copy I/O filter pipeline")
/* Pay attention to the latest version flag for the file */
- if(H5F_USE_LATEST_FORMAT(hdr->f))
+ if(H5F_USE_LATEST_FLAGS(hdr->f, H5F_LATEST_PLINE_MSG))
/* Set the latest version for the I/O pipeline message */
if(H5O_pline_set_latest_version(&(hdr->pline)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTSET, HADDR_UNDEF, "can't set latest version of I/O filter pipeline")
diff --git a/src/H5HFhuge.c b/src/H5HFhuge.c
index ccee89e..95095e9 100644
--- a/src/H5HFhuge.c
+++ b/src/H5HFhuge.c
@@ -157,13 +157,16 @@ H5HF_huge_bt2_create(H5HF_hdr_t *hdr, hid_t dxpl_id)
bt2_cparam.merge_percent = H5HF_HUGE_BT2_MERGE_PERC;
/* Create v2 B-tree for tracking 'huge' objects */
- if(NULL == (hdr->huge_bt2 = H5B2_create(hdr->f, dxpl_id, &bt2_cparam, hdr->f)))
+ if(NULL == (hdr->huge_bt2 = H5B2_create(hdr->f, dxpl_id, &bt2_cparam, hdr->f, NULL)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTCREATE, FAIL, "can't create v2 B-tree for tracking 'huge' heap objects")
/* Retrieve the v2 B-tree's address in the file */
if(H5B2_get_addr(hdr->huge_bt2, &hdr->huge_bt2_addr) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get v2 B-tree address for tracking 'huge' heap objects")
+ /* Create a flush dependency between the 'huge' v2 B-tree and the fractal heap */
+ if(hdr->swmr_write && H5B2_depend((H5AC_info_t *)hdr, hdr->huge_bt2) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDEPEND, FAIL, "can't create flush dependency between fractal heap and 'huge' v2 B-tree")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HF_huge_bt2_create() */
@@ -339,7 +342,7 @@ HDfprintf(stderr, "%s: obj_size = %Zu\n", FUNC, obj_size);
/* Check if v2 B-tree is open yet */
if(NULL == hdr->huge_bt2) {
/* Open existing v2 B-tree */
- if(NULL == (hdr->huge_bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f)))
+ if(NULL == (hdr->huge_bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f, NULL)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for tracking 'huge' heap objects")
} /* end if */
} /* end else */
@@ -545,7 +548,7 @@ H5HF_huge_get_obj_len(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
/* Check if v2 B-tree is open yet */
if(NULL == hdr->huge_bt2) {
/* Open existing v2 B-tree */
- if(NULL == (hdr->huge_bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f)))
+ if(NULL == (hdr->huge_bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f, NULL)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for tracking 'huge' heap objects")
} /* end if */
@@ -629,7 +632,7 @@ H5HF__huge_get_obj_off(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
/* Check if v2 B-tree is open yet */
if(NULL == hdr->huge_bt2) {
/* Open existing v2 B-tree */
- if(NULL == (hdr->huge_bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f)))
+ if(NULL == (hdr->huge_bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f, NULL)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for tracking 'huge' heap objects")
} /* end if */
@@ -723,7 +726,7 @@ H5HF_huge_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
/* Check if v2 B-tree is open yet */
if(NULL == hdr->huge_bt2) {
/* Open existing v2 B-tree */
- if(NULL == (hdr->huge_bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f)))
+ if(NULL == (hdr->huge_bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f, NULL)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for tracking 'huge' heap objects")
} /* end if */
@@ -871,7 +874,7 @@ H5HF_huge_write(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
/* Check if v2 B-tree is open yet */
if(NULL == hdr->huge_bt2) {
/* Open existing v2 B-tree */
- if(NULL == (hdr->huge_bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f)))
+ if(NULL == (hdr->huge_bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f, NULL)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for tracking 'huge' heap objects")
} /* end if */
@@ -1001,7 +1004,7 @@ H5HF_huge_remove(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id)
/* Check if v2 B-tree is open yet */
if(NULL == hdr->huge_bt2) {
/* Open existing v2 B-tree */
- if(NULL == (hdr->huge_bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f)))
+ if(NULL == (hdr->huge_bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f, NULL)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for tracking 'huge' heap objects")
} /* end if */
@@ -1121,10 +1124,14 @@ H5HF_huge_term(H5HF_hdr_t *hdr, hid_t dxpl_id)
if(H5F_addr_defined(hdr->huge_bt2_addr) && hdr->huge_nobjs == 0) {
/* Sanity check */
HDassert(hdr->huge_size == 0);
+
+ /* Destroy the flush dependency between the 'huge' v2 B-tree and the fractal heap */
+ if(hdr->swmr_write && H5B2_undepend((H5AC_info_t *)hdr, hdr->huge_bt2) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNDEPEND, FAIL, "can't destroy flush dependency between fractal heap and 'huge' v2 B-tree")
/* Delete the v2 B-tree */
/* (any v2 B-tree class will work here) */
- if(H5B2_delete(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f, NULL, NULL) < 0)
+ if(H5B2_delete(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTDELETE, FAIL, "can't delete v2 B-tree")
/* Reset the information about 'huge' objects in the file */
@@ -1192,7 +1199,7 @@ H5HF_huge_delete(H5HF_hdr_t *hdr, hid_t dxpl_id)
} /* end else */
/* Delete the v2 B-tree */
- if(H5B2_delete(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f, op, &udata) < 0)
+ if(H5B2_delete(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f, NULL, op, &udata) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTDELETE, FAIL, "can't delete v2 B-tree")
done:
diff --git a/src/H5HFint.c b/src/H5HFint.c
new file mode 100644
index 0000000..61c357c
--- /dev/null
+++ b/src/H5HFint.c
@@ -0,0 +1,146 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5HFint.c
+ * Fall 2012
+ * Dana Robinson
+ *
+ * Purpose: Internal routines for fractal heaps.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/**********************/
+/* Module Declaration */
+/**********************/
+
+#include "H5HFmodule.h" /* This source code file is part of the H5HF module */
+
+
+/***********************/
+/* Other Packages Used */
+/***********************/
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5ACprivate.h" /* Metadata Cache */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5HFpkg.h" /* Fractal Heaps */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Package Typedefs */
+/********************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF__create_flush_depend
+ *
+ * Purpose: Create a flush dependency between two data structure components
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF__create_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Sanity check */
+ HDassert(parent_entry);
+ HDassert(child_entry);
+
+ /* Create a flush dependency between parent and child entry */
+ if(H5AC_create_flush_dependency(parent_entry, child_entry) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF__create_flush_depend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5HF__destroy_flush_depend
+ *
+ * Purpose: Destroy a flush dependency between two data structure components
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Fall 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5HF__destroy_flush_depend(H5AC_info_t *parent_entry, H5AC_info_t *child_entry)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Sanity check */
+ HDassert(parent_entry);
+ HDassert(child_entry);
+
+ /* Destroy a flush dependency between parent and child entry */
+ if(H5AC_destroy_flush_dependency(parent_entry, child_entry) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF__destroy_flush_depend() */
+
diff --git a/src/H5HFpkg.h b/src/H5HFpkg.h
index 6253160..b4e6558 100644
--- a/src/H5HFpkg.h
+++ b/src/H5HFpkg.h
@@ -360,6 +360,7 @@ typedef struct H5HF_hdr_t {
uint8_t heap_off_size; /* Size of heap offsets (in bytes) */
uint8_t heap_len_size; /* Size of heap ID lengths (in bytes) */
hbool_t checked_filters; /* TRUE if pipeline passes can_apply checks */
+ hbool_t swmr_write; /* Flag indicating the file is opened with SWMR-write access */
} H5HF_hdr_t;
/* Common indirect block doubling table entry */
@@ -544,6 +545,14 @@ typedef struct H5HF_dblock_cache_ud_t {
* calls to it.
*/
unsigned filter_mask; /* Excluded filters for direct block */
+ uint8_t *dblk; /* Pointer to the buffer containing the decompressed
+ * direct block data obtained in verify_chksum callback.
+ * It will be used later in deserialize callback.
+ */
+ htri_t decompressed; /* Indicate that the direct block has been
+ * decompressed in verify_chksum callback.
+ * It will be used later in deserialize callback.
+ */
} H5HF_dblock_cache_ud_t;
@@ -608,6 +617,12 @@ H5FL_BLK_EXTERN(direct_block);
/* Package Private Prototypes */
/******************************/
+/* Generic routines */
+H5_DLL herr_t H5HF__create_flush_depend(H5AC_info_t *parent_entry,
+ H5AC_info_t *child_entry);
+H5_DLL herr_t H5HF__destroy_flush_depend(H5AC_info_t *parent_entry,
+ H5AC_info_t *child_entry);
+
/* Doubling table routines */
H5_DLL herr_t H5HF_dtable_init(H5HF_dtable_t *dtable);
H5_DLL herr_t H5HF_dtable_dest(H5HF_dtable_t *dtable);
diff --git a/src/H5HFprivate.h b/src/H5HFprivate.h
index 441ad3e..c052143 100644
--- a/src/H5HFprivate.h
+++ b/src/H5HFprivate.h
@@ -127,6 +127,8 @@ H5_DLL herr_t H5HF_op(H5HF_t *fh, hid_t dxpl_id, const void *id,
H5_DLL herr_t H5HF_remove(H5HF_t *fh, hid_t dxpl_id, const void *id);
H5_DLL herr_t H5HF_close(H5HF_t *fh, hid_t dxpl_id);
H5_DLL herr_t H5HF_delete(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr);
+H5_DLL herr_t H5HF_depend(H5AC_info_t *parent_entry, H5HF_t *fh);
+H5_DLL herr_t H5HF_undepend(H5AC_info_t *parent_entry, H5HF_t *fh);
/* Statistics routines */
H5_DLL herr_t H5HF_stat_info(const H5HF_t *fh, H5HF_stat_t *stats);
diff --git a/src/H5HFstat.c b/src/H5HFstat.c
index 303b1f4..b54a191 100644
--- a/src/H5HFstat.c
+++ b/src/H5HFstat.c
@@ -156,7 +156,7 @@ H5HF_size(const H5HF_t *fh, hid_t dxpl_id, hsize_t *heap_size)
/* Check for B-tree storage of huge objects in fractal heap */
if(H5F_addr_defined(hdr->huge_bt2_addr)) {
/* Open the huge object index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f)))
+ if(NULL == (bt2 = H5B2_open(hdr->f, dxpl_id, hdr->huge_bt2_addr, hdr->f, NULL)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for tracking 'huge' objects")
/* Get the B-tree storage */
diff --git a/src/H5HG.c b/src/H5HG.c
index 57a1cf2..41c5007 100644
--- a/src/H5HG.c
+++ b/src/H5HG.c
@@ -299,9 +299,9 @@ done:
static size_t
H5HG_alloc(H5F_t *f, H5HG_heap_t *heap, size_t size, unsigned *heap_flags_ptr)
{
- size_t idx;
- uint8_t *p;
- size_t need = H5HG_SIZEOF_OBJHDR(f) + H5HG_ALIGN(size);
+ size_t idx;
+ uint8_t *p;
+ size_t need = H5HG_SIZEOF_OBJHDR(f) + H5HG_ALIGN(size);
size_t ret_value = 0; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -360,11 +360,11 @@ H5HG_alloc(H5F_t *f, H5HG_heap_t *heap, size_t size, unsigned *heap_flags_ptr)
/* Fix the free space object */
if(need == heap->obj[0].size) {
- /*
- * All free space has been exhausted from this collection.
- */
- heap->obj[0].size = 0;
- heap->obj[0].begin = NULL;
+ /*
+ * All free space has been exhausted from this collection.
+ */
+ heap->obj[0].size = 0;
+ heap->obj[0].begin = NULL;
} /* end if */
else if(heap->obj[0].size-need >= H5HG_SIZEOF_OBJHDR (f)) {
/*
@@ -789,10 +789,10 @@ done:
herr_t
H5HG_remove (H5F_t *f, hid_t dxpl_id, H5HG_t *hobj)
{
- H5HG_heap_t *heap = NULL;
- uint8_t *p = NULL, *obj_start = NULL;
- size_t need;
- unsigned u;
+ H5HG_heap_t *heap = NULL;
+ uint8_t *p = NULL, *obj_start = NULL;
+ size_t need;
+ unsigned u;
unsigned flags = H5AC__NO_FLAGS_SET;/* Whether the heap gets deleted */
herr_t ret_value = SUCCEED; /* Return value */
diff --git a/src/H5HGcache.c b/src/H5HGcache.c
index c1eb8d9..511937d 100644
--- a/src/H5HGcache.c
+++ b/src/H5HGcache.c
@@ -62,7 +62,9 @@
/********************/
/* Metadata cache callbacks */
-static herr_t H5HG__cache_heap_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5HG__cache_heap_get_load_size(const void *_image, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static void *H5HG__cache_heap_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5HG__cache_heap_image_len(const void *thing, size_t *image_len,
@@ -83,6 +85,7 @@ const H5AC_class_t H5AC_GHEAP[1] = {{
H5FD_MEM_GHEAP, /* File space memory type for client */
H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
H5HG__cache_heap_get_load_size, /* 'get_load_size' callback */
+ NULL, /* 'verify_chksum' callback */
H5HG__cache_heap_deserialize, /* 'deserialize' callback */
H5HG__cache_heap_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -123,15 +126,48 @@ const H5AC_class_t H5AC_GHEAP[1] = {{
*-------------------------------------------------------------------------
*/
static herr_t
-H5HG__cache_heap_get_load_size(const void H5_ATTR_UNUSED *_udata, size_t *image_len)
+H5HG__cache_heap_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- FUNC_ENTER_STATIC_NOERR
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5F_t *f = (H5F_t *)_udata; /* File pointer -- obtained from user data */
+ size_t heap_size; /* Total size of collection */
+ htri_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
HDassert(image_len);
- *image_len = (size_t)H5HG_MINSIZE;
+ if(image == NULL) {
+ *image_len = (size_t)H5HG_MINSIZE;
- FUNC_LEAVE_NOAPI(SUCCEED)
+ } else { /* compute actual_len */
+ HDassert(f);
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+
+ /* Magic number */
+ if(HDmemcmp(image, H5HG_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "bad global heap collection signature")
+ image += H5_SIZEOF_MAGIC;
+
+ /* Version */
+ if(H5HG_VERSION != *image++)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "wrong version number in global heap")
+
+ /* Reserved */
+ image += 3;
+
+ /* Size */
+ H5F_DECODE_LENGTH(f, image, heap_size);
+ HDassert(heap_size >= H5HG_MINSIZE);
+ HDassert(*image_len == H5HG_MINSIZE);
+
+ *actual_len = heap_size;
+ }
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HG__cache_heap_get_load_size() */
diff --git a/src/H5HGpkg.h b/src/H5HGpkg.h
index ffbf7c6..e566ece 100644
--- a/src/H5HGpkg.h
+++ b/src/H5HGpkg.h
@@ -116,9 +116,9 @@ H5FL_BLK_EXTERN(gheap_chunk);
/****************************/
typedef struct H5HG_obj_t {
- int nrefs; /*reference count */
- size_t size; /*total size of object */
- uint8_t *begin; /*ptr to object into heap->chunk*/
+ int nrefs; /* reference count */
+ size_t size; /* total size of object */
+ uint8_t *begin; /* ptr to object into heap->chunk */
} H5HG_obj_t;
/* Forward declarations for fields */
diff --git a/src/H5HLcache.c b/src/H5HLcache.c
index faa5ff2..678da7f 100644
--- a/src/H5HLcache.c
+++ b/src/H5HLcache.c
@@ -71,7 +71,9 @@
/* Metadata cache callbacks */
/* Local heap prefix */
-static herr_t H5HL__cache_prefix_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5HL__cache_prefix_get_load_size(const void *_image, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static void *H5HL__cache_prefix_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5HL__cache_prefix_image_len(const void *thing,
@@ -82,8 +84,9 @@ static herr_t H5HL__cache_prefix_serialize(const H5F_t *f, void *image,
static herr_t H5HL__cache_prefix_free_icr(void *thing);
/* Local heap data block */
-static herr_t H5HL__cache_datablock_get_load_size(const void *udata,
- size_t *image_len);
+static herr_t H5HL__cache_datablock_get_load_size(const void *_image, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static void *H5HL__cache_datablock_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5HL__cache_datablock_image_len(const void *thing,
@@ -108,6 +111,7 @@ const H5AC_class_t H5AC_LHEAP_PRFX[1] = {{
H5FD_MEM_LHEAP, /* File space memory type for client */
H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
H5HL__cache_prefix_get_load_size, /* 'get_load_size' callback */
+ NULL, /* 'verify_chksum' callback */
H5HL__cache_prefix_deserialize, /* 'deserialize' callback */
H5HL__cache_prefix_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -124,6 +128,7 @@ const H5AC_class_t H5AC_LHEAP_DBLK[1] = {{
H5FD_MEM_LHEAP, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5HL__cache_datablock_get_load_size,/* 'get_load_size' callback */
+ NULL, /* 'verify_chksum' callback */
H5HL__cache_datablock_deserialize, /* 'deserialize' callback */
H5HL__cache_datablock_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -281,15 +286,68 @@ H5HL__fl_serialize(const H5HL_t *heap)
*-------------------------------------------------------------------------
*/
static herr_t
-H5HL__cache_prefix_get_load_size(const void H5_ATTR_UNUSED *_udata, size_t *image_len)
+H5HL__cache_prefix_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- FUNC_ENTER_STATIC_NOERR
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5HL_cache_prfx_ud_t *udata = (H5HL_cache_prfx_ud_t *)_udata; /* User data for callback */
+ H5HL_t heap; /* Local heap */
+ htri_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+ HDassert(udata);
HDassert(image_len);
- *image_len = H5HL_SPEC_READ_SIZE;
+ if(image == NULL)
+ *image_len = H5HL_SPEC_READ_SIZE;
- FUNC_LEAVE_NOAPI(SUCCEED)
+ else { /* compute actual_len */
+
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+
+ /* Check magic number */
+ if(HDmemcmp(image, H5HL_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad local heap signature")
+ image += H5_SIZEOF_MAGIC;
+
+ /* Version */
+ if(H5HL_VERSION != *image++)
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "wrong version number in local heap")
+
+ /* Reserved */
+ image += 3;
+
+ /* Store the prefix's address & length */
+ heap.prfx_addr = udata->prfx_addr; /* NEED */
+ heap.prfx_size = udata->sizeof_prfx; /* NEED */
+
+ /* Heap data size */
+ H5F_DECODE_LENGTH_LEN(image, heap.dblk_size, udata->sizeof_size); /* NEED */
+
+ /* Free list head */
+ H5F_DECODE_LENGTH_LEN(image, heap.free_block, udata->sizeof_size);
+ if(heap.free_block != H5HL_FREE_NULL && heap.free_block >= heap.dblk_size)
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap free list");
+
+ /* Heap data address */
+ H5F_addr_decode_len(udata->sizeof_addr, &image, &(heap.dblk_addr)); /* NEED */
+
+ *actual_len = heap.prfx_size;
+
+ /* Check if heap block exists */
+ if(heap.dblk_size) {
+ /* Check if heap data block is contiguous with header */
+ if(H5F_addr_eq((heap.prfx_addr + heap.prfx_size), heap.dblk_addr)) {
+ /* Note that the heap should be a single object in the cache */
+ *actual_len += heap.dblk_size;
+ }
+ } /* end if */
+ } /* end compute actual_len */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HL__cache_prefix_get_load_size() */
@@ -647,9 +705,12 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5HL__cache_datablock_get_load_size(const void *_udata, size_t *image_len)
+H5HL__cache_datablock_get_load_size(const void *_image, void *_udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5HL_cache_dblk_ud_t *udata = (const H5HL_cache_dblk_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into image buffer */
+ H5HL_cache_dblk_ud_t *udata = (H5HL_cache_dblk_ud_t *)_udata; /* User data for callback */
FUNC_ENTER_STATIC_NOERR
@@ -659,7 +720,12 @@ H5HL__cache_datablock_get_load_size(const void *_udata, size_t *image_len)
HDassert(udata->heap->dblk_size > 0);
HDassert(image_len);
- *image_len = udata->heap->dblk_size;
+ if(image == NULL)
+ *image_len = udata->heap->dblk_size;
+ else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5HL__cache_datablock_get_load_size() */
diff --git a/src/H5I.c b/src/H5I.c
index 15db6d6..e952388 100644
--- a/src/H5I.c
+++ b/src/H5I.c
@@ -789,6 +789,75 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5I_register_with_id
+ *
+ * Purpose: Registers an OBJECT in a TYPE with the supplied ID for it.
+ * This routine will check to ensure the supplied ID is not already
+ * in use, and ensure that it is a valid ID for the given type,
+ * but will NOT check to ensure the OBJECT is not already
+ * registered (thus, it is possible to register one object under
+ * multiple IDs).
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Mike McGreevy
+ * Wednesday, July 21, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5I_register_with_id(H5I_type_t type, const void *object, hbool_t app_ref, hid_t id)
+{
+ H5I_id_type_t *type_ptr; /*ptr to the type */
+ H5I_id_info_t *id_ptr; /*ptr to the new ID information */
+ herr_t ret_value = SUCCEED; /*return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Check arguments */
+
+ /* Make sure ID is not already in use */
+ if(NULL != (id_ptr = H5I__find_id(id)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADRANGE, FAIL, "ID already in use?!")
+
+ /* Make sure type number is valid */
+ if(type <= H5I_BADID || type >= H5I_next_type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid type number")
+
+ /* Get type pointer from list of types */
+ type_ptr = H5I_id_type_list_g[type];
+
+ if(NULL == type_ptr || type_ptr->init_count <= 0)
+ HGOTO_ERROR(H5E_ATOM, H5E_BADGROUP, FAIL, "invalid type")
+
+ /* Make sure requested ID belongs to object's type */
+ if(H5I_TYPE(id) != type)
+ HGOTO_ERROR(H5E_ATOM, H5E_BADRANGE, FAIL, "invalid type for provided ID")
+
+ /* Allocate new structure to house this ID */
+ if(NULL == (id_ptr = H5FL_MALLOC(H5I_id_info_t)))
+ HGOTO_ERROR(H5E_ATOM, H5E_NOSPACE, FAIL, "memory allocation failed")
+
+ /* Create the struct & insert requested ID */
+ id_ptr->id = id;
+ id_ptr->count = 1; /*initial reference count*/
+ id_ptr->app_count = !!app_ref;
+ id_ptr->obj_ptr = object;
+
+ /* Insert into the type */
+ if(H5SL_insert(type_ptr->ids, id_ptr, &id_ptr->id) < 0)
+ HGOTO_ERROR(H5E_ATOM, H5E_CANTINSERT, FAIL, "can't insert ID node into skip list")
+ type_ptr->id_count++;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5I_register_with_id() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5I_subst
*
* Purpose: Substitute a new object pointer for the specified ID.
diff --git a/src/H5Iprivate.h b/src/H5Iprivate.h
index f438581..c916f31 100644
--- a/src/H5Iprivate.h
+++ b/src/H5Iprivate.h
@@ -69,6 +69,7 @@ H5_DLL herr_t H5I_register_type(const H5I_class_t *cls);
H5_DLL int64_t H5I_nmembers(H5I_type_t type);
H5_DLL herr_t H5I_clear_type(H5I_type_t type, hbool_t force, hbool_t app_ref);
H5_DLL hid_t H5I_register(H5I_type_t type, const void *object, hbool_t app_ref);
+H5_DLL herr_t H5I_register_with_id(H5I_type_t type, const void *object, hbool_t app_ref, hid_t id);
H5_DLL void *H5I_subst(hid_t id, const void *new_object);
H5_DLL void *H5I_object(hid_t id);
H5_DLL void *H5I_object_verify(hid_t id, H5I_type_t id_type);
diff --git a/src/H5Lexternal.c b/src/H5Lexternal.c
index 139c5e6..dd2061c 100644
--- a/src/H5Lexternal.c
+++ b/src/H5Lexternal.c
@@ -318,7 +318,7 @@ H5L_extern_traverse(const char H5_ATTR_UNUSED *link_name, hid_t cur_group,
*/
/* Simplify intent flags for open calls */
- intent = ((intent & H5F_ACC_RDWR) ? H5F_ACC_RDWR : H5F_ACC_RDONLY);
+ intent &= (H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE | H5F_ACC_SWMR_READ);
/* Copy the file name to use */
if(NULL == (temp_file_name = H5MM_strdup(file_name)))
diff --git a/src/H5MF.c b/src/H5MF.c
index 52ea82a..0f2857e 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -1032,7 +1032,7 @@ H5MF_try_shrink(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, haddr_t addr,
H5MF_sect_ud_t udata; /* User data for callback */
H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
- htri_t ret_value = FAIL; /* Return value */
+ htri_t ret_value = FAIL; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
#ifdef H5MF_ALLOC_DEBUG
diff --git a/src/H5O.c b/src/H5O.c
index 34fe498..788d657 100644
--- a/src/H5O.c
+++ b/src/H5O.c
@@ -1086,6 +1086,117 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5Odisable_mdc_flushes
+ *
+ * Purpose: To "cork" an object:
+ * --keep dirty entries assoicated with the object in the metadata cache
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; January 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Odisable_mdc_flushes(hid_t object_id)
+{
+ H5O_loc_t *oloc; /* Object location */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", object_id);
+
+ /* Get the object's oloc */
+ if((oloc = H5O_get_loc(object_id)) == NULL)
+ HGOTO_ERROR(H5E_ATOM, H5E_BADVALUE, FAIL, "unable to get object location from ID")
+
+ if(H5AC_cork(oloc->file, oloc->addr, H5AC__SET_CORK, NULL) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unable to cork an object")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Odisable_mdc_flushes() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Oenable_mdc_flushes
+ *
+ * Purpose: To "uncork" an object
+ * --release keeping dirty entries associated with the object
+ * in the metadata cache
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; January 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Oenable_mdc_flushes(hid_t object_id)
+{
+ H5O_loc_t *oloc; /* Object location */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", object_id);
+
+ /* Get the object's oloc */
+ if((oloc = H5O_get_loc(object_id)) == NULL)
+ HGOTO_ERROR(H5E_ATOM, H5E_BADVALUE, FAIL, "unable to get object location from ID")
+
+ /* Set the value */
+ if(H5AC_cork(oloc->file, oloc->addr, H5AC__UNCORK, NULL) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unable to uncork an object")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Oenable_mdc_flushes() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Oare_mdc_flushes_disabled
+ *
+ * Purpose: Retrieve the object's "cork" status in the parameter "are_disabled":
+ * TRUE if mdc flushes for the object is disabled
+ * FALSE if mdc flushes for the object is not disabled
+ * Return error if the parameter "are_disabled" is not supplied
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; January 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Oare_mdc_flushes_disabled(hid_t object_id, hbool_t *are_disabled)
+{
+ H5O_loc_t *oloc; /* Object location */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "i*b", object_id, are_disabled);
+
+ /* Check args */
+
+ /* Get the object's oloc */
+ if((oloc = H5O_get_loc(object_id)) == NULL)
+ HGOTO_ERROR(H5E_ATOM, H5E_BADVALUE, FAIL, "unable to get object location from ID")
+ if(!are_disabled)
+ HGOTO_ERROR(H5E_ATOM, H5E_BADVALUE, FAIL, "unable to get object location from ID")
+
+ /* Get the cork status */
+ if(H5AC_cork(oloc->file, oloc->addr, H5AC__GET_CORKED, are_disabled) < 0)
+ HGOTO_ERROR(H5E_ATOM, H5E_BADVALUE, FAIL, "unable to retrieve an object's cork status")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Oare_mdc_flushes_disabled() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5O_create
*
* Purpose: Creates a new object header. Allocates space for it and
@@ -1146,12 +1257,13 @@ H5O_create(H5F_t *f, hid_t dxpl_id, size_t size_hint, size_t initial_rc,
/* Initialize file-specific information for object header */
store_msg_crt_idx = H5F_STORE_MSG_CRT_IDX(f);
- if(H5F_USE_LATEST_FORMAT(f) || store_msg_crt_idx || (oh_flags & H5O_HDR_ATTR_CRT_ORDER_TRACKED))
+ if(H5F_USE_LATEST_FLAGS(f, H5F_LATEST_OBJ_HEADER) || store_msg_crt_idx || (oh_flags & H5O_HDR_ATTR_CRT_ORDER_TRACKED))
oh->version = H5O_VERSION_LATEST;
else
oh->version = H5O_VERSION_1;
oh->sizeof_size = H5F_SIZEOF_SIZE(f);
oh->sizeof_addr = H5F_SIZEOF_ADDR(f);
+ oh->swmr_write = !!(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE);
#ifdef H5O_ENABLE_BAD_MESG_COUNT
/* Check whether the "bad message count" property is set */
if(H5P_exist_plist(oc_plist, H5O_BAD_MESG_COUNT_NAME) > 0) {
@@ -1164,6 +1276,14 @@ H5O_create(H5F_t *f, hid_t dxpl_id, size_t size_hint, size_t initial_rc,
/* Set initial status flags */
oh->flags = oh_flags;
+ /* Create object header proxy if doing SWMR writes */
+ if(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE) {
+ if(H5O_proxy_create(f, dxpl_id, oh) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTCREATE, FAIL, "can't create object header proxy")
+ } /* end if */
+ else
+ oh->proxy_addr = HADDR_UNDEF;
+
/* Initialize version-specific fields */
if(oh->version > H5O_VERSION_1) {
/* Initialize all time fields with current time, if we are storing them */
@@ -2201,6 +2321,7 @@ H5O_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr)
H5O_t *oh = NULL; /* Object header information */
H5O_loc_t loc; /* Object location for object to delete */
unsigned oh_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting object header */
+ hbool_t corked;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_TAG(dxpl_id, addr, FAIL)
@@ -2222,6 +2343,14 @@ H5O_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr)
if(H5O_delete_oh(f, dxpl_id, oh) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTDELETE, FAIL, "can't delete object from file")
+ /* Uncork cache entries with tag: addr */
+ if(H5AC_cork(f, addr, H5AC__GET_CORKED, &corked) < 0)
+ HGOTO_ERROR(H5E_ATOM, H5E_SYSTEM, FAIL, "unable to retrieve an object's cork status")
+ else if(corked) {
+ if(H5AC_cork(f, addr, H5AC__UNCORK, NULL) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_SYSTEM, FAIL, "unable to uncork an object")
+ }
+
/* Mark object header as deleted */
oh_flags = H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
@@ -3539,3 +3668,121 @@ H5O_free(H5O_t *oh)
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5O_free() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_pin_flush_dep_proxy
+ *
+ * Purpose: Pin an object header proxy for use as a flush dependency
+ * parent for items referenced by the object header.
+ *
+ * Return: Success: Pointer to the object header proxy
+ * structure for the object.
+ * Failure: NULL
+ *
+ * Programmer: Neil Fortner
+ * Mar 16 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+H5O_proxy_t *
+H5O_pin_flush_dep_proxy(H5O_loc_t *loc, hid_t dxpl_id)
+{
+ H5O_t *oh = NULL; /* Object header */
+ H5O_proxy_t *proxy = NULL; /* Object header proxy */
+ H5O_proxy_t *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_NOAPI(NULL)
+
+ /* check args */
+ HDassert(loc);
+
+ /* Get header */
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to protect object header")
+
+ /* Pin object header proxy */
+ if(NULL == (proxy = H5O_proxy_pin(loc->file, dxpl_id, oh)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPIN, NULL, "unable to pin object header proxy")
+
+ /* Set the return value */
+ ret_value = proxy;
+
+done:
+ /* Release the object header from the cache */
+ if(oh && H5O_unprotect(loc, dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, NULL, "unable to release object header")
+
+ if(!ret_value)
+ if(proxy && H5O_proxy_unpin(proxy) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPIN, NULL, "unable to release object header proxy")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O_pin_flush_dep_proxy() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_pin_flush_dep_proxy_oh
+ *
+ * Purpose: Pin an object header proxy for use as a flush dependency
+ * parent for items referenced by the object header.
+ *
+ * Return: Success: Pointer to the object header proxy
+ * structure for the object.
+ * Failure: NULL
+ *
+ * Programmer: Neil Fortner
+ * Mar 16 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+H5O_proxy_t *
+H5O_pin_flush_dep_proxy_oh(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
+{
+ H5O_proxy_t *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_NOAPI(NULL)
+
+ /* check args */
+ HDassert(f);
+ HDassert(oh);
+
+ /* Pin object header proxy */
+ if(NULL == (ret_value = H5O_proxy_pin(f, dxpl_id, oh)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPIN, NULL, "unable to pin object header proxy")
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O_pin_flush_dep_proxy_oh() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_unpin_flush_dep_proxy
+ *
+ * Purpose: Unpin an object header proxy.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Mar 16 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5O_unpin_flush_dep_proxy(H5O_proxy_t *proxy)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* check args */
+ HDassert(proxy);
+
+ /* Unin object header proxy */
+ if(H5O_proxy_unpin(proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPIN, FAIL, "unable to unpin object header proxy")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O_unpin_flush_dep_proxy() */
+
diff --git a/src/H5Oainfo.c b/src/H5Oainfo.c
index c5f692d..9e32af4 100644
--- a/src/H5Oainfo.c
+++ b/src/H5Oainfo.c
@@ -324,7 +324,9 @@ H5O_ainfo_free(void *mesg)
/*-------------------------------------------------------------------------
* Function: H5O_ainfo_delete
*
- * Purpose: Free file space referenced by message
+ * Purpose: Free file space referenced by message. Note that open_oh
+ * *must* be non-NULL - this means that calls to
+ * H5O_msg_delete must include an oh if the type is ainfo.
*
* Return: Non-negative on success/Negative on failure
*
@@ -334,9 +336,10 @@ H5O_ainfo_free(void *mesg)
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_ainfo_delete(H5F_t *f, hid_t dxpl_id, H5O_t H5_ATTR_UNUSED *open_oh, void *_mesg)
+H5O_ainfo_delete(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, void *_mesg)
{
H5O_ainfo_t *ainfo = (H5O_ainfo_t *)_mesg;
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -344,13 +347,26 @@ H5O_ainfo_delete(H5F_t *f, hid_t dxpl_id, H5O_t H5_ATTR_UNUSED *open_oh, void *_
/* check args */
HDassert(f);
HDassert(ainfo);
+ HDassert(open_oh);
/* If the object is using "dense" attribute storage, delete it */
- if(H5F_addr_defined(ainfo->fheap_addr))
- if(H5A_dense_delete(f, dxpl_id, ainfo) < 0)
+ if(H5F_addr_defined(ainfo->fheap_addr)) {
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(f, dxpl_id, open_oh)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin object header proxy")
+
+ /* Delete the attribute */
+ if(H5A_dense_delete(f, dxpl_id, ainfo, oh_proxy) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to free dense attribute storage")
+ } /* end if */
done:
+ /* Release resources */
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5O_ainfo_delete() */
@@ -428,12 +444,14 @@ H5O_ainfo_copy_file(H5F_t *file_src, void *mesg_src, H5F_t *file_dst,
*ainfo_dst = *ainfo_src;
if(H5F_addr_defined(ainfo_src->fheap_addr)) {
- /* copy dense attribute */
+ /* Prepare to copy dense attributes - actual copy in post_copy */
/* Set copied metadata tag */
H5_BEGIN_TAG(dxpl_id, H5AC__COPIED_TAG, NULL);
- if(H5A_dense_create(file_dst, dxpl_id, ainfo_dst) < 0)
+ /*!FIXME Must pass something for the parent, once we have a way to
+ * depend on an object being copied (ohdr proxy?) -NAF */
+ if(H5A_dense_create(file_dst, dxpl_id, ainfo_dst, NULL) < 0)
HGOTO_ERROR_TAG(H5E_OHDR, H5E_CANTINIT, NULL, "unable to create dense storage for attributes")
/* Reset metadata tag */
diff --git a/src/H5Oalloc.c b/src/H5Oalloc.c
index 3d930f0..7263ca6 100644
--- a/src/H5Oalloc.c
+++ b/src/H5Oalloc.c
@@ -921,7 +921,7 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size, size_t *new
oh->chunk = x;
} /* end if */
- chunkno = oh->nchunks++;
+ chunkno = (unsigned)oh->nchunks++;
oh->chunk[chunkno].addr = new_chunk_addr;
oh->chunk[chunkno].size = size;
oh->chunk[chunkno].gap = 0;
@@ -966,6 +966,8 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size, size_t *new
oh->nmesgs--;
} /* end if */
else {
+ HDassert(curr_msg->type->id != H5O_CONT_ID);
+
/* Copy the raw data */
HDmemcpy(p, curr_msg->raw - (size_t)H5O_SIZEOF_MSGHDR_OH(oh),
curr_msg->raw_size + (size_t)H5O_SIZEOF_MSGHDR_OH(oh));
@@ -1082,7 +1084,7 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size, size_t *new
oh->mesg[idx].chunkno = chunkno;
/* Insert the new chunk into the cache */
- if(H5O_chunk_add(f, dxpl_id, oh, chunkno) < 0)
+ if(H5O_chunk_add(f, dxpl_id, oh, chunkno, oh->mesg[found_null].chunkno) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't add new chunk to cache")
/* Initialize the continuation information */
@@ -1455,6 +1457,7 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
{
H5O_chunk_proxy_t *null_chk_proxy = NULL; /* Chunk that null message is in */
H5O_chunk_proxy_t *curr_chk_proxy = NULL; /* Chunk that message is in */
+ H5O_chunk_proxy_t *cont_targ_chk_proxy = NULL; /* Chunk that continuation message points to */
hbool_t null_chk_dirtied = FALSE; /* Flags for unprotecting null chunk */
hbool_t curr_chk_dirtied = FALSE; /* Flags for unprotecting curr chunk */
hbool_t packed_msg; /* Flag to indicate that messages were packed */
@@ -1544,13 +1547,13 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
if(H5O_CONT_ID == curr_msg->type->id) {
htri_t status; /* Status from moving messages */
- if((status = H5O_move_cont(f, dxpl_id, oh, u)) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTDELETE, FAIL, "Error in moving messages into cont message")
- else if(status > 0) { /* Message(s) got moved into "continuation" message */
+ if((status = H5O_move_cont(f, dxpl_id, oh, u)) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTDELETE, FAIL, "Error in moving messages into cont message")
+ else if(status > 0) { /* Message(s) got moved into "continuation" message */
packed_msg = TRUE;
- break;
- } /* end else-if */
- } /* end if */
+ break;
+ } /* end else-if */
+ } /* end if */
/* Don't let locked messages be moved into earlier chunk */
if(!curr_msg->locked) {
@@ -1571,6 +1574,65 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
if(NULL == (curr_chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, curr_msg->chunkno)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk")
+ /* If the message being moved is a continuation
+ * message and we are doing SWMR writes, we must
+ * update the flush dependencies */
+ if((H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)
+ && (H5O_CONT_ID == curr_msg->type->id)) {
+ void *null_chk_mdc_obj = NULL; /* The metadata cache object for the null_msg chunk */
+
+ /* Point to the metadata cache object for the
+ * null message chunk, oh if in chunk 0 or the
+ * proxy otherwise */
+ null_chk_mdc_obj = (null_msg->chunkno == 0
+ ? (void *)oh
+ : (void *)null_chk_proxy);
+
+ /* The other chunks involved should never be
+ * chunk 0 */
+ HDassert(curr_msg->chunkno > 0);
+ HDassert(((H5O_cont_t *)(curr_msg->native))->chunkno > 0);
+
+ /* Protect continuation message target chunk */
+ if(NULL == (cont_targ_chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, ((H5O_cont_t *)(curr_msg->native))->chunkno)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk")
+
+ /* Remove flush dependency on old continuation
+ * message chunk */
+ HDassert(cont_targ_chk_proxy);
+ HDassert(cont_targ_chk_proxy->fd_parent_addr != HADDR_UNDEF);
+ HDassert(cont_targ_chk_proxy->fd_parent_ptr);
+ HDassert(curr_chk_proxy);
+ HDassert((void *)curr_chk_proxy == cont_targ_chk_proxy->fd_parent_ptr);
+ HDassert(H5F_addr_eq(curr_chk_proxy->cache_info.addr, cont_targ_chk_proxy->fd_parent_addr));
+
+ if(H5AC_destroy_flush_dependency(curr_chk_proxy, cont_targ_chk_proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+
+ cont_targ_chk_proxy->fd_parent_addr = HADDR_UNDEF;
+ cont_targ_chk_proxy->fd_parent_ptr = NULL;
+
+ /* Create flush dependency on new continuation
+ * message chunk */
+ if(H5AC_create_flush_dependency(null_chk_mdc_obj, cont_targ_chk_proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+
+ HDassert(null_chk_mdc_obj);
+ HDassert(((H5C_cache_entry_t *)null_chk_mdc_obj)->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(((H5C_cache_entry_t *)null_chk_mdc_obj)->type);
+ HDassert((((H5C_cache_entry_t *)null_chk_mdc_obj)->type->id == H5AC_OHDR_ID) ||
+ (((H5C_cache_entry_t *)null_chk_mdc_obj)->type->id == H5AC_OHDR_CHK_ID));
+
+ cont_targ_chk_proxy->fd_parent_addr = ((H5C_cache_entry_t *)null_chk_mdc_obj)->addr;
+ cont_targ_chk_proxy->fd_parent_ptr = null_chk_mdc_obj;
+
+ /* Unprotect continuation message target chunk
+ */
+ if(H5O_chunk_unprotect(f, dxpl_id, cont_targ_chk_proxy, FALSE) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
+ cont_targ_chk_proxy = NULL;
+ } /* end if */
+
/* Copy raw data for non-null message to new chunk */
HDmemcpy(null_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh), curr_msg->raw - H5O_SIZEOF_MSGHDR_OH(oh), curr_msg->raw_size + (size_t)H5O_SIZEOF_MSGHDR_OH(oh));
@@ -1715,10 +1777,16 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
ret_value = (htri_t)did_packing;
done:
- if(null_chk_proxy && H5O_chunk_unprotect(f, dxpl_id, null_chk_proxy, null_chk_dirtied) < 0)
- HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect null object header chunk")
- if(curr_chk_proxy && H5O_chunk_unprotect(f, dxpl_id, curr_chk_proxy, curr_chk_dirtied) < 0)
- HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect current object header chunk")
+ if(ret_value < 0) {
+ if(null_chk_proxy && H5O_chunk_unprotect(f, dxpl_id, null_chk_proxy, null_chk_dirtied) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect null object header chunk")
+ if(curr_chk_proxy && H5O_chunk_unprotect(f, dxpl_id, curr_chk_proxy, curr_chk_dirtied) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect current object header chunk")
+ if(cont_targ_chk_proxy && H5O_chunk_unprotect(f, dxpl_id, cont_targ_chk_proxy, FALSE) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect continuation message target object header chunk")
+ } /* end if */
+ else
+ HDassert(!null_chk_proxy && !curr_chk_proxy && !cont_targ_chk_proxy);
FUNC_LEAVE_NOAPI(ret_value)
} /* H5O_move_msgs_forward() */
diff --git a/src/H5Oattribute.c b/src/H5Oattribute.c
index 9b494a1..a11e6dc 100644
--- a/src/H5Oattribute.c
+++ b/src/H5Oattribute.c
@@ -220,6 +220,7 @@ herr_t
H5O_attr_create(const H5O_loc_t *loc, hid_t dxpl_id, H5A_t *attr)
{
H5O_t *oh = NULL; /* Pointer to actual object header */
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
H5O_ainfo_t ainfo; /* Attribute information for object */
htri_t shared_mesg; /* Should this message be stored in the Shared Message table? */
herr_t ret_value = SUCCEED; /* Return value */
@@ -280,8 +281,14 @@ H5O_attr_create(const H5O_loc_t *loc, hid_t dxpl_id, H5A_t *attr)
H5O_iter_cvt_t udata; /* User data for callback */
H5O_mesg_operator_t op; /* Wrapper for operator */
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(loc->file) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(loc->file, dxpl_id, oh)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin object header proxy")
+
/* Create dense storage for attributes */
- if(H5A_dense_create(loc->file, dxpl_id, &ainfo) < 0)
+ if(H5A_dense_create(loc->file, dxpl_id, &ainfo, oh_proxy) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, FAIL, "unable to create dense storage for attributes")
/* Set up user data for callback */
@@ -394,6 +401,8 @@ H5O_attr_create(const H5O_loc_t *loc, hid_t dxpl_id, H5A_t *attr)
HGOTO_ERROR(H5E_ATTR, H5E_CANTUPDATE, FAIL, "unable to update time on object")
done:
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(oh && H5O_unpin(oh) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin object header")
@@ -471,6 +480,7 @@ H5A_t *
H5O_attr_open_by_name(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
{
H5O_t *oh = NULL; /* Pointer to actual object header */
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
H5O_ainfo_t ainfo; /* Attribute information for object */
H5A_t *exist_attr = NULL; /* Existing opened attribute object */
H5A_t *opened_attr = NULL; /* Newly opened attribute object */
@@ -507,8 +517,14 @@ H5O_attr_open_by_name(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
else {
/* Check for attributes in dense storage */
if(H5F_addr_defined(ainfo.fheap_addr)) {
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(loc->file) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(loc->file, dxpl_id, oh)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, NULL, "unable to pin object header proxy")
+
/* Open attribute with dense storage */
- if(NULL == (opened_attr = H5A_dense_open(loc->file, dxpl_id, &ainfo, name)))
+ if(NULL == (opened_attr = H5A_dense_open(loc->file, dxpl_id, &ainfo, name, oh_proxy)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, NULL, "can't open attribute")
} /* end if */
else {
@@ -543,6 +559,8 @@ H5O_attr_open_by_name(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
ret_value = opened_attr;
done:
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, NULL, "unable to unpin attribute object header proxy")
if(oh && H5O_unprotect(loc, dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CANTUNPROTECT, NULL, "unable to release object header")
@@ -1181,6 +1199,7 @@ H5O_attr_rename(const H5O_loc_t *loc, hid_t dxpl_id, const char *old_name,
const char *new_name)
{
H5O_t *oh = NULL; /* Pointer to actual object header */
+ H5O_proxy_t *oh_proxy = NULL; /* Attribute's object header proxy */
H5O_ainfo_t ainfo; /* Attribute information for object */
herr_t ret_value = SUCCEED; /* Return value */
@@ -1205,8 +1224,14 @@ H5O_attr_rename(const H5O_loc_t *loc, hid_t dxpl_id, const char *old_name,
/* Check for attributes stored densely */
if(H5F_addr_defined(ainfo.fheap_addr)) {
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(loc->file) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(loc->file, dxpl_id, oh)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin attribute object header proxy")
+
/* Rename the attribute data in dense storage */
- if(H5A_dense_rename(loc->file, dxpl_id, &ainfo, old_name, new_name) < 0)
+ if(H5A_dense_rename(loc->file, dxpl_id, &ainfo, old_name, new_name, oh_proxy) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTUPDATE, FAIL, "error updating attribute")
} /* end if */
else {
@@ -1246,6 +1271,8 @@ H5O_attr_rename(const H5O_loc_t *loc, hid_t dxpl_id, const char *old_name,
HGOTO_ERROR(H5E_ATTR, H5E_CANTUPDATE, FAIL, "unable to update time on object")
done:
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(oh && H5O_unpin(oh) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin object header")
@@ -1271,6 +1298,7 @@ H5O_attr_iterate_real(hid_t loc_id, const H5O_loc_t *loc, hid_t dxpl_id,
hsize_t *last_attr, const H5A_attr_iter_op_t *attr_op, void *op_data)
{
H5O_t *oh = NULL; /* Pointer to actual object header */
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
H5O_ainfo_t ainfo; /* Attribute information for object */
H5A_attr_table_t atable = {0, NULL}; /* Table of attributes */
herr_t ret_value = FAIL; /* Return value */
@@ -1301,13 +1329,19 @@ H5O_attr_iterate_real(hid_t loc_id, const H5O_loc_t *loc, hid_t dxpl_id,
if(skip > 0 && skip >= ainfo.nattrs)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid index specified")
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(loc->file) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(loc->file, dxpl_id, oh)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin object header proxy")
+
/* Release the object header */
if(H5O_unprotect(loc, dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTUNPROTECT, FAIL, "unable to release object header")
oh = NULL;
/* Iterate over attributes in dense storage */
- if((ret_value = H5A_dense_iterate(loc->file, dxpl_id, loc_id, &ainfo, idx_type, order, skip, last_attr, attr_op, op_data)) < 0)
+ if((ret_value = H5A_dense_iterate(loc->file, dxpl_id, loc_id, &ainfo, idx_type, order, skip, oh_proxy, last_attr, attr_op, op_data)) < 0)
HERROR(H5E_ATTR, H5E_BADITER, "error iterating over attributes");
} /* end if */
else {
@@ -1331,6 +1365,8 @@ H5O_attr_iterate_real(hid_t loc_id, const H5O_loc_t *loc, hid_t dxpl_id,
done:
/* Release resources */
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(oh && H5O_unprotect(loc, dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CANTUNPROTECT, FAIL, "unable to release object header")
if(atable.attrs && H5A_attr_release_table(&atable) < 0)
@@ -1403,6 +1439,7 @@ H5O_attr_remove_update(const H5O_loc_t *loc, H5O_t *oh, H5O_ainfo_t *ainfo,
hid_t dxpl_id)
{
H5A_attr_table_t atable = {0, NULL}; /* Table of attributes */
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -1420,8 +1457,14 @@ H5O_attr_remove_update(const H5O_loc_t *loc, H5O_t *oh, H5O_ainfo_t *ainfo,
hbool_t can_convert = TRUE; /* Whether converting to attribute messages is possible */
size_t u; /* Local index */
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(loc->file) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(loc->file, dxpl_id, oh)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin object header proxy")
+
/* Build the table of attributes for this object */
- if(H5A_dense_build_table(loc->file, dxpl_id, ainfo, H5_INDEX_NAME, H5_ITER_NATIVE, &atable) < 0)
+ if(H5A_dense_build_table(loc->file, dxpl_id, ainfo, H5_INDEX_NAME, H5_ITER_NATIVE, oh_proxy, &atable) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, FAIL, "error building attribute table")
/* Inspect attributes in table for ones that can't be converted back
@@ -1476,7 +1519,7 @@ H5O_attr_remove_update(const H5O_loc_t *loc, H5O_t *oh, H5O_ainfo_t *ainfo,
} /* end for */
/* Remove the dense storage */
- if(H5A_dense_delete(loc->file, dxpl_id, ainfo) < 0)
+ if(H5A_dense_delete(loc->file, dxpl_id, ainfo, oh_proxy) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTDELETE, FAIL, "unable to delete dense attribute storage")
} /* end if */
} /* end if */
@@ -1495,6 +1538,8 @@ H5O_attr_remove_update(const H5O_loc_t *loc, H5O_t *oh, H5O_ainfo_t *ainfo,
done:
/* Release resources */
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(atable.attrs && H5A_attr_release_table(&atable) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CANTFREE, FAIL, "unable to release attribute table")
@@ -1567,6 +1612,7 @@ herr_t
H5O_attr_remove(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
{
H5O_t *oh = NULL; /* Pointer to actual object header */
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
H5O_ainfo_t ainfo; /* Attribute information for object */
htri_t ainfo_exists = FALSE; /* Whether the attribute info exists in the file */
herr_t ret_value = SUCCEED; /* Return value */
@@ -1591,8 +1637,14 @@ H5O_attr_remove(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
/* Check for attributes stored densely */
if(H5F_addr_defined(ainfo.fheap_addr)) {
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(loc->file) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(loc->file, dxpl_id, oh)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin object header proxy")
+
/* Delete attribute from dense storage */
- if(H5A_dense_remove(loc->file, dxpl_id, &ainfo, name) < 0)
+ if(H5A_dense_remove(loc->file, dxpl_id, &ainfo, name, oh_proxy) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTDELETE, FAIL, "unable to delete attribute in dense storage")
} /* end if */
else {
@@ -1626,6 +1678,8 @@ H5O_attr_remove(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
HGOTO_ERROR(H5E_ATTR, H5E_CANTUPDATE, FAIL, "unable to update time on object")
done:
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(oh && H5O_unpin(oh) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin object header")
@@ -1651,6 +1705,7 @@ H5O_attr_remove_by_idx(const H5O_loc_t *loc, H5_index_t idx_type,
H5_iter_order_t order, hsize_t n, hid_t dxpl_id)
{
H5O_t *oh = NULL; /* Pointer to actual object header */
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
H5O_ainfo_t ainfo; /* Attribute information for object */
htri_t ainfo_exists = FALSE; /* Whether the attribute info exists in the file */
H5A_attr_table_t atable = {0, NULL}; /* Table of attributes */
@@ -1675,8 +1730,14 @@ H5O_attr_remove_by_idx(const H5O_loc_t *loc, H5_index_t idx_type,
/* Check for attributes stored densely */
if(H5F_addr_defined(ainfo.fheap_addr)) {
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(loc->file) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(loc->file, dxpl_id, oh)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin object header proxy")
+
/* Delete attribute from dense storage */
- if(H5A_dense_remove_by_idx(loc->file, dxpl_id, &ainfo, idx_type, order, n) < 0)
+ if(H5A_dense_remove_by_idx(loc->file, dxpl_id, &ainfo, idx_type, order, n, oh_proxy) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTDELETE, FAIL, "unable to delete attribute in dense storage")
} /* end if */
else {
@@ -1718,6 +1779,8 @@ H5O_attr_remove_by_idx(const H5O_loc_t *loc, H5_index_t idx_type,
HGOTO_ERROR(H5E_ATTR, H5E_CANTUPDATE, FAIL, "unable to update time on object")
done:
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(oh && H5O_unpin(oh) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin object header")
if(atable.attrs && H5A_attr_release_table(&atable) < 0)
@@ -1837,6 +1900,7 @@ htri_t
H5O_attr_exists(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
{
H5O_t *oh = NULL; /* Pointer to actual object header */
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
H5O_ainfo_t ainfo; /* Attribute information for object */
htri_t ret_value = FAIL; /* Return value */
@@ -1860,8 +1924,14 @@ H5O_attr_exists(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
/* Check for attributes stored densely */
if(H5F_addr_defined(ainfo.fheap_addr)) {
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(loc->file) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(loc->file, dxpl_id, oh)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin object header proxy")
+
/* Check if attribute exists in dense storage */
- if((ret_value = H5A_dense_exists(loc->file, dxpl_id, &ainfo, name)) < 0)
+ if((ret_value = H5A_dense_exists(loc->file, dxpl_id, &ainfo, name, oh_proxy)) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_BADITER, FAIL, "error checking for existence of attribute")
} /* end if */
else {
@@ -1885,6 +1955,8 @@ H5O_attr_exists(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
} /* end else */
done:
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(oh && H5O_unprotect(loc, dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CANTUNPROTECT, FAIL, "unable to release object header")
@@ -1910,6 +1982,7 @@ H5O_attr_bh_info(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5_ih_info_t *bh_info)
H5HF_t *fheap = NULL; /* Fractal heap handle */
H5B2_t *bt2_name = NULL; /* v2 B-tree handle for name index */
H5B2_t *bt2_corder = NULL; /* v2 B-tree handle for creation order index */
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1927,10 +2000,16 @@ H5O_attr_bh_info(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5_ih_info_t *bh_info)
if((ainfo_exists = H5A_get_ainfo(f, dxpl_id, oh, &ainfo)) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTGET, FAIL, "can't check for attribute info message")
else if(ainfo_exists > 0) {
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(f, dxpl_id, oh)))
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTPIN, FAIL, "unable to pin object header proxy")
+
/* Check if name index available */
if(H5F_addr_defined(ainfo.name_bt2_addr)) {
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo.name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(f, dxpl_id, ainfo.name_bt2_addr, NULL, oh_proxy)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Get name index B-tree size */
@@ -1941,7 +2020,7 @@ H5O_attr_bh_info(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5_ih_info_t *bh_info)
/* Check if creation order index available */
if(H5F_addr_defined(ainfo.corder_bt2_addr)) {
/* Open the creation order index v2 B-tree */
- if(NULL == (bt2_corder = H5B2_open(f, dxpl_id, ainfo.corder_bt2_addr, NULL)))
+ if(NULL == (bt2_corder = H5B2_open(f, dxpl_id, ainfo.corder_bt2_addr, NULL, oh_proxy)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for creation order index")
/* Get creation order index B-tree size */
@@ -1964,6 +2043,8 @@ H5O_attr_bh_info(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5_ih_info_t *bh_info)
done:
/* Release resources */
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_ATTR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(fheap && H5HF_close(fheap, dxpl_id) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CANTCLOSEOBJ, FAIL, "can't close fractal heap")
if(bt2_name && H5B2_close(bt2_name, dxpl_id) < 0)
diff --git a/src/H5Ocache.c b/src/H5Ocache.c
index 945d12e..fb12a06 100644
--- a/src/H5Ocache.c
+++ b/src/H5Ocache.c
@@ -38,6 +38,7 @@
#include "H5Eprivate.h" /* Error handling */
#include "H5FLprivate.h" /* Free lists */
#include "H5MFprivate.h" /* File memory management */
+#include "H5MMprivate.h" /* Memory management */
#include "H5Opkg.h" /* Object headers */
#include "H5WBprivate.h" /* Wrapped Buffers */
@@ -68,7 +69,10 @@
/********************/
/* Metadata cache callbacks */
-static herr_t H5O__cache_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5O__cache_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5O__cache_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5O__cache_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5O__cache_image_len(const void *thing, size_t *image_len,
@@ -78,13 +82,17 @@ static herr_t H5O__cache_serialize(const H5F_t *f, void *image, size_t len,
static herr_t H5O__cache_free_icr(void *thing);
static herr_t H5O__cache_clear(const H5F_t *f, void *thing, hbool_t about_to_destroy);
-static herr_t H5O__cache_chk_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5O__cache_chk_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5O__cache_chk_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5O__cache_chk_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5O__cache_chk_image_len(const void *thing, size_t *image_len,
hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static herr_t H5O__cache_chk_serialize(const H5F_t *f, void *image, size_t len,
void *thing);
+static herr_t H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing);
static herr_t H5O__cache_chk_free_icr(void *thing);
static herr_t H5O__cache_chk_clear(const H5F_t *f, void *thing, hbool_t about_to_destroy);
@@ -99,6 +107,7 @@ static herr_t H5O__chunk_serialize(const H5F_t *f, H5O_t *oh, unsigned chunkno);
/* Misc. routines */
static herr_t H5O__add_cont_msg(H5O_cont_msgs_t *cont_msg_info,
const H5O_cont_t *cont);
+static herr_t H5O_decode_prefix(H5F_t *f, H5O_t *oh, const uint8_t *buf, void *_udata);
/*********************/
@@ -112,6 +121,7 @@ const H5AC_class_t H5AC_OHDR[1] = {{
H5FD_MEM_OHDR, /* File space memory type for client */
H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
H5O__cache_get_load_size, /* 'get_load_size' callback */
+ H5O__cache_verify_chksum, /* 'verify_chksum' callback */
H5O__cache_deserialize, /* 'deserialize' callback */
H5O__cache_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -129,11 +139,12 @@ const H5AC_class_t H5AC_OHDR_CHK[1] = {{
H5FD_MEM_OHDR, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5O__cache_chk_get_load_size, /* 'get_load_size' callback */
+ H5O__cache_chk_verify_chksum, /* 'verify_chksum' callback */
H5O__cache_chk_deserialize, /* 'deserialize' callback */
H5O__cache_chk_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
H5O__cache_chk_serialize, /* 'serialize' callback */
- NULL, /* 'notify' callback */
+ H5O__cache_chk_notify, /* 'notify' callback */
H5O__cache_chk_free_icr, /* 'free_icr' callback */
H5O__cache_chk_clear, /* 'clear' callback */
NULL, /* 'fsf_size' callback */
@@ -158,102 +169,50 @@ H5FL_SEQ_DEFINE(H5O_cont_t);
/* Local Variables */
/*******************/
-
/*-------------------------------------------------------------------------
- * Function: H5O__cache_get_load_size()
+ * Function: H5O_decode_prefix
*
- * Purpose: Tell the metadata cache how much data to read from file in
- * the first speculative read for the object header. Note that we do
- * not have to be concerned about reading past the end of file, as the
- * cache will clamp the read to avoid this if needed.
+ * Purpose: To decode the object header prefix.
+ * The code is extracted from H5O__cache_deserialize() into this routine.
*
- * Return: Success: SUCCEED
- * Failure: FAIL
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: John Mainzer
- * 7/28/14
+ * Programmer: Vailin Choi; Aug 2015
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5O__cache_get_load_size(const void H5_ATTR_UNUSED *_udata, size_t *image_len)
+H5O_decode_prefix(H5F_t *f, H5O_t *oh, const uint8_t *buf, void *_udata)
{
- FUNC_ENTER_STATIC_NOERR
-
- /* Check arguments */
- HDassert(image_len);
-
- *image_len = H5O_SPEC_READ_SIZE;
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5O__cache_get_load_size() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5O__cache_deserialize
- *
- * Purpose: Attempt to deserialize the object header contained in the
- * supplied buffer, load the data into an instance of H5O_t, and
- * return a pointer to the new instance.
- *
- * Note that the object header is read with with a speculative read.
- * If the initial read is too small, make note of this fact and return
- * without error. H5C_load_entry() will note the size discrepency
- * and retry the deserialize operation with the correct size read.
- *
- * Return: Success: Pointer to in core representation
- * Failure: NULL
- *
- * Programmer: John Mainzer
- * 7/28/14
- *
- *-------------------------------------------------------------------------
- */
-static void *
-H5O__cache_deserialize(const void *_image, size_t len, void *_udata,
- hbool_t *dirty)
-{
- H5O_t *oh = NULL; /* Object header read in */
- H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */
- const uint8_t *image = (const uint8_t *)_image; /* Pointer into buffer to decode */
- size_t prefix_size; /* Size of object header prefix */
- size_t buf_size; /* Size of prefix+chunk #0 buffer */
- void * ret_value = NULL; /* Return value */
+ H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *p = buf; /* Pointer into buffer to decode */
+ size_t prefix_size; /* Size of object header prefix */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_NOAPI_NOINIT
/* Check arguments */
- HDassert(image);
- HDassert(len > 0);
+ HDassert(f);
+ HDassert(oh);
+ HDassert(buf);
HDassert(udata);
- HDassert(udata->common.f);
- HDassert(udata->common.cont_msg_info);
- HDassert(dirty);
-
- /* Allocate space for the object header data structure */
- if(NULL == (oh = H5FL_CALLOC(H5O_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
-
- /* File-specific, non-stored information */
- oh->sizeof_size = H5F_SIZEOF_SIZE(udata->common.f);
- oh->sizeof_addr = H5F_SIZEOF_ADDR(udata->common.f);
/* Check for presence of magic number */
/* (indicates version 2 or later) */
- if(!HDmemcmp(image, H5O_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) {
+ if(!HDmemcmp(p, H5O_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) {
/* Magic number */
- image += H5_SIZEOF_MAGIC;
+ p += H5_SIZEOF_MAGIC;
/* Version */
- oh->version = *image++;
- if(H5O_VERSION_2 != oh->version)
- HGOTO_ERROR(H5E_OHDR, H5E_VERSION, NULL, "bad object header version number")
+ oh->version = *p++;
+ if(H5O_VERSION_2 != oh->version)
+ HGOTO_ERROR(H5E_OHDR, H5E_VERSION, FAIL, "bad object header version number")
/* Flags */
- oh->flags = *image++;
- if(oh->flags & ~H5O_HDR_ALL_FLAGS)
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "unknown object header status flag(s)")
+ oh->flags = *p++;
+ if(oh->flags & ~H5O_HDR_ALL_FLAGS)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unknown object header status flag(s)")
/* Number of links to object (unless overridden by refcount message) */
oh->nlink = 1;
@@ -262,13 +221,13 @@ H5O__cache_deserialize(const void *_image, size_t len, void *_udata,
if(oh->flags & H5O_HDR_STORE_TIMES) {
uint32_t tmp; /* Temporary value */
- UINT32DECODE(image, tmp);
+ UINT32DECODE(p, tmp);
oh->atime = (time_t)tmp;
- UINT32DECODE(image, tmp);
+ UINT32DECODE(p, tmp);
oh->mtime = (time_t)tmp;
- UINT32DECODE(image, tmp);
+ UINT32DECODE(p, tmp);
oh->ctime = (time_t)tmp;
- UINT32DECODE(image, tmp);
+ UINT32DECODE(p, tmp);
oh->btime = (time_t)tmp;
} /* end if */
else
@@ -276,11 +235,10 @@ H5O__cache_deserialize(const void *_image, size_t len, void *_udata,
/* Attribute fields */
if(oh->flags & H5O_HDR_ATTR_STORE_PHASE_CHANGE) {
- UINT16DECODE(image, oh->max_compact);
- UINT16DECODE(image, oh->min_dense);
-
+ UINT16DECODE(p, oh->max_compact);
+ UINT16DECODE(p, oh->min_dense);
if(oh->max_compact < oh->min_dense)
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad object header attribute phase change values")
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header attribute phase change values")
} /* end if */
else {
oh->max_compact = H5O_CRT_ATTR_MAX_COMPACT_DEF;
@@ -290,44 +248,44 @@ H5O__cache_deserialize(const void *_image, size_t len, void *_udata,
/* First chunk size */
switch(oh->flags & H5O_HDR_CHUNK0_SIZE) {
case 0: /* 1 byte size */
- oh->chunk0_size = *image++;
+ oh->chunk0_size = *p++;
break;
case 1: /* 2 byte size */
- UINT16DECODE(image, oh->chunk0_size);
+ UINT16DECODE(p, oh->chunk0_size);
break;
case 2: /* 4 byte size */
- UINT32DECODE(image, oh->chunk0_size);
+ UINT32DECODE(p, oh->chunk0_size);
break;
case 3: /* 8 byte size */
- UINT64DECODE(image, oh->chunk0_size);
+ UINT64DECODE(p, oh->chunk0_size);
break;
default:
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad size for chunk 0")
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad size for chunk 0")
} /* end switch */
if(oh->chunk0_size > 0 && oh->chunk0_size < H5O_SIZEOF_MSGHDR_OH(oh))
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad object header chunk size")
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header chunk size")
} /* end if */
else {
/* Version */
- oh->version = *image++;
+ oh->version = *p++;
if(H5O_VERSION_1 != oh->version)
- HGOTO_ERROR(H5E_OHDR, H5E_VERSION, NULL, "bad object header version number")
+ HGOTO_ERROR(H5E_OHDR, H5E_VERSION, FAIL, "bad object header version number")
/* Flags */
oh->flags = H5O_CRT_OHDR_FLAGS_DEF;
/* Reserved */
- image++;
+ p++;
/* Number of messages */
- UINT16DECODE(image, udata->v1_pfx_nmesgs);
+ UINT16DECODE(p, udata->v1_pfx_nmesgs);
/* Link count */
- UINT32DECODE(image, oh->nlink);
+ UINT32DECODE(p, oh->nlink);
/* Reset unused time fields */
oh->atime = oh->mtime = oh->ctime = oh->btime = 0;
@@ -337,24 +295,184 @@ H5O__cache_deserialize(const void *_image, size_t len, void *_udata,
oh->min_dense = 0;
/* First chunk size */
- UINT32DECODE(image, oh->chunk0_size);
-
- if((udata->v1_pfx_nmesgs > 0 &&
- oh->chunk0_size < H5O_SIZEOF_MSGHDR_OH(oh)) ||
- (udata->v1_pfx_nmesgs == 0 && oh->chunk0_size > 0))
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad object header chunk size")
+ UINT32DECODE(p, oh->chunk0_size);
+ if((udata->v1_pfx_nmesgs > 0 && oh->chunk0_size < H5O_SIZEOF_MSGHDR_OH(oh)) ||
+ (udata->v1_pfx_nmesgs == 0 && oh->chunk0_size > 0))
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header chunk size")
/* Reserved, in version 1 (for 8-byte alignment padding) */
- image += 4;
+ p += 4;
} /* end else */
/* Determine object header prefix length */
- prefix_size = (size_t)(image - (const uint8_t *)_image);
+ prefix_size = (size_t)(p - buf);
HDassert((size_t)prefix_size == (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh)));
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5O_decode_prefix() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_get_load_size()
+ *
+ * Purpose: Tell the metadata cache how much data to read from file in
+ * the first speculative read for the object header. Note that we do
+ * not have to be concerned about reading past the end of file, as the
+ * cache will clamp the read to avoid this if needed.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 7/28/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__cache_get_load_size(const void *_image, void *_udata, size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */
+ H5O_t oh; /* Object header read in */
+ htri_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image_len);
+
+ if(image == NULL)
+ *image_len = H5O_SPEC_READ_SIZE;
+
+ else { /* compute actual_len */
+ HDassert(udata);
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+
+ /* Decode header prefix */
+ if(H5O_decode_prefix(udata->common.f, &oh, image, udata) < 0)
+ HGOTO_DONE(FAIL)
+
+ /* Save the version to be used in verify_chksum callback */
+ udata->version = oh.version;
+ *actual_len = oh.chunk0_size + (size_t)H5O_SIZEOF_HDR(&oh);
+ }
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O__cache_get_load_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5O__cache_verify_chksum(const void *_image, size_t len, void *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image);
+ HDassert(udata);
+
+ /* There is no checksum for version 1 */
+ if(udata->version != H5O_VERSION_1) {
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+ }
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O__cache_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_deserialize
+ *
+ * Purpose: Attempt to deserialize the object header contained in the
+ * supplied buffer, load the data into an instance of H5O_t, and
+ * return a pointer to the new instance.
+ *
+ * Note that the object header is read with a speculative read.
+ * If the initial read is too small, make note of this fact and return
+ * without error. H5C_load_entry() will note the size discrepancy
+ * and retry the deserialize operation with the correct size read.
+ *
+ * Return: Success: Pointer to in core representation
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer
+ * 7/28/14
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5O__cache_deserialize(const void *_image, size_t len, void *_udata,
+ hbool_t *dirty)
+{
+ H5O_t *oh = NULL; /* Object header read in */
+ H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into buffer to decode */
+ size_t buf_size; /* Size of prefix+chunk #0 buffer */
+ void * ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check arguments */
+ HDassert(image);
+ HDassert(len > 0);
+ HDassert(udata);
+ HDassert(udata->common.f);
+ HDassert(udata->common.cont_msg_info);
+ HDassert(dirty);
+
+ /* Allocate space for the object header data structure */
+ if(NULL == (oh = H5FL_CALLOC(H5O_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+
+ /* File-specific, non-stored information */
+ oh->sizeof_size = H5F_SIZEOF_SIZE(udata->common.f);
+ oh->sizeof_addr = H5F_SIZEOF_ADDR(udata->common.f);
+
+ /* Decode header prefix */
+ if(H5O_decode_prefix(udata->common.f, oh, image, udata) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't deserialize object header prefix")
+
/* Compute the size of the buffer used */
buf_size = oh->chunk0_size + (size_t)H5O_SIZEOF_HDR(oh);
+ oh->swmr_write = !!(H5F_INTENT(udata->common.f) & H5F_ACC_SWMR_WRITE);
+
+ /* Create object header proxy if doing SWMR writes */
+ HDassert(!oh->proxy_present);
+ if(H5F_INTENT(udata->common.f) & H5F_ACC_SWMR_WRITE) {
+ if(H5O_proxy_create(udata->common.f, udata->common.dxpl_id, oh) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTCREATE, NULL, "can't create object header proxy")
+ } /* end if */
+ else
+ oh->proxy_addr = HADDR_UNDEF;
+
/* Check to see if the buffer provided is large enough to contain both
* the prefix and the first chunk. If it isn't, make note of the desired
* size, but otherwise do nothing. H5C_load_entry() will notice the
@@ -705,9 +823,12 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5O__cache_chk_get_load_size(const void *_udata, size_t *image_len)
+H5O__cache_chk_get_load_size(const void *_image, void *_udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5O_chk_cache_ud_t *udata = (const H5O_chk_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ const H5O_chk_cache_ud_t *udata = (const H5O_chk_cache_ud_t *)_udata; /* User data for callback */
FUNC_ENTER_STATIC_NOERR
@@ -716,11 +837,56 @@ H5O__cache_chk_get_load_size(const void *_udata, size_t *image_len)
HDassert(udata->oh);
HDassert(image_len);
- *image_len = udata->size;
+ if(image == NULL)
+ *image_len = udata->size;
+ else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5O__cache_chk_get_load_size() */
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_chk_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5O__cache_chk_verify_chksum(const void *_image, size_t len, void *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5O_chk_cache_ud_t *udata = (H5O_chk_cache_ud_t *)_udata; /* User data for callback */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* There is no checksum for version 1 */
+ if(udata->oh->version != H5O_VERSION_1) {
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+ }
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O__cache_chk_verify_chksum() */
+
/*-------------------------------------------------------------------------
* Function: H5O__cache_chk_deserialize
@@ -758,6 +924,12 @@ H5O__cache_chk_deserialize(const void *image, size_t len, void *_udata,
if(NULL == (chk_proxy = H5FL_CALLOC(H5O_chunk_proxy_t)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, NULL, "memory allocation failed")
+ /* initialize the flush dependency parent fields. If needed, they
+ * will be set in the notify routine.
+ */
+ chk_proxy->fd_parent_addr = HADDR_UNDEF;
+ chk_proxy->fd_parent_ptr = NULL;
+
/* Check if we are still decoding the object header */
/* (as opposed to bringing a piece of it back from the file) */
if(udata->decoding) {
@@ -884,9 +1056,106 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5O__cache_chk_serialize() */
-/**************************************/
-/* no H5O_cache_chk_notify() function */
-/**************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_chk_notify
+ *
+ * Purpose: Handle cache action notifications
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Mar 20 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing)
+{
+ H5O_chunk_proxy_t *chk_proxy = (H5O_chunk_proxy_t *)_thing;
+ void *parent = NULL; /* Chunk containing continuation message that points to this chunk */
+ H5O_chunk_proxy_t *cont_chk_proxy = NULL; /* Proxy for chunk containing continuation message that points to this chunk, if not chunk 0 */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /*
+ * Check arguments.
+ */
+ HDassert(chk_proxy);
+ HDassert(chk_proxy->oh);
+
+ if(chk_proxy->oh->swmr_write) {
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ /* Add flush dependency from chunk containing the continuation message
+ * that points to this chunk (either oh or another chunk proxy object)
+ */
+ if(chk_proxy->cont_chunkno == 0)
+ parent = chk_proxy->oh;
+ else {
+ if(NULL == (cont_chk_proxy = H5O_chunk_protect(chk_proxy->f, H5AC_ind_dxpl_id, chk_proxy->oh, chk_proxy->cont_chunkno)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk")
+ parent = cont_chk_proxy;
+ } /* end else */
+
+ if(H5AC_create_flush_dependency(parent, chk_proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+
+ /* make note of the address and pointer of the flush
+ * dependency parent so we can take the dependency down
+ * on eviction.
+ */
+ HDassert(parent);
+ HDassert(((H5C_cache_entry_t *)parent)->magic ==
+ H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(((H5C_cache_entry_t *)parent)->type);
+ HDassert((((H5C_cache_entry_t *)(parent))->type->id
+ == H5AC_OHDR_ID) ||
+ (((H5C_cache_entry_t *)(parent))->type->id
+ == H5AC_OHDR_CHK_ID));
+
+ chk_proxy->fd_parent_addr = ((H5C_cache_entry_t *)parent)->addr;
+ chk_proxy->fd_parent_ptr = parent;
+
+
+ /* Add flush dependency on object header proxy, if proxy exists */
+ if(chk_proxy->oh->proxy_present)
+ if(H5O_proxy_depend(chk_proxy->f, H5AC_ind_dxpl_id, chk_proxy->oh, chk_proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTDEPEND, FAIL, "can't create flush dependency on object header proxy")
+ /* FALLTHROUGH - the AFTER_FLUSH case below is a no-op */
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ HDassert(chk_proxy->fd_parent_addr != HADDR_UNDEF);
+ HDassert(chk_proxy->fd_parent_ptr != NULL);
+ HDassert(((H5C_cache_entry_t *)(chk_proxy->fd_parent_ptr))->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(((H5C_cache_entry_t *)(chk_proxy->fd_parent_ptr))->type);
+ HDassert((((H5C_cache_entry_t *)(chk_proxy->fd_parent_ptr))->type->id == H5AC_OHDR_ID) || (((H5C_cache_entry_t *)(chk_proxy->fd_parent_ptr))->type->id == H5AC_OHDR_CHK_ID));
+
+ if(H5AC_destroy_flush_dependency(chk_proxy->fd_parent_ptr, chk_proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+ break;
+
+ default:
+#ifdef NDEBUG
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+ } /* end if */
+
+done:
+ if(cont_chk_proxy)
+ /* Use HDONE_ERROR here: HGOTO_ERROR would "goto done" from within the
+ * done block itself, risking an infinite loop on repeated failure. */
+ if(H5O_chunk_unprotect(chk_proxy->f, H5AC_ind_dxpl_id, cont_chk_proxy, FALSE) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O__cache_chk_notify() */
/*-------------------------------------------------------------------------
@@ -995,6 +1264,7 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5O__cache_chk_clear() */
+
/*-------------------------------------------------------------------------
* Function: H5O__add_cont_msg
@@ -1294,17 +1564,12 @@ H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image
/* Check for correct checksum on chunks, in later versions of the format */
if(oh->version > H5O_VERSION_1) {
uint32_t stored_chksum; /* Checksum from file */
- uint32_t computed_chksum; /* Checksum computed in memory */
+
+ /* checksum verification already done in verify_chksum cb */
/* Metadata checksum */
UINT32DECODE(chunk_image, stored_chksum);
- /* Compute checksum on chunk */
- computed_chksum = H5_checksum_metadata(oh->chunk[chunkno].image, (oh->chunk[chunkno].size - H5O_SIZEOF_CHKSUM), 0);
-
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "incorrect metadata checksum for object header chunk")
} /* end if */
/* Sanity check */
@@ -1484,4 +1749,3 @@ H5O__chunk_proxy_dest(H5O_chunk_proxy_t *chk_proxy)
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5O__chunk_proxy_dest() */
-
diff --git a/src/H5Ochunk.c b/src/H5Ochunk.c
index 8abe660..2ccb2f3 100644
--- a/src/H5Ochunk.c
+++ b/src/H5Ochunk.c
@@ -93,9 +93,10 @@ H5FL_DEFINE(H5O_chunk_proxy_t);
*-------------------------------------------------------------------------
*/
herr_t
-H5O_chunk_add(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
+H5O_chunk_add(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx,
+ unsigned cont_chunkno)
{
- H5O_chunk_proxy_t *chk_proxy = NULL; /* Proxy for chunk, to mark it dirty in the cache */
+ H5O_chunk_proxy_t *chk_proxy = NULL; /* Proxy for chunk, to mark it dirty in the cache */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_TAG(dxpl_id, oh->cache_info.addr, FAIL)
@@ -111,8 +112,13 @@ H5O_chunk_add(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
/* Set the values in the chunk proxy */
+ chk_proxy->f = f;
chk_proxy->oh = oh;
chk_proxy->chunkno = idx;
+ chk_proxy->cont_chunkno = cont_chunkno;
+
+ chk_proxy->fd_parent_addr = HADDR_UNDEF;
+ chk_proxy->fd_parent_ptr = NULL;
/* Increment reference count on object header */
if(H5O_inc_rc(oh) < 0)
@@ -121,6 +127,7 @@ H5O_chunk_add(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
/* Insert the chunk proxy into the cache */
if(H5AC_insert_entry(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, chk_proxy, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "unable to cache object header chunk")
+
chk_proxy = NULL;
done:
@@ -171,6 +178,7 @@ H5O_chunk_protect(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINC, NULL, "can't increment reference count on object header")
/* Set chunk proxy fields */
+ chk_proxy->f = f;
chk_proxy->oh = oh;
chk_proxy->chunkno = idx;
} /* end if */
@@ -372,6 +380,7 @@ H5O_chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
{
H5O_chunk_proxy_t *chk_proxy; /* Proxy for chunk, to mark it dirty in the cache */
H5O_chk_cache_ud_t chk_udata; /* User data for loading chunk */
+ unsigned cache_flags = H5AC__DELETED_FLAG; /* Flags for unprotecting proxy */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_TAG(dxpl_id, oh->cache_info.addr, FAIL)
@@ -397,8 +406,19 @@ H5O_chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
HDassert(chk_proxy->oh == oh);
HDassert(chk_proxy->chunkno == idx);
+ /* Update flush dependencies if doing SWMR writes */
+ if(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE) {
+ /* Remove flush dependency on object header proxy, if proxy exists */
+ if(oh->proxy_present)
+ if(H5O_proxy_undepend(f, dxpl_id, oh, chk_proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNDEPEND, FAIL, "can't destroy flush dependency on object header proxy")
+ } /* end if */
+ else
+ /* Only free file space if not doing SWMR writes */
+ cache_flags |= H5AC__DIRTIED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
+
/* Release the chunk proxy from the cache, marking it deleted */
- if(H5AC_unprotect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, chk_proxy, (H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG)) < 0)
+ if(H5AC_unprotect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, chk_proxy, cache_flags) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header chunk")
done:
diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c
index 02c72e7..aaafd72 100644
--- a/src/H5Ocopy.c
+++ b/src/H5Ocopy.c
@@ -362,20 +362,33 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out*/,
/* Check if the object at the address is already open in the file */
if(H5FO_opened(oloc_src->file, oloc_src->addr) != NULL) {
- H5G_loc_t tmp_loc; /* Location of object */
- H5O_loc_t tmp_oloc; /* Location of object */
- H5G_name_t tmp_path; /* Object's path */
+ H5G_loc_t tmp_loc; /* Location of object */
+ H5O_loc_t tmp_oloc; /* Location of object */
+ H5G_name_t tmp_path; /* Object's path */
+ void *obj_ptr = NULL; /* Object pointer */
+ hid_t tmp_id = -1; /* Object ID */
tmp_loc.oloc = &tmp_oloc;
tmp_loc.path = &tmp_path;
tmp_oloc.file = oloc_src->file;
tmp_oloc.addr = oloc_src->addr;
- tmp_oloc.holding_file = oloc_src->holding_file;
+ tmp_oloc.holding_file = FALSE;
H5G_name_reset(tmp_loc.path);
- /* Flush the object of this class */
- if(obj_class->flush && obj_class->flush(&tmp_loc, dxpl_id) < 0)
+ /* Get a temporary ID */
+ if((tmp_id = obj_class->open(&tmp_loc, H5P_DEFAULT, dxpl_id, FALSE)) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTFLUSH, FAIL, "unable to open object")
+
+ /* Get object pointer */
+ obj_ptr = H5I_object(tmp_id);
+
+ /* Flush the object */
+ if(obj_class->flush && obj_class->flush(obj_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTFLUSH, FAIL, "unable to flush object")
+
+ /* Release the temporary ID */
+ if(tmp_id != -1 && H5I_dec_app_ref(tmp_id))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "unable to close temporary ID")
} /* end if */
/* Get source object header */
@@ -448,6 +461,7 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out*/,
oh_dst->attr_msgs_seen = oh_src->attr_msgs_seen;
oh_dst->sizeof_size = H5F_SIZEOF_SIZE(oloc_dst->file);
oh_dst->sizeof_addr = H5F_SIZEOF_ADDR(oloc_dst->file);
+ oh_dst->swmr_write = !!(H5F_INTENT(oloc_dst->file) & H5F_ACC_SWMR_WRITE);
/* Copy time fields */
oh_dst->atime = oh_src->atime;
@@ -459,6 +473,14 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out*/,
oh_dst->max_compact = oh_src->max_compact;
oh_dst->min_dense = oh_src->min_dense;
+ /* Create object header proxy if doing SWMR writes */
+ if(H5F_INTENT(oloc_dst->file) & H5F_ACC_SWMR_WRITE) {
+ if(H5O_proxy_create(oloc_dst->file, dxpl_id, oh_dst) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTCREATE, FAIL, "can't create object header proxy")
+ } /* end if */
+ else
+ oh_dst->proxy_addr = HADDR_UNDEF;
+
/* Initialize size of chunk array. Start off with zero chunks so this field
* is consistent with the current state of the chunk array. This is
* important if an error occurs.
diff --git a/src/H5Oflush.c b/src/H5Oflush.c
new file mode 100644
index 0000000..e9189f8
--- /dev/null
+++ b/src/H5Oflush.c
@@ -0,0 +1,410 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5Oflush.c
+ * Aug 19, 2010
+ * Mike McGreevy <mamcgree@hdfgroup.org>
+ *
+ * Purpose: Object flush/refresh routines.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5Omodule.h" /* This source code file is part of the H5O module */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "H5private.h" /* Generic Functions */
+#include "H5Dprivate.h" /* Datasets */
+#include "H5Eprivate.h" /* Errors */
+#include "H5Fprivate.h" /* Files */
+#include "H5Gprivate.h" /* Groups */
+#include "H5Iprivate.h" /* IDs */
+#include "H5Opkg.h" /* Objects */
+
+/********************/
+/* Local Prototypes */
+/********************/
+static herr_t H5O_oh_tag(const H5O_loc_t *oloc, hid_t dxpl_id, haddr_t *tag);
+
+/*************/
+/* Functions */
+/*************/
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Oflush
+ *
+ * Purpose: Flushes all buffers associated with an object to disk.
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Mike McGreevy
+ * May 19, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Oflush(hid_t obj_id)
+{
+ H5O_loc_t *oloc; /* object location */
+ void *obj_ptr; /* Pointer to object */
+ const H5O_obj_class_t *obj_class = NULL; /* Class of object */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", obj_id);
+
+ /* Check args */
+ if(NULL == (oloc = H5O_get_loc(obj_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an object")
+
+ /* Get the object pointer */
+ if(NULL == (obj_ptr = H5I_object(obj_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid object identifier")
+
+ /* Get the object class */
+ if(NULL == (obj_class = H5O_obj_class(oloc, H5AC_dxpl_id)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, FAIL, "unable to determine object class")
+
+ /* Flush the object of this class */
+ if(obj_class->flush && obj_class->flush(obj_ptr, H5AC_dxpl_id) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTFLUSH, FAIL, "unable to flush object")
+
+ /* Flush the object metadata and invoke flush callback */
+ if(H5O_flush_common(oloc, obj_id, H5AC_dxpl_id) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTFLUSH, FAIL, "unable to flush object and object flush callback")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Oflush() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_flush_common
+ *
+ * Purpose: Flushes the object's metadata
+ * Invokes the user-defined callback if there is one.
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Vailin Choi; Dec 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5O_flush_common(H5O_loc_t *oloc, hid_t obj_id, hid_t dxpl_id)
+{
+ haddr_t tag = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Retrieve tag for object */
+ if(H5O_oh_tag(oloc, dxpl_id, &tag) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTFLUSH, FAIL, "unable to flush object metadata")
+
+ /* Flush metadata based on tag value of the object */
+ if(H5F_flush_tagged_metadata(oloc->file, tag, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush tagged metadata")
+
+ /* Check to invoke callback */
+ if(H5F_object_flush_cb(oloc->file, obj_id) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTFLUSH, FAIL, "unable to do object flush callback")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O_flush_common() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_oh_tag
+ *
+ * Purpose: Get object header's address--tag value for the object
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Mike McGreevy
+ * May 19, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O_oh_tag(const H5O_loc_t *oloc, hid_t dxpl_id, haddr_t *tag)
+{
+ H5O_t *oh = NULL; /* Object header */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Check args */
+ HDassert(oloc);
+
+ /* Get object header for object */
+ if(NULL == (oh = H5O_protect(oloc, dxpl_id, H5AC__READ_ONLY_FLAG)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object's object header")
+
+ /* Get object header's address (i.e. the tag value for this object) */
+ if(HADDR_UNDEF == (*tag = H5O_OH_GET_ADDR(oh)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to get address of object header")
+
+done:
+ /* Unprotect object header on failure */
+ if(oh && H5O_unprotect(oloc, dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O_oh_tag() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Orefresh
+ *
+ * Purpose: Refreshes all buffers associated with an object.
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Mike McGreevy
+ * July 28, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Orefresh(hid_t oid)
+{
+ H5O_loc_t *oloc; /* object location */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", oid);
+
+ /* Check args */
+ if((oloc = H5O_get_loc(oid)) == NULL)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an object")
+
+ /* Private function */
+ if(H5O_refresh_metadata(oid, *oloc, H5AC_dxpl_id) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unable to refresh object")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Orefresh() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_refresh_metadata
+ *
+ * Purpose: Refreshes all buffers associated with an object.
+ *
+ * Note: This is based on the original H5O_refresh_metadata() but
+ * is split into 2 routines.
+ * (This is done so that H5Fstart_swmr_write() can use these
+ * 2 routines to refresh opened objects. This may be
+ * restored back to the original code when H5Fstart_swmr_write()
+ * uses a different approach to handle issues with opened objects.
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Mike McGreevy/Vailin Choi
+ * July 28, 2010/Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5O_refresh_metadata(hid_t oid, H5O_loc_t oloc, hid_t dxpl_id)
+{
+ H5G_loc_t obj_loc;
+ H5O_loc_t obj_oloc;
+ H5G_name_t obj_path;
+ hbool_t objs_incr = FALSE; /* Whether the object count in the file was incremented */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Create empty object location */
+ obj_loc.oloc = &obj_oloc;
+ obj_loc.path = &obj_path;
+ H5G_loc_reset(&obj_loc);
+
+ /* "Fake" another open object in the file, so that it doesn't get closed
+ * if this object is the only thing holding the file open.
+ */
+ H5F_incr_nopen_objs(oloc.file);
+ objs_incr = TRUE;
+
+ /* Close object & evict its metadata */
+ if((H5O_refresh_metadata_close(oid, oloc, &obj_loc, dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unable to refresh object")
+
+ /* Re-open the object, re-fetching its metadata */
+ if((H5O_refresh_metadata_reopen(oid, &obj_loc, dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unable to refresh object")
+
+done:
+ if(objs_incr)
+ H5F_decr_nopen_objs(oloc.file);
+
+ FUNC_LEAVE_NOAPI(ret_value);
+} /* end H5O_refresh_metadata() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_refresh_metadata_close
+ *
+ * Purpose: This is the first part of the original routine H5O_refresh_metadata().
+ * (1) Save object location information.
+ * (2) Get object cork status
+ * (3) Close the object
+ * (4) Flush and evict object metadata
+ * (5) Re-cork the object if needed
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Mike McGreevy/Vailin Choi
+ * July 28, 2010/Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5O_refresh_metadata_close(hid_t oid, H5O_loc_t oloc, H5G_loc_t *obj_loc, hid_t dxpl_id)
+{
+ haddr_t tag = 0;
+ hbool_t corked;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Make deep local copy of object's location information */
+ if(obj_loc) {
+ H5G_loc_t tmp_loc;
+
+ H5G_loc(oid, &tmp_loc);
+ H5G_loc_copy(obj_loc, &tmp_loc, H5_COPY_DEEP);
+ } /* end if */
+
+ /* Retrieve tag for object */
+ if(H5O_oh_tag(&oloc, dxpl_id, &tag) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFLUSH, FAIL, "unable to get object header address")
+
+ /* Get cork status of the object with tag */
+ if(H5AC_cork(oloc.file, tag, H5AC__GET_CORKED, &corked) < 0)
+ HGOTO_ERROR(H5E_ATOM, H5E_SYSTEM, FAIL, "unable to retrieve an object's cork status")
+
+ /* Close the object */
+ if(H5I_dec_ref(oid) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to close object")
+
+ /* Flush metadata based on tag value of the object */
+ if(H5F_flush_tagged_metadata(oloc.file, tag, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush tagged metadata")
+
+ /* Evict the object's tagged metadata */
+ if(H5F_evict_tagged_metadata(oloc.file, tag, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to evict metadata")
+
+ /* Re-cork object with tag */
+ if(corked)
+ if(H5AC_cork(oloc.file, tag, H5AC__SET_CORK, &corked) < 0)
+ HGOTO_ERROR(H5E_ATOM, H5E_SYSTEM, FAIL, "unable to cork the object")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value);
+} /* end H5O_refresh_metadata_close() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_refresh_metadata_reopen
+ *
+ * Purpose: This is the second part of the original routine H5O_refresh_metadata().
+ * (1) Re-open object with the saved object location information.
+ * (2) Re-register object ID with the re-opened object.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Mike McGreevy/Vailin Choi
+ * July 28, 2010/Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5O_refresh_metadata_reopen(hid_t oid, H5G_loc_t *obj_loc, hid_t dxpl_id)
+{
+ void *object = NULL; /* Dataset for this operation */
+ H5I_type_t type;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Get object's type */
+ type = H5I_get_type(oid);
+
+ switch(type) {
+ case(H5I_GROUP):
+
+ /* Re-open the group */
+ if(NULL == (object = H5G_open(obj_loc, dxpl_id)))
+ HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open group")
+ break;
+
+ case(H5I_DATATYPE):
+
+ /* Re-open the named datatype */
+ if(NULL == (object = H5T_open(obj_loc, dxpl_id)))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTOPENOBJ, FAIL, "unable to open named datatype")
+ break;
+
+ case(H5I_DATASET):
+
+ /* Re-open the dataset */
+ if(NULL == (object = H5D_open(obj_loc, H5P_DATASET_ACCESS_DEFAULT, dxpl_id)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to open dataset")
+ break;
+
+ case(H5I_UNINIT):
+ case(H5I_BADID):
+ case(H5I_FILE):
+ case(H5I_DATASPACE):
+ case(H5I_ATTR):
+ case(H5I_REFERENCE):
+ case(H5I_VFL):
+ case(H5I_GENPROP_CLS):
+ case(H5I_GENPROP_LST):
+ case(H5I_ERROR_CLASS):
+ case(H5I_ERROR_MSG):
+ case(H5I_ERROR_STACK):
+ case(H5I_NTYPES):
+ default:
+ HGOTO_ERROR(H5E_ARGS, H5E_CANTRELEASE, FAIL, "not a valid file object ID (dataset, group, or datatype)")
+ break;
+
+ } /* end switch */
+
+ /* Re-register ID for the object */
+ if((H5I_register_with_id(type, object, TRUE, oid)) < 0)
+ HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to re-register object atom")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value);
+} /* end H5O_refresh_metadata_reopen() */
+
diff --git a/src/H5Olayout.c b/src/H5Olayout.c
index 0c13498..a73a4d9 100644
--- a/src/H5Olayout.c
+++ b/src/H5Olayout.c
@@ -31,6 +31,7 @@
#include "H5MMprivate.h" /* Memory management */
#include "H5Opkg.h" /* Object headers */
#include "H5Pprivate.h" /* Property lists */
+#include "H5Sprivate.h" /* Dataspaces */
/* Local macros */
@@ -233,26 +234,134 @@ H5O__layout_decode(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id, H5O_t H5_ATTR_UNUSED
break;
case H5D_CHUNKED:
- /* Dimensionality */
- mesg->u.chunk.ndims = *p++;
- if(mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large")
-
- /* B-tree address */
- H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr));
-
- /* Chunk dimensions */
- for(u = 0; u < mesg->u.chunk.ndims; u++)
- UINT32DECODE(p, mesg->u.chunk.dim[u]);
-
- /* Compute chunk size */
- for(u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++)
- mesg->u.chunk.size *= mesg->u.chunk.dim[u];
-
- /* Set the chunk operations */
- /* (Only "btree" indexing type supported with v3 of message format) */
- mesg->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BTREE;
- mesg->storage.u.chunk.ops = H5D_COPS_BTREE;
+ if(mesg->version < H5O_LAYOUT_VERSION_4) {
+ /* Set the chunked layout flags */
+ mesg->u.chunk.flags = (uint8_t)0;
+
+ /* Dimensionality */
+ mesg->u.chunk.ndims = *p++;
+ if(mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large")
+
+ /* B-tree address */
+ H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr));
+
+ /* Chunk dimensions */
+ for(u = 0; u < mesg->u.chunk.ndims; u++)
+ UINT32DECODE(p, mesg->u.chunk.dim[u]);
+
+ /* Compute chunk size */
+ for(u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++)
+ mesg->u.chunk.size *= mesg->u.chunk.dim[u];
+
+ /* Set the chunk operations */
+ /* (Only "btree" indexing type supported with v3 of message format) */
+ mesg->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BTREE;
+ mesg->storage.u.chunk.ops = H5D_COPS_BTREE;
+ } /* end if */
+ else {
+ /* Get the chunked layout flags */
+ mesg->u.chunk.flags = *p++;
+
+ /* Check for valid flags */
+ /* (Currently issues an error for all non-zero values,
+ * until features are added for the flags)
+ */
+ if(mesg->u.chunk.flags & ~H5O_LAYOUT_ALL_CHUNK_FLAGS)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad flag value for message")
+
+ /* Dimensionality */
+ mesg->u.chunk.ndims = *p++;
+ if(mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "dimensionality is too large")
+
+ /* Encoded # of bytes for each chunk dimension */
+ mesg->u.chunk.enc_bytes_per_dim = *p++;
+ if(mesg->u.chunk.enc_bytes_per_dim == 0 || mesg->u.chunk.enc_bytes_per_dim > 8)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "encoded chunk dimension size is too large")
+
+ /* Chunk dimensions */
+ for(u = 0; u < mesg->u.chunk.ndims; u++)
+ UINT64DECODE_VAR(p, mesg->u.chunk.dim[u], mesg->u.chunk.enc_bytes_per_dim);
+
+ /* Compute chunk size */
+ for(u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++)
+ mesg->u.chunk.size *= mesg->u.chunk.dim[u];
+
+ /* Chunk index type */
+ mesg->u.chunk.idx_type = (H5D_chunk_index_t)*p++;
+ if(mesg->u.chunk.idx_type >= H5D_CHUNK_IDX_NTYPES)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "unknown chunk index type")
+ mesg->storage.u.chunk.idx_type = mesg->u.chunk.idx_type;
+
+ switch(mesg->u.chunk.idx_type) {
+ case H5D_CHUNK_IDX_BTREE:
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "v1 B-tree index type should never be in a v4 layout message")
+ break;
+
+ case H5D_CHUNK_IDX_NONE: /* Implicit Index */
+ mesg->storage.u.chunk.ops = H5D_COPS_NONE;
+ break;
+
+ case H5D_CHUNK_IDX_SINGLE: /* Single Chunk Index */
+ if(mesg->u.chunk.flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) {
+ H5F_DECODE_LENGTH(f, p, mesg->storage.u.chunk.u.single.nbytes);
+ UINT32DECODE(p, mesg->storage.u.chunk.u.single.filter_mask);
+ }
+
+ /* Set the chunk operations */
+ mesg->storage.u.chunk.ops = H5D_COPS_SINGLE;
+ break;
+
+ case H5D_CHUNK_IDX_FARRAY:
+ /* Fixed array creation parameters */
+ mesg->u.chunk.u.farray.cparam.max_dblk_page_nelmts_bits = *p++;
+ if(0 == mesg->u.chunk.u.farray.cparam.max_dblk_page_nelmts_bits)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid fixed array creation parameter")
+
+ /* Set the chunk operations */
+ mesg->storage.u.chunk.ops = H5D_COPS_FARRAY;
+ break;
+
+ case H5D_CHUNK_IDX_EARRAY:
+ /* Extensible array creation parameters */
+ mesg->u.chunk.u.earray.cparam.max_nelmts_bits = *p++;
+ if(0 == mesg->u.chunk.u.earray.cparam.max_nelmts_bits)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid extensible array creation parameter")
+ mesg->u.chunk.u.earray.cparam.idx_blk_elmts = *p++;
+ if(0 == mesg->u.chunk.u.earray.cparam.idx_blk_elmts)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid extensible array creation parameter")
+ mesg->u.chunk.u.earray.cparam.sup_blk_min_data_ptrs = *p++;
+ if(0 == mesg->u.chunk.u.earray.cparam.sup_blk_min_data_ptrs)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid extensible array creation parameter")
+ mesg->u.chunk.u.earray.cparam.data_blk_min_elmts = *p++;
+ if(0 == mesg->u.chunk.u.earray.cparam.data_blk_min_elmts)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid extensible array creation parameter")
+ mesg->u.chunk.u.earray.cparam.max_dblk_page_nelmts_bits = *p++;
+ if(0 == mesg->u.chunk.u.earray.cparam.max_dblk_page_nelmts_bits)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid extensible array creation parameter")
+
+ /* Set the chunk operations */
+ mesg->storage.u.chunk.ops = H5D_COPS_EARRAY;
+ break;
+
+ case H5D_CHUNK_IDX_BT2: /* v2 B-tree index */
+ UINT32DECODE(p, mesg->u.chunk.u.btree2.cparam.node_size);
+ mesg->u.chunk.u.btree2.cparam.split_percent = *p++;
+ mesg->u.chunk.u.btree2.cparam.merge_percent = *p++;
+
+ /* Set the chunk operations */
+ mesg->storage.u.chunk.ops = H5D_COPS_BT2;
+ break;
+
+ case H5D_CHUNK_IDX_NTYPES:
+ default:
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "Invalid chunk index type")
+ } /* end switch */
+
+ /* Chunk index address */
+ H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr));
+ } /* end else */
/* Set the layout operations */
mesg->ops = H5D_LOPS_CHUNK;
@@ -457,8 +566,8 @@ H5O__layout_encode(H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared, uint8_t *p,
HDassert(p);
/* Message version */
- *p++ = mesg->type == H5D_VIRTUAL ? (uint8_t)H5O_LAYOUT_VERSION_4
- : (uint8_t)H5O_LAYOUT_VERSION_3;
+ *p++ = (uint8_t)((mesg->version < H5O_LAYOUT_VERSION_3) ?
+ H5O_LAYOUT_VERSION_3 : mesg->version);
/* Layout class */
*p++ = mesg->type;
@@ -488,16 +597,85 @@ H5O__layout_encode(H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared, uint8_t *p,
break;
case H5D_CHUNKED:
- /* Number of dimensions */
- HDassert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
- *p++ = (uint8_t)mesg->u.chunk.ndims;
+ if(mesg->version < H5O_LAYOUT_VERSION_4) {
+ /* Number of dimensions */
+ HDassert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ *p++ = (uint8_t)mesg->u.chunk.ndims;
+
+ /* B-tree address */
+ H5F_addr_encode(f, &p, mesg->storage.u.chunk.idx_addr);
+
+ /* Dimension sizes */
+ for(u = 0; u < mesg->u.chunk.ndims; u++)
+ UINT32ENCODE(p, mesg->u.chunk.dim[u]);
+ } /* end if */
+ else {
+ /* Chunk feature flags */
+ *p++ = mesg->u.chunk.flags;
- /* B-tree address */
- H5F_addr_encode(f, &p, mesg->storage.u.chunk.idx_addr);
+ /* Number of dimensions */
+ HDassert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
+ *p++ = (uint8_t)mesg->u.chunk.ndims;
- /* Dimension sizes */
- for(u = 0; u < mesg->u.chunk.ndims; u++)
- UINT32ENCODE(p, mesg->u.chunk.dim[u]);
+ /* Encoded # of bytes for each chunk dimension */
+ HDassert(mesg->u.chunk.enc_bytes_per_dim > 0 && mesg->u.chunk.enc_bytes_per_dim <= 8);
+ *p++ = (uint8_t)mesg->u.chunk.enc_bytes_per_dim;
+
+ /* Dimension sizes */
+ for(u = 0; u < mesg->u.chunk.ndims; u++)
+ UINT64ENCODE_VAR(p, mesg->u.chunk.dim[u], mesg->u.chunk.enc_bytes_per_dim);
+
+ /* Chunk index type */
+ *p++ = (uint8_t)mesg->u.chunk.idx_type;
+
+ switch(mesg->u.chunk.idx_type) {
+ case H5D_CHUNK_IDX_BTREE:
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "v1 B-tree index type should never be in a v4 layout message")
+ break;
+
+ case H5D_CHUNK_IDX_NONE: /* Implicit */
+ break;
+
+ case H5D_CHUNK_IDX_SINGLE: /* Single Chunk */
+ /* Filter information */
+ if(mesg->u.chunk.flags & H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER) {
+ H5F_ENCODE_LENGTH(f, p, mesg->storage.u.chunk.u.single.nbytes);
+ UINT32ENCODE(p, mesg->storage.u.chunk.u.single.filter_mask);
+ } /* end if */
+ break;
+
+ case H5D_CHUNK_IDX_FARRAY:
+ /* Fixed array creation parameters */
+ *p++ = mesg->u.chunk.u.farray.cparam.max_dblk_page_nelmts_bits;
+ break;
+
+ case H5D_CHUNK_IDX_EARRAY:
+ /* Extensible array creation parameters */
+ *p++ = mesg->u.chunk.u.earray.cparam.max_nelmts_bits;
+ *p++ = mesg->u.chunk.u.earray.cparam.idx_blk_elmts;
+ *p++ = mesg->u.chunk.u.earray.cparam.sup_blk_min_data_ptrs;
+ *p++ = mesg->u.chunk.u.earray.cparam.data_blk_min_elmts;
+ *p++ = mesg->u.chunk.u.earray.cparam.max_dblk_page_nelmts_bits;
+ break;
+
+ case H5D_CHUNK_IDX_BT2: /* v2 B-tree index */
+ UINT32ENCODE(p, mesg->u.chunk.u.btree2.cparam.node_size);
+ *p++ = mesg->u.chunk.u.btree2.cparam.split_percent;
+ *p++ = mesg->u.chunk.u.btree2.cparam.merge_percent;
+ break;
+
+ case H5D_CHUNK_IDX_NTYPES:
+ default:
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTENCODE, FAIL, "Invalid chunk index type")
+ } /* end switch */
+
+ /*
+ * Implicit index: Address of the chunks
+ * Single chunk index: address of the single chunk
+ * Other indexes: chunk index address
+ */
+ H5F_addr_encode(f, &p, mesg->storage.u.chunk.idx_addr);
+ } /* end else */
break;
case H5D_VIRTUAL:
@@ -1016,20 +1194,48 @@ H5O__layout_debug(H5F_t H5_ATTR_UNUSED *f, hid_t H5_ATTR_UNUSED dxpl_id, const v
HDfprintf(stream, "}\n");
/* Index information */
- switch(mesg->storage.u.chunk.idx_type) {
+ switch(mesg->u.chunk.idx_type) {
case H5D_CHUNK_IDX_BTREE:
HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
"Index Type:", "v1 B-tree");
- HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth,
- "B-tree address:", mesg->storage.u.chunk.idx_addr);
+ break;
+
+ case H5D_CHUNK_IDX_NONE:
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ "Index Type:", "Implicit");
+ break;
+
+ case H5D_CHUNK_IDX_SINGLE:
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ "Index Type:", "Single Chunk");
+ break;
+
+ case H5D_CHUNK_IDX_FARRAY:
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ "Index Type:", "Fixed Array");
+ /* (Should print the fixed array creation parameters) */
+ break;
+
+ case H5D_CHUNK_IDX_EARRAY:
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ "Index Type:", "Extensible Array");
+ /* (Should print the extensible array creation parameters) */
+ break;
+
+ case H5D_CHUNK_IDX_BT2:
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ "Index Type:", "v2 B-tree");
+ /* (Should print the v2-Btree creation parameters) */
break;
case H5D_CHUNK_IDX_NTYPES:
default:
HDfprintf(stream, "%*s%-*s %s (%u)\n", indent, "", fwidth,
- "Index Type:", "Unknown", (unsigned)mesg->storage.u.chunk.idx_type);
+ "Index Type:", "Unknown", (unsigned)mesg->u.chunk.idx_type);
break;
} /* end switch */
+ HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth,
+ "Index address:", mesg->storage.u.chunk.idx_addr);
break;
case H5D_CONTIGUOUS:
diff --git a/src/H5Opkg.h b/src/H5Opkg.h
index 642e938..8ce2718 100644
--- a/src/H5Opkg.h
+++ b/src/H5Opkg.h
@@ -31,7 +31,7 @@
#define H5O_NMESGS 8 /*initial number of messages */
#define H5O_NCHUNKS 2 /*initial number of chunks */
#define H5O_MIN_SIZE 22 /* Min. obj header data size (must be big enough for a message prefix and a continuation message) */
-#define H5O_MSG_TYPES 25 /* # of types of messages */
+#define H5O_MSG_TYPES 26 /* # of types of messages */
#define H5O_MAX_CRT_ORDER_IDX 65535 /* Max. creation order index value */
/* Versions of object header structure */
@@ -48,6 +48,17 @@
* and 'size' callback for places to change when updating this. */
#define H5O_VERSION_LATEST H5O_VERSION_2
+/* This is the initial size of the dynamically allocated list of object
+ * header continuation chunk flush dependency parents maintained by the
+ * object header proxy.
+ *
+ * The current value of 1 presumes that the typical number of entries
+ * on this list is almost always either zero or 1. Increase this value
+ * if appropriate.
+ */
+#define H5O_FD_PAR_LIST_BASE 1
+
+
/*
* Align messages on 8-byte boundaries because we would like to copy the
* object header chunks directly into memory and operate on them there, even
@@ -66,7 +77,7 @@
#define H5O_ALIGN_OH(O, X) \
H5O_ALIGN_VERS((O)->version, X)
#define H5O_ALIGN_F(F, X) \
- H5O_ALIGN_VERS((H5F_USE_LATEST_FORMAT(F) ? H5O_VERSION_LATEST : H5O_VERSION_1), X)
+ H5O_ALIGN_VERS((H5F_USE_LATEST_FLAGS(F, H5F_LATEST_OBJ_HEADER) ? H5O_VERSION_LATEST : H5O_VERSION_1), X)
/* Size of checksum (on disk) */
#define H5O_SIZEOF_CHKSUM 4
@@ -138,7 +149,7 @@
#define H5O_SIZEOF_MSGHDR_OH(O) \
H5O_SIZEOF_MSGHDR_VERS((O)->version, (O)->flags & H5O_HDR_ATTR_CRT_ORDER_TRACKED)
#define H5O_SIZEOF_MSGHDR_F(F, C) \
- H5O_SIZEOF_MSGHDR_VERS((H5F_USE_LATEST_FORMAT(F) || H5F_STORE_MSG_CRT_IDX(F)) ? H5O_VERSION_LATEST : H5O_VERSION_1, (C))
+ H5O_SIZEOF_MSGHDR_VERS((H5F_USE_LATEST_FLAGS(F, H5F_LATEST_OBJ_HEADER) || H5F_STORE_MSG_CRT_IDX(F)) ? H5O_VERSION_LATEST : H5O_VERSION_1, (C))
/*
* Size of chunk "header" for each chunk
@@ -267,6 +278,7 @@ struct H5O_t {
/* File-specific information (not stored) */
size_t sizeof_size; /* Size of file sizes */
size_t sizeof_addr; /* Size of file addresses */
+ hbool_t swmr_write; /* Whether we are doing SWMR writes */
/* Debugging information (not stored) */
#ifdef H5O_ENABLE_BAD_MESG_COUNT
@@ -312,6 +324,10 @@ struct H5O_t {
size_t nchunks; /*number of chunks */
size_t alloc_nchunks; /*chunks allocated */
H5O_chunk_t *chunk; /*array of chunks */
+
+ /* Object header proxy information (not stored) */
+ haddr_t proxy_addr; /* Temporary address of object header proxy */
+ hbool_t proxy_present; /* Whether the proxy is present in cache (and we have to track dependencies) */
};
/* Class for types of objects in file */
@@ -325,7 +341,7 @@ typedef struct H5O_obj_class_t {
void *(*create)(H5F_t *, void *, H5G_loc_t *, hid_t ); /*create an object of this class */
H5O_loc_t *(*get_oloc)(hid_t ); /*get the object header location for an object */
herr_t (*bh_info)(const H5O_loc_t *loc, hid_t dxpl_id, H5O_t *oh, H5_ih_info_t *bh_info); /*get the index & heap info for an object */
- herr_t (*flush)(H5G_loc_t *loc, hid_t dxpl_id); /*flush an opened object of this class */
+ herr_t (*flush)(void *obj_ptr, hid_t dxpl_id); /*flush an opened object of this class */
} H5O_obj_class_t;
/* Node in skip list to map addresses from one file to another during object header copy */
@@ -360,6 +376,9 @@ typedef struct H5O_common_cache_ud_t {
typedef struct H5O_cache_ud_t {
hbool_t made_attempt; /* Whether the deserialize routine was already attempted */
unsigned v1_pfx_nmesgs; /* Number of messages from v1 prefix header */
+ uint8_t version; /* Version number obtained in get_load_size callback.
+ * It will be used later in verify_chksum callback
+ */
H5O_common_cache_ud_t common; /* Common object header cache callback info */
} H5O_cache_ud_t;
@@ -368,8 +387,34 @@ typedef struct H5O_chunk_proxy_t {
H5AC_info_t cache_info; /* Information for metadata cache functions, _must_ be */
/* first field in structure */
+ H5F_t *f; /* Pointer to file for object header/chunk */
H5O_t *oh; /* Object header for this chunk */
unsigned chunkno; /* Chunk number for this chunk */
+ unsigned cont_chunkno; /* Chunk number for the chunk containing the continuation message that points to this chunk */
+
+ /* Flush dependency parent information (not stored)
+ *
+ * The following fields are used to store the base address and a pointer
+ * to the in core representation of the chunk proxy's flush dependency
+ * parent -- if it exists. If it does not exist, these fields will
+ * contain HADDR_UNDEF and NULL respectively.
+ *
+ * If the file is opened in SWMR write mode, the flush dependency
+ * parent of the chunk proxy will be either its object header
+ * (if cont_chunkno == 0) or the chunk proxy indicated by the
+ * cont_chunkno field (if cont_chunkno > 0).
+ *
+ * Note that the flush dependency parent address is maintained purely
+ * for sanity checking. Once we are reasonably confident of the code,
+ * it can be deleted or be maintained only in debug mode.
+ */
+ haddr_t fd_parent_addr; /* Address of flush dependency parent
+ * if any. This field is initialized
+ * to HADDR_UNDEF.
+ */
+ void * fd_parent_ptr; /* pointer to flush dependency parent
+ * if it exists. NULL otherwise.
+ */
} H5O_chunk_proxy_t;
/* Callback information for loading object header chunk from disk */
@@ -381,6 +426,65 @@ typedef struct H5O_chk_cache_ud_t {
H5O_common_cache_ud_t common; /* Common object header cache callback info */
} H5O_chk_cache_ud_t;
+/* Metadata cache object header proxy type */
+struct H5O_proxy_t {
+ H5AC_info_t cache_info; /* Information for H5AC cache functions, _must_ be */
+ /* first field in structure */
+ H5F_t *f; /* Pointer to file for object header/chunk */
+ H5O_t *oh; /* Object header */
+
+ /* Flush dependency parent information (not stored)
+ *
+ * The following fields are used to store base addresses and pointers
+ * to the in core representations of the object header proxy's flush
+ * dependency parents -- if they exist.
+ *
+ * At present, object header proxies may have two types of parents:
+ *
+ * 1) Exactly one object header.
+ *
+ * 2) Zero or more object header continuation chunks.
+ *
+ * The base address and pointer to the object header flush dependency
+ * parent are stored in the oh_fd_parent_addr and oh_fd_parent_ptr fields.
+ * These fields are set to HADDR_UNDEF and NULL if there is no object
+ * header flush dependency parent. Note that when defined,
+ * oh_fd_parent_ptr should point to the same object as oh.
+ *
+ * The number of object header continuation chunks (H5O_chunk_proxy_t)
+ * that are flush dependency parents of the object header proxy is stored
+ * in chk_fd_parent_count.
+ *
+ * If this field is greater than zero, chk_fd_parent_addrs must point to
+ * a dynamically allocated array of haddr_t of length chk_fd_parent_alloc,
+ * and chk_fd_parent_ptrs must point to a dynamically allocated array of
+ * void * of the same length. These arrays are used to store the base
+ * addresses and pointers to the object header continuation chunk flush
+ * dependency parents of the object header proxy. chk_fd_parent_alloc
+ * must always be greater than or equal to chk_fd_parent_count.
+ *
+ * If chk_fd_parent_count is zero, chk_fd_parent_addrs and
+ * chk_fd_parent_ptrs must be NULL.
+ *
+ * Note that the flush dependency parent addresses are maintained
+ * purely for sanity checking. Once we are confident of the code,
+ * these fields and their supporting code can be either deleted
+ * or maintained only in debug builds.
+ */
+ haddr_t oh_fd_parent_addr;
+ void * oh_fd_parent_ptr;
+
+ unsigned chk_fd_parent_count;
+ unsigned chk_fd_parent_alloc;
+ haddr_t *chk_fd_parent_addrs;
+ void **chk_fd_parent_ptrs;
+};
+
+/* Callback information for loading object header proxy */
+typedef struct H5O_proxy_cache_ud_t {
+ H5F_t *f; /* Pointer to file for object header/chunk */
+ H5O_t *oh; /* Object header for this chunk */
+} H5O_proxy_cache_ud_t;
/* H5O object header inherits cache-like properties from H5AC */
H5_DLLVAR const H5AC_class_t H5AC_OHDR[1];
@@ -556,7 +660,8 @@ H5_DLL herr_t H5O_msg_iterate_real(H5F_t *f, H5O_t *oh, const H5O_msg_class_t *t
const H5O_mesg_operator_t *op, void *op_data, hid_t dxpl_id);
/* Object header chunk routines */
-H5_DLL herr_t H5O_chunk_add(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx);
+H5_DLL herr_t H5O_chunk_add(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx,
+ unsigned cont_chunkno);
H5_DLL H5O_chunk_proxy_t *H5O_chunk_protect(H5F_t *f, hid_t dxpl_id, H5O_t *oh,
unsigned idx);
H5_DLL herr_t H5O_chunk_unprotect(H5F_t *f, hid_t dxpl_id,
@@ -604,9 +709,12 @@ H5_DLL herr_t H5O_attr_link(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, void *_mesg
H5_DLL herr_t H5O_attr_count_real(H5F_t *f, hid_t dxpl_id, H5O_t *oh,
hsize_t *nattrs);
-
-/* These functions operate on object locations */
-H5_DLL H5O_loc_t *H5O_get_loc(hid_t id);
+/* Object header proxy operators */
+H5_DLL herr_t H5O_proxy_create(H5F_t *f, hid_t dxpl_id, H5O_t *oh);
+H5_DLL H5O_proxy_t *H5O_proxy_pin(H5F_t *f, hid_t dxpl_id, H5O_t *oh);
+H5_DLL herr_t H5O_proxy_unpin(H5O_proxy_t *proxy);
+H5_DLL herr_t H5O_proxy_depend(H5F_t *f, hid_t dxpl_id, H5O_t *oh, void *parent);
+H5_DLL herr_t H5O_proxy_undepend(H5F_t *f, hid_t dxpl_id, H5O_t *oh, void *parent);
/* Testing functions */
#ifdef H5O_TESTING
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index 0d77a83..5cba610 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -47,6 +47,7 @@
typedef struct H5O_msg_class_t H5O_msg_class_t;
typedef struct H5O_mesg_t H5O_mesg_t;
typedef struct H5O_t H5O_t;
+typedef struct H5O_proxy_t H5O_proxy_t;
/* Values used to create the shared message & attribute heaps */
/* (Note that these parameters have been tuned so that the resulting heap ID
@@ -360,6 +361,14 @@ typedef struct H5O_efl_t {
*/
#define H5O_LAYOUT_NDIMS (H5S_MAX_RANK+1)
+/* Flags for chunked layout feature encoding */
+#define H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS 0x01
+#define H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER 0x02
+#define H5O_LAYOUT_ALL_CHUNK_FLAGS ( \
+ H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS \
+ | H5O_LAYOUT_CHUNK_SINGLE_INDEX_WITH_FILTER \
+ )
+
/* Initial version of the layout information. Used when space is allocated */
#define H5O_LAYOUT_VERSION_1 1
@@ -403,12 +412,46 @@ typedef struct H5O_storage_chunk_btree_t {
H5UC_t *shared; /* Ref-counted shared info for B-tree nodes */
} H5O_storage_chunk_btree_t;
+/* Forward declaration of structs used below */
+struct H5FA_t; /* Defined in H5FAprivate.h */
+
+typedef struct H5O_storage_chunk_farray_t {
+ haddr_t dset_ohdr_addr; /* File address dataset's object header */
+ struct H5FA_t *fa; /* Pointer to fixed index array struct */
+} H5O_storage_chunk_farray_t;
+
+/* Forward declaration of structs used below */
+struct H5EA_t; /* Defined in H5EAprivate.h */
+
+typedef struct H5O_storage_chunk_earray_t {
+ haddr_t dset_ohdr_addr; /* File address dataset's object header */
+ struct H5EA_t *ea; /* Pointer to extensible index array struct */
+} H5O_storage_chunk_earray_t;
+
+/* Filtered info for single chunk index */
+typedef struct H5O_storage_chunk_single_filt_t {
+ uint32_t nbytes; /* Size of chunk (in file) */
+ uint32_t filter_mask; /* Excluded filters for chunk */
+} H5O_storage_chunk_single_filt_t;
+
+/* Forward declaration of structs used below */
+struct H5B2_t; /* Defined in H5B2pkg.h */
+
+typedef struct H5O_storage_chunk_bt2_t {
+ haddr_t dset_ohdr_addr; /* File address dataset's object header */
+ struct H5B2_t *bt2; /* Pointer to b-tree 2 struct */
+} H5O_storage_chunk_bt2_t;
+
typedef struct H5O_storage_chunk_t {
H5D_chunk_index_t idx_type; /* Type of chunk index */
haddr_t idx_addr; /* File address of chunk index */
const struct H5D_chunk_ops_t *ops; /* Pointer to chunked storage operations */
union {
H5O_storage_chunk_btree_t btree; /* Information for v1 B-tree index */
+ H5O_storage_chunk_bt2_t btree2; /* Information for v2 B-tree index */
+ H5O_storage_chunk_earray_t earray; /* Information for extensible array index */
+ H5O_storage_chunk_farray_t farray; /* Information for fixed array index */
+ H5O_storage_chunk_single_filt_t single; /* Information for single chunk w/ filters index */
} u;
} H5O_storage_chunk_t;
@@ -503,13 +546,57 @@ typedef struct H5O_storage_t {
} u;
} H5O_storage_t;
+typedef struct H5O_layout_chunk_farray_t {
+ /* Creation parameters for fixed array data structure */
+ struct {
+ uint8_t max_dblk_page_nelmts_bits; /* Log2(Max. # of elements in a data block page) -
+ i.e. # of bits needed to store max. # of elements
+ in a data block page */
+ } cparam;
+} H5O_layout_chunk_farray_t;
+
+typedef struct H5O_layout_chunk_earray_t {
+ /* Creation parameters for extensible array data structure */
+ struct {
+ uint8_t max_nelmts_bits; /* Log2(Max. # of elements in array) - i.e. # of bits needed to store max. # of elements */
+ uint8_t idx_blk_elmts; /* # of elements to store in index block */
+ uint8_t data_blk_min_elmts; /* Min. # of elements per data block */
+ uint8_t sup_blk_min_data_ptrs; /* Min. # of data block pointers for a super block */
+ uint8_t max_dblk_page_nelmts_bits; /* Log2(Max. # of elements in data block page) - i.e. # of bits needed to store max. # of elements in data block page */
+ } cparam;
+
+ unsigned unlim_dim; /* Rank of unlimited dimension for dataset */
+ uint32_t swizzled_dim[H5O_LAYOUT_NDIMS]; /* swizzled chunk dimensions */
+ hsize_t swizzled_down_chunks[H5O_LAYOUT_NDIMS]; /* swizzled "down" size of number of chunks in each dimension */
+} H5O_layout_chunk_earray_t;
+
+typedef struct H5O_layout_chunk_bt2_t {
+ /* Creation parameters for v2 B-tree data structure */
+ struct {
+ uint32_t node_size; /* Size of each node (in bytes) */
+ uint8_t split_percent; /* % full to split nodes */
+ uint8_t merge_percent; /* % full to merge nodes */
+ } cparam;
+} H5O_layout_chunk_bt2_t;
+
typedef struct H5O_layout_chunk_t {
+ H5D_chunk_index_t idx_type; /* Type of chunk index */
+ uint8_t flags; /* Chunk layout flags */
unsigned ndims; /* Num dimensions in chunk */
uint32_t dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in elements */
+ unsigned enc_bytes_per_dim; /* Encoded # of bytes for storing each chunk dimension */
uint32_t size; /* Size of chunk in bytes */
hsize_t nchunks; /* Number of chunks in dataset */
+ hsize_t max_nchunks; /* Max. number of chunks in dataset */
hsize_t chunks[H5O_LAYOUT_NDIMS]; /* # of chunks in each dataset dimension */
+ hsize_t max_chunks[H5O_LAYOUT_NDIMS]; /* # of chunks in each dataset's max. dimension */
hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */
+ hsize_t max_down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each max dim */
+ union {
+ H5O_layout_chunk_farray_t farray; /* Information for fixed array index */
+ H5O_layout_chunk_earray_t earray; /* Information for extensible array index */
+ H5O_layout_chunk_bt2_t btree2; /* Information for v2 B-tree index */
+ } u;
} H5O_layout_chunk_t;
typedef struct H5O_layout_t {
@@ -764,6 +851,10 @@ H5_DLL herr_t H5O_get_nlinks(const H5O_loc_t *loc, hid_t dxpl_id, hsize_t *nlink
H5_DLL void *H5O_obj_create(H5F_t *f, H5O_type_t obj_type, void *crt_info, H5G_loc_t *obj_loc, hid_t dxpl_id);
H5_DLL haddr_t H5O_get_oh_addr(const H5O_t *oh);
H5_DLL herr_t H5O_get_rc_and_type(const H5O_loc_t *oloc, hid_t dxpl_id, unsigned *rc, H5O_type_t *otype);
+H5_DLL H5O_proxy_t *H5O_pin_flush_dep_proxy(H5O_loc_t *loc, hid_t dxpl_id);
+H5_DLL H5O_proxy_t *H5O_pin_flush_dep_proxy_oh(H5F_t *f, hid_t dxpl_id,
+ H5O_t *oh);
+H5_DLL herr_t H5O_unpin_flush_dep_proxy(H5O_proxy_t *proxy);
/* Object header message routines */
H5_DLL herr_t H5O_msg_create(const H5O_loc_t *loc, unsigned type_id, unsigned mesg_flags,
@@ -814,6 +905,12 @@ H5_DLL int H5O_msg_get_chunkno(const H5O_loc_t *loc, unsigned type_id, hid_t dxp
H5_DLL herr_t H5O_msg_lock(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id);
H5_DLL herr_t H5O_msg_unlock(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id);
+/* Object metadata flush/refresh routines */
+H5_DLL herr_t H5O_flush_common(H5O_loc_t *oloc, hid_t obj_id, hid_t dxpl_id);
+H5_DLL herr_t H5O_refresh_metadata(hid_t oid, H5O_loc_t oloc, hid_t dxpl_id);
+H5_DLL herr_t H5O_refresh_metadata_close(hid_t oid, H5O_loc_t oloc, H5G_loc_t *obj_loc, hid_t dxpl_id);
+H5_DLL herr_t H5O_refresh_metadata_reopen(hid_t oid, H5G_loc_t *obj_loc, hid_t dxpl_id);
+
/* Object copying routines */
H5_DLL herr_t H5O_copy_header_map(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out */,
hid_t dxpl_id, H5O_copy_t *cpy_info, hbool_t inc_depth,
@@ -832,6 +929,13 @@ H5_DLL herr_t H5O_loc_reset(H5O_loc_t *loc);
H5_DLL herr_t H5O_loc_copy(H5O_loc_t *dst, H5O_loc_t *src, H5_copy_depth_t depth);
H5_DLL herr_t H5O_loc_hold_file(H5O_loc_t *loc);
H5_DLL herr_t H5O_loc_free(H5O_loc_t *loc);
+H5_DLL H5O_loc_t *H5O_get_loc(hid_t id);
+
+/* Storage operators */
+H5_DLL void *H5O_storage_copy(const void *mesg, void *dest);
+H5_DLL herr_t H5O_storage_reset(void *mesg);
+H5_DLL size_t H5O_storage_meta_size(const H5F_t *f, const H5O_storage_t *storage,
+ hbool_t include_compact_data);
/* EFL operators */
H5_DLL hsize_t H5O_efl_total_size(H5O_efl_t *efl);
diff --git a/src/H5Oproxy.c b/src/H5Oproxy.c
new file mode 100644
index 0000000..811e73e
--- /dev/null
+++ b/src/H5Oproxy.c
@@ -0,0 +1,952 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5Oproxy.c
+ * February 28 2012
+ * Neil Fortner <nfortne2@hdfgroup.org>
+ *
+ * Purpose: Implement object header's metadata cache proxy cache
+ * methods.
+ *
+ * Note: Object header proxies exist only to make integrating the
+ * object header chunks with the metadata cache's flush
+ * dependencies easier and less coupled than directly tying
+ * them together.
+ *
+ * Object header proxies never exist on disk (hence their
+ * lack of a 'load' callback) and their 'flush' callback
+ * does nothing.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5Omodule.h" /* This source code file is part of the H5O module */
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Opkg.h" /* Object headers */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5MFprivate.h" /* File memory management */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Package Typedefs */
+/********************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/* Metadata cache (H5AC) callbacks */
+static herr_t H5O__cache_proxy_get_load_size(const void *image, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static void *H5O__cache_proxy_deserialize(const void *image, size_t len, void *udata, hbool_t *dirty);
+static herr_t H5O__cache_proxy_image_len(const void *thing, size_t *image_len, hbool_t *compressed_ptr,
+ size_t *compressed_image_len_ptr);
+static herr_t H5O__cache_proxy_serialize(const H5F_t *f, void *image, size_t len, void *thing);
+static herr_t H5O__cache_proxy_notify(H5AC_notify_action_t action, void *thing);
+static herr_t H5O__cache_proxy_free_icr(void *thing);
+
+static herr_t H5O_proxy_depend_core(void *parent, H5O_proxy_t *proxy);
+static herr_t H5O__cache_proxy_dest(H5O_proxy_t *proxy);
+static herr_t H5O_proxy_undepend_core(void *parent, H5O_proxy_t *proxy);
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+const H5AC_class_t H5AC_OHDR_PROXY[1] = {{
+ H5AC_OHDR_PROXY_ID, /* Metadata client ID */
+ "Object header proxy", /* Metadata client name (for debugging) */
+ H5FD_MEM_OHDR, /* File space memory type for client */
+ H5AC__CLASS_SKIP_READS|H5AC__CLASS_SKIP_WRITES, /* Client class behavior flags */
+ H5O__cache_proxy_get_load_size, /* 'get_load_size' callback */
+ NULL, /* 'verify_chksum' callback */
+ H5O__cache_proxy_deserialize, /* 'deserialize' callback */
+ H5O__cache_proxy_image_len, /* 'image_len' callback */
+ NULL, /* 'pre_serialize' callback */
+ H5O__cache_proxy_serialize, /* 'serialize' callback */
+ H5O__cache_proxy_notify, /* 'notify' callback */
+ H5O__cache_proxy_free_icr, /* 'free_icr' callback */
+ NULL, /* 'clear' callback */
+ NULL, /* 'fsf_size' callback */
+}};
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+/* Declare a free list to manage H5O_proxy_t objects */
+H5FL_DEFINE_STATIC(H5O_proxy_t);
+
+/* Declare a free list to manage flush dependency parent addr arrays.
+ * Note that this array is used purely for sanity checking -- once we
+ * are pretty sure the code is working properly, it can be removed.
+ *
+ * JRM -- 12/10/15
+ */
+H5FL_BLK_DEFINE_STATIC(parent_addr);
+
+/* Declare a free list to manage flush dependency parent ptr arrays */
+H5FL_BLK_DEFINE_STATIC(parent_ptr);
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_proxy_get_load_size
+ *
+ * Purpose: Compute the size of the data structure on disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * May 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__cache_proxy_get_load_size(const void *_image, void H5_ATTR_UNUSED *_udata,
+ size_t *image_len, size_t H5_ATTR_UNUSED *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr,
+ size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image_len);
+
+ if(image == NULL) {
+ /* Set the image length size */
+ /* Object header proxies are represented as 1 byte in cache */
+ /* (would be 0 bytes, but cache won't allow it currently. See
+ * H5D_cache_proxy_size) */
+ *image_len = 1;
+ }
+ /* Nothing to do for non-NULL image: no need to compute actual_len */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5O__cache_proxy_get_load_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_proxy_deserialize
+ *
+ * Purpose: Creates an object header proxy and creates flush
+ * dependencies from all object header chunks on this proxy.
+ *
+ * Return: Success: Pointer to a new object header proxy
+ * Failure: NULL
+ *
+ * Programmer: Neil Fortner
+ * Mar 15 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5O__cache_proxy_deserialize(const void H5_ATTR_UNUSED *_image, size_t H5_ATTR_UNUSED len, void *_udata,
+ hbool_t H5_ATTR_UNUSED *dirty)
+{
+ H5O_proxy_t *proxy = NULL; /* Object header proxy */
+ H5O_proxy_cache_ud_t *udata = (H5O_proxy_cache_ud_t *)_udata; /* User data for callback */
+ H5O_proxy_t *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* check arguments */
+ HDassert(udata);
+
+ /* Create object header proxy object & initialize fields */
+ if(NULL == (proxy = H5FL_MALLOC(H5O_proxy_t)))
+ HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "can't allocate object header proxy")
+ HDmemset(&proxy->cache_info, 0, sizeof(H5AC_info_t));
+
+ proxy->f = udata->f;
+ proxy->oh = udata->oh;
+
+ proxy->oh_fd_parent_addr = HADDR_UNDEF;
+ proxy->oh_fd_parent_ptr = NULL;
+
+ proxy->chk_fd_parent_count = 0;
+ proxy->chk_fd_parent_alloc = 0;
+ proxy->chk_fd_parent_addrs = NULL;
+ proxy->chk_fd_parent_ptrs = NULL;
+
+ /* Set return value */
+ ret_value = proxy;
+
+done:
+ if(!ret_value && proxy)
+ if(H5O__cache_proxy_dest(proxy) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTFREE, NULL, "unable to destroy object header proxy")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O__cache_proxy_deserialize() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_proxy_image_len
+ *
+ * Purpose: Compute the size of the data structure on disk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * May 20, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__cache_proxy_image_len(const void H5_ATTR_UNUSED *_thing, size_t *image_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image_len);
+
+ /* Set the image length size */
+ /* Object header proxies are represented as 1 byte in cache */
+ /* (would be 0 bytes, but cache won't allow it currently. See
+ * H5D_cache_proxy_size) */
+ *image_len = 1;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5O__cache_proxy_image_len() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_proxy_serialize
+ *
+ * Purpose: A no-op
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@ncsa.uiuc.edu
+ * Feb 2 2005
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__cache_proxy_serialize(const H5F_t H5_ATTR_UNUSED *f, void H5_ATTR_UNUSED *_image, size_t H5_ATTR_UNUSED len,
+ void H5_ATTR_UNUSED *_thing)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* A no-op */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5O__cache_proxy_serialize() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_proxy_notify
+ *
+ * Purpose: Handle cache action notifications
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * nfortne2@hdfgroup.org
+ * Apr 25 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__cache_proxy_notify(H5AC_notify_action_t action, void *_thing)
+{
+ H5O_proxy_t *proxy = (H5O_proxy_t *)_thing;
+ H5O_chunk_proxy_t *chk_proxy = NULL; /* Object header chunk proxy */
+ unsigned i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /*
+ * Check arguments.
+ */
+ HDassert(proxy);
+ HDassert(proxy->f);
+ HDassert(proxy->oh);
+ HDassert(proxy->oh->swmr_write);
+
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ /* Create flush dependency on object header chunk 0 */
+ if(H5O_proxy_depend_core(proxy->oh, proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+
+ /* Create flush dependencies on all other object header chunks */
+ for(i = 1; i < proxy->oh->nchunks; i++) {
+ if(NULL == (chk_proxy = H5O_chunk_protect(proxy->f, H5AC_ind_dxpl_id, proxy->oh, i)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk")
+/* same as before, but looks backward...need to check into that..(proxy, chk_proxy) */
+ if(H5O_proxy_depend_core(chk_proxy, proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+ if(H5O_chunk_unprotect(proxy->f, H5AC_ind_dxpl_id, chk_proxy, FALSE) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
+ chk_proxy = NULL;
+ } /* end for */
+
+ /* Mark proxy as present on the object header */
+ proxy->oh->proxy_present = TRUE;
+
+ break;
+
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ HDassert(proxy->oh_fd_parent_addr != HADDR_UNDEF);
+ HDassert(proxy->oh_fd_parent_ptr != NULL);
+
+ if(H5O_proxy_undepend_core(proxy->oh_fd_parent_ptr, proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency with object header")
+
+ while(proxy->chk_fd_parent_count > 0){
+
+ i = proxy->chk_fd_parent_count - 1;
+
+ if(H5O_proxy_undepend_core(proxy->chk_fd_parent_ptrs[i], proxy)<0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency with object header continuation chunk")
+ }
+
+ /* Mark proxy as not present on the object header */
+ proxy->oh->proxy_present = FALSE;
+ break;
+
+ default:
+#ifdef NDEBUG
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+
+done:
+ if(chk_proxy) {
+ HDassert(ret_value < 0);
+ if(H5O_chunk_unprotect(proxy->f, H5AC_ind_dxpl_id, chk_proxy, FALSE) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5O__cache_proxy_notify() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_proxy_free_icr
+ *
+ * Purpose: Destroy/release an "in core representation" of a data
+ * structure
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Mar 15 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__cache_proxy_free_icr(void *_thing)
+{
+ H5O_proxy_t *proxy = (H5O_proxy_t *)_thing;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ if(H5O__cache_proxy_dest(proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to destroy object header proxy")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5O__cache_proxy_free_icr() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__cache_proxy_dest
+ *
+ * Purpose: Destroys an object header proxy in memory.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Mar 15 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__cache_proxy_dest(H5O_proxy_t *proxy)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(proxy);
+ HDassert(proxy->oh);
+
+ /* Free the object header proxy object */
+ proxy = H5FL_FREE(H5O_proxy_t, proxy);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O__cache_proxy_dest() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_proxy_create
+ *
+ * Purpose: Allocate temporary space for the object header proxy. We
+ * save the actual creation of the proxy to the "load"
+ * callback, to save time for objects that do not have any
+ * flush dependency children and to simplify the code. The
+ * load callback needs to set up the flush dependencies
+ * anyways because the proxy cannot stay pinned by the object
+ * header or it would be impossible to evict the two due to
+ * the circular dependencies.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Mar 15 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5O_proxy_create(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id, H5O_t *oh)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(f);
+ HDassert(oh);
+
+ /* Get a temp. address for object header proxy */
+ if(HADDR_UNDEF == (oh->proxy_addr = H5MF_alloc_tmp(f, (hsize_t)1)))
+ HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, FAIL, "file allocation failed for object header proxy")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O_proxy_create() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_proxy_pin
+ *
+ * Purpose: Returns the proxy object for the specified object header,
+ * pinned. This proxy can be used as a flush dependency
+ * parent for items that depend on this object header.
+ *
+ * Return: Success: Pointer to a pinned object header proxy
+ * Failure: NULL
+ *
+ * Programmer: Neil Fortner
+ * Mar 15 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+H5O_proxy_t *
+H5O_proxy_pin(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
+{
+ H5O_proxy_t *proxy = NULL; /* Object header proxy */
+ H5O_proxy_cache_ud_t udata; /* User-data for callback */
+ H5O_proxy_t *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ HDassert(f);
+ HDassert(oh);
+ HDassert(H5F_addr_defined(oh->proxy_addr));
+
+ udata.f = f;
+ udata.oh = oh;
+ /* Protect the object header proxy */
+ if(NULL == (proxy = (H5O_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_PROXY, oh->proxy_addr, &udata, H5AC__READ_ONLY_FLAG)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to protect object header proxy");
+
+ /* Unprotect the object header proxy and pin it */
+ if(H5AC_unprotect(f, dxpl_id, H5AC_OHDR_PROXY, oh->proxy_addr, proxy, H5AC__PIN_ENTRY_FLAG) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, NULL, "unable to unprotect object header proxy");
+
+ /* Set return value */
+ ret_value = proxy;
+ proxy = NULL;
+
+done:
+ if(proxy) {
+ HDassert(!ret_value);
+ if(H5AC_unprotect(f, dxpl_id, H5AC_OHDR_PROXY, oh->proxy_addr, proxy, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, NULL, "unable to unprotect object header proxy");
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O_proxy_pin() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_proxy_unpin
+ *
+ * Purpose: Unpins the specified object header proxy from the cache.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Mar 15 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5O_proxy_unpin(H5O_proxy_t *proxy)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ HDassert(proxy);
+
+ /* Unpin the object header proxy */
+ if(H5AC_unpin_entry(proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPIN, FAIL, "unable to unpin object header proxy")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O_proxy_unpin() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_proxy_depend
+ *
+ * Purpose: Creates a flush dependency between the object header proxy
+ * (as child) and the specified object (as parent).
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Mar 20 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5O_proxy_depend(H5F_t *f, hid_t dxpl_id, H5O_t *oh, void *parent)
+{
+ H5O_proxy_t *proxy = NULL; /* Object header proxy */
+ H5O_proxy_cache_ud_t udata; /* User-data for callback */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ HDassert(f);
+ HDassert(oh);
+
+ HDassert(H5F_addr_defined(oh->proxy_addr));
+ HDassert(parent);
+ HDassert(((H5C_cache_entry_t *)parent)->magic ==
+ H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(((H5C_cache_entry_t *)parent)->type);
+ HDassert((((H5C_cache_entry_t *)(parent))->type->id == H5AC_OHDR_ID) ||
+ (((H5C_cache_entry_t *)(parent))->type->id == H5AC_OHDR_CHK_ID));
+
+ udata.f = f;
+ udata.oh = oh;
+ /* Protect the object header proxy */
+ if(NULL == (proxy = (H5O_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_PROXY, oh->proxy_addr, &udata, H5AC__READ_ONLY_FLAG)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header proxy");
+
+ /* Add the flush dependency on the parent object */
+ if(H5O_proxy_depend_core(parent, proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+
+ /* Unprotect the object header proxy */
+ if(H5AC_unprotect(f, dxpl_id, H5AC_OHDR_PROXY, oh->proxy_addr, proxy, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header proxy");
+ proxy = NULL;
+
+done:
+ if(proxy) {
+ HDassert(ret_value < 0);
+ if(H5AC_unprotect(f, dxpl_id, H5AC_OHDR_PROXY, oh->proxy_addr, proxy, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header proxy");
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O_proxy_depend() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_proxy_depend_core
+ *
+ * Purpose: Creates a flush dependency between the object header proxy
+ * (as child) and the specified object (as parent).
+ *
+ * This function accepts a pointer to the proxy, which it
+ * assumes is somehow locked in the cache. In general, this
+ * means the proxy is protected, however, this is not necessary
+ * when called from the proxy notify routine.
+ *
+ * It also handles the bookkeeping required to track the flush
+ * dependency parent of the object header proxy.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 12/08/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O_proxy_depend_core(void *parent, H5O_proxy_t *proxy)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ HDassert(parent);
+ HDassert(((H5C_cache_entry_t *)parent)->magic ==
+ H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(((H5C_cache_entry_t *)parent)->type);
+ HDassert((((H5C_cache_entry_t *)(parent))->type->id == H5AC_OHDR_ID) ||
+ (((H5C_cache_entry_t *)(parent))->type->id == H5AC_OHDR_CHK_ID));
+ HDassert(proxy);
+ HDassert(proxy->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(proxy->cache_info.type);
+ HDassert(proxy->cache_info.type->id == H5AC_OHDR_PROXY_ID);
+
+ /* Add the flush dependency on the parent object */
+ if(H5AC_create_flush_dependency(parent, proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTDEPEND, FAIL, "unable to create flush dependency")
+
+ /* make record of the flush dependency relationship */
+ if(((H5C_cache_entry_t *)(parent))->type->id == H5AC_OHDR_ID) {
+
+ HDassert(proxy->oh_fd_parent_addr == HADDR_UNDEF);
+ HDassert(proxy->oh_fd_parent_ptr == NULL);
+
+ proxy->oh_fd_parent_addr = ((H5C_cache_entry_t *)parent)->addr;
+ proxy->oh_fd_parent_ptr = parent;
+
+ } else {
+
+ HDassert(((H5C_cache_entry_t *)(parent))->type->id == H5AC_OHDR_CHK_ID);
+
+ /* check to see if we need to resize the chunk fd parent arrays */
+ if ( proxy->chk_fd_parent_count >= proxy->chk_fd_parent_alloc ) {
+
+ if ( proxy->chk_fd_parent_alloc == 0 ) {
+
+ /* must allocate arrays */
+ if ( NULL == (proxy->chk_fd_parent_addrs = (haddr_t *)
+ H5FL_BLK_MALLOC(parent_addr,
+ H5O_FD_PAR_LIST_BASE * sizeof(haddr_t))) )
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent addr list")
+
+ if ( NULL == (proxy->chk_fd_parent_ptrs = (void **)
+ H5FL_BLK_MALLOC(parent_ptr,
+ H5O_FD_PAR_LIST_BASE * sizeof(void *))) )
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent ptr list")
+
+ proxy->chk_fd_parent_alloc = H5O_FD_PAR_LIST_BASE;
+
+ } else {
+
+ /* resize existing arrays */
+ HDassert(proxy->chk_fd_parent_addrs);
+ HDassert(proxy->chk_fd_parent_ptrs);
+
+ if ( NULL == (proxy->chk_fd_parent_addrs = (haddr_t *)
+ H5FL_BLK_REALLOC(parent_addr,
+ proxy->chk_fd_parent_addrs,
+ 2 * proxy->chk_fd_parent_alloc *
+ sizeof(haddr_t))) )
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory reallocation failed for flush dependency parent addr list")
+
+ if ( NULL == (proxy->chk_fd_parent_ptrs = (void **)
+ H5FL_BLK_REALLOC(parent_ptr,
+ proxy->chk_fd_parent_ptrs,
+ 2 * proxy->chk_fd_parent_alloc *
+ sizeof(void *))) )
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory reallocation failed for flush dependency parent ptr list")
+
+ proxy->chk_fd_parent_alloc *= 2;
+ }
+ }
+
+ HDassert(proxy->chk_fd_parent_count < proxy->chk_fd_parent_alloc);
+
+ (proxy->chk_fd_parent_addrs)[proxy->chk_fd_parent_count] =
+ ((H5C_cache_entry_t *)(parent))->addr;
+
+ (proxy->chk_fd_parent_ptrs)[proxy->chk_fd_parent_count] = parent;
+
+ (proxy->chk_fd_parent_count)++;
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5O_proxy_depend_core() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_proxy_undepend
+ *
+ * Purpose: Destroys the flush dependency between the object header
+ * proxy (as child) and the specified object (as parent).
+ *
+ * Update: This function also seems to be used to delete
+ * flush dependencies between object header proxy (as child)
+ * and object header chunk continuations (as parent)
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Mar 20 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5O_proxy_undepend(H5F_t *f, hid_t dxpl_id, H5O_t *oh, void *parent)
+{
+ H5O_proxy_t *proxy = NULL; /* Object header proxy */
+ H5O_proxy_cache_ud_t udata; /* User-data for callback */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ HDassert(f);
+ HDassert(oh);
+ HDassert(H5F_addr_defined(oh->proxy_addr));
+ HDassert(parent);
+
+ udata.f = f;
+ udata.oh = oh;
+ /* Protect the object header proxy */
+ if(NULL == (proxy = (H5O_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_PROXY, oh->proxy_addr, &udata, H5AC__READ_ONLY_FLAG)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header proxy");
+
+ /* destroy the flush dependency on the parent object */
+ if(H5O_proxy_undepend_core(parent, proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+
+ /* Unprotect the object header proxy */
+ if(H5AC_unprotect(f, dxpl_id, H5AC_OHDR_PROXY, oh->proxy_addr, proxy, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header proxy");
+ proxy = NULL;
+
+done:
+ if(proxy) {
+ HDassert(ret_value < 0);
+ if(H5AC_unprotect(f, dxpl_id, H5AC_OHDR_PROXY, oh->proxy_addr, proxy, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header proxy");
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O_proxy_undepend() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_proxy_undepend_core
+ *
+ * Purpose: Destroys the flush dependency between the object header
+ * proxy (as child) and the specified object (as parent).
+ *
+ * Also update records of the flush dependency parents of
+ * the object header proxy so that they can be taken down
+ * on notification of object header proxy eviction.
+ *
+ * Unlike H5O_proxy_undepend, this function does not
+ * protect and unprotect the object header proxy. Instead,
+ * it takes a pointer to the object header proxy as a
+ * parameter, and presumes that the proxy is protected or
+ * otherwise locked in the metadata cache.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 12/8/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O_proxy_undepend_core(void *parent, H5O_proxy_t *proxy)
+{
+ unsigned i;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ HDassert(parent);
+
+#ifndef NDEBUG
+ /* do sanity checks */
+ HDassert(parent);
+ HDassert(((H5C_cache_entry_t *)parent)->magic ==
+ H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(((H5C_cache_entry_t *)parent)->type);
+ HDassert((((H5C_cache_entry_t *)(parent))->type->id == H5AC_OHDR_ID) ||
+ (((H5C_cache_entry_t *)(parent))->type->id == H5AC_OHDR_CHK_ID));
+
+ if ( (((H5C_cache_entry_t *)(parent))->type->id == H5AC_OHDR_ID) ) {
+
+ HDassert(proxy->oh_fd_parent_addr != HADDR_UNDEF);
+ HDassert(proxy->oh_fd_parent_ptr != NULL);
+ HDassert(proxy->oh_fd_parent_addr ==
+ (((H5C_cache_entry_t *)(parent))->addr));
+ HDassert(proxy->oh_fd_parent_ptr == parent);
+
+ } else {
+
+ hbool_t found = FALSE;
+
+ HDassert(proxy->chk_fd_parent_alloc > 0);
+ HDassert(proxy->chk_fd_parent_count > 0);
+ HDassert(proxy->chk_fd_parent_addrs);
+ HDassert(proxy->chk_fd_parent_ptrs);
+
+ for( i = 0; i < proxy->chk_fd_parent_count; i++ ) {
+
+ if ( proxy->chk_fd_parent_ptrs[i] == parent ) {
+
+ found = TRUE;
+ break;
+ }
+ }
+
+ HDassert(found);
+ HDassert(proxy->chk_fd_parent_addrs[i] ==
+ (((H5C_cache_entry_t *)(parent))->addr));
+ }
+#endif /* NDEBUG */
+
+ /* destroy the flush dependency on the parent object */
+ if(H5AC_destroy_flush_dependency(parent, proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNDEPEND, FAIL, "unable to destroy flush dependency")
+
+ /* delete parent from object header's list of parents */
+ if ( (((H5C_cache_entry_t *)(parent))->type->id == H5AC_OHDR_ID) ) {
+
+ proxy->oh_fd_parent_addr = HADDR_UNDEF;
+ proxy->oh_fd_parent_ptr = NULL;
+
+ } else {
+
+ /* find parent in parent ptrs array */
+ for ( i = 0; i < proxy->chk_fd_parent_count; i++ ) {
+
+ if ( proxy->chk_fd_parent_ptrs[i] == parent ) {
+
+ break;
+ }
+ }
+
+ if ( i == proxy->chk_fd_parent_count )
+
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNDEPEND, FAIL, "Parent entry isn't in chunk fd parent list")
+
+ /* Remove parent entry from chunk fd parent ptr and addr lists */
+ if ( i < proxy->chk_fd_parent_count - 1 ) {
+
+ HDmemmove(&proxy->chk_fd_parent_addrs[i],
+ &proxy->chk_fd_parent_addrs[i+1],
+ (proxy->chk_fd_parent_count - i - 1)
+ * sizeof(proxy->chk_fd_parent_addrs[0]));
+
+ HDmemmove(&proxy->chk_fd_parent_ptrs[i],
+ &proxy->chk_fd_parent_ptrs[i+1],
+ (proxy->chk_fd_parent_count - i - 1)
+ * sizeof(proxy->chk_fd_parent_ptrs[0]));
+ }
+
+ proxy->chk_fd_parent_count--;
+
+ /* shrink or free the fd parent ptr and addr lists as appropriate */
+ if( proxy->chk_fd_parent_count == 0 ) {
+
+ proxy->chk_fd_parent_addrs = (haddr_t *)
+ H5FL_BLK_FREE(parent_addr, proxy->chk_fd_parent_addrs);
+
+ proxy->chk_fd_parent_ptrs = (void **)
+ H5FL_BLK_FREE(parent_ptr, proxy->chk_fd_parent_ptrs);
+
+ proxy->chk_fd_parent_alloc = 0;
+
+ }
+ else if ( ( proxy->chk_fd_parent_count > H5O_FD_PAR_LIST_BASE ) &&
+ ( proxy->chk_fd_parent_count <=
+ (proxy->chk_fd_parent_alloc / 4) ) ) {
+
+ if ( NULL == (proxy->chk_fd_parent_addrs = (haddr_t *)
+ H5FL_BLK_REALLOC(parent_addr, proxy->chk_fd_parent_addrs,
+ (proxy->chk_fd_parent_alloc / 4) *
+ sizeof(haddr_t))) )
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory reallocation failed for chk fd parent addr list")
+
+ if ( NULL == (proxy->chk_fd_parent_ptrs = (void **)
+ H5FL_BLK_REALLOC(parent_ptr, proxy->chk_fd_parent_ptrs,
+ (proxy->chk_fd_parent_alloc / 4) *
+ sizeof(void *))) )
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory reallocation failed for chk fd parent ptr list")
+
+ proxy->chk_fd_parent_alloc /= 4;
+
+ } /* end if */
+ }
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5O_proxy_undepend_core() */
+
diff --git a/src/H5Opublic.h b/src/H5Opublic.h
index a7d386a..dec7b5b 100644
--- a/src/H5Opublic.h
+++ b/src/H5Opublic.h
@@ -182,6 +182,11 @@ H5_DLL herr_t H5Ovisit_by_name(hid_t loc_id, const char *obj_name,
H5_index_t idx_type, H5_iter_order_t order, H5O_iterate_t op,
void *op_data, hid_t lapl_id);
H5_DLL herr_t H5Oclose(hid_t object_id);
+H5_DLL herr_t H5Oflush(hid_t obj_id);
+H5_DLL herr_t H5Orefresh(hid_t oid);
+H5_DLL herr_t H5Odisable_mdc_flushes(hid_t object_id);
+H5_DLL herr_t H5Oenable_mdc_flushes(hid_t object_id);
+H5_DLL herr_t H5Oare_mdc_flushes_disabled(hid_t object_id, hbool_t *are_disabled);
/* Symbols defined for compatibility with previous versions of the HDF5 API.
*
diff --git a/src/H5Otest.c b/src/H5Otest.c
index 1c149e3..a911469 100644
--- a/src/H5Otest.c
+++ b/src/H5Otest.c
@@ -159,6 +159,7 @@ htri_t
H5O_is_attr_empty_test(hid_t oid)
{
H5O_t *oh = NULL; /* Object header */
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
H5B2_t *bt2_name = NULL; /* v2 B-tree handle for name index */
H5O_ainfo_t ainfo; /* Attribute information for object */
htri_t ainfo_exists = FALSE; /* Whether the attribute info exists in the file */
@@ -194,11 +195,17 @@ H5O_is_attr_empty_test(hid_t oid)
/* Check for any messages in object header */
HDassert(nattrs == 0);
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(loc->file) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(loc->file, H5AC_ind_dxpl_id, oh)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPIN, FAIL, "unable to pin object header proxy")
+
/* Set metadata tag in dxpl_id */
H5_BEGIN_TAG(H5AC_ind_dxpl_id, loc->addr, FAIL);
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(loc->file, H5AC_ind_dxpl_id, ainfo.name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(loc->file, H5AC_ind_dxpl_id, ainfo.name_bt2_addr, NULL, oh_proxy)))
HGOTO_ERROR_TAG(H5E_OHDR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Reset metadata tag in dxpl_id */
@@ -223,6 +230,8 @@ done:
/* Release resources */
if(bt2_name && H5B2_close(bt2_name, H5AC_ind_dxpl_id) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTCLOSEOBJ, FAIL, "can't close v2 B-tree for name index")
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(oh && H5O_unprotect(loc, H5AC_ind_dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header")
@@ -253,6 +262,7 @@ herr_t
H5O_num_attrs_test(hid_t oid, hsize_t *nattrs)
{
H5O_t *oh = NULL; /* Object header */
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
H5B2_t *bt2_name = NULL; /* v2 B-tree handle for name index */
H5O_ainfo_t ainfo; /* Attribute information for object */
H5O_loc_t *loc; /* Pointer to object's location */
@@ -287,11 +297,17 @@ H5O_num_attrs_test(hid_t oid, hsize_t *nattrs)
/* Check for any messages in object header */
HDassert(obj_nattrs == 0);
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(loc->file) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(loc->file, H5AC_ind_dxpl_id, oh)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPIN, FAIL, "unable to pin object header proxy")
+
/* Set metadata tag in dxpl_id */
H5_BEGIN_TAG(H5AC_ind_dxpl_id, loc->addr, FAIL);
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(loc->file, H5AC_ind_dxpl_id, ainfo.name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(loc->file, H5AC_ind_dxpl_id, ainfo.name_bt2_addr, NULL, oh_proxy)))
HGOTO_ERROR_TAG(H5E_OHDR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Reset metadata tag in dxpl_id */
@@ -313,6 +329,8 @@ done:
/* Release resources */
if(bt2_name && H5B2_close(bt2_name, H5AC_ind_dxpl_id) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTCLOSEOBJ, FAIL, "can't close v2 B-tree for name index")
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(oh && H5O_unprotect(loc, H5AC_ind_dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header")
@@ -345,6 +363,7 @@ herr_t
H5O_attr_dense_info_test(hid_t oid, hsize_t *name_count, hsize_t *corder_count)
{
H5O_t *oh = NULL; /* Object header */
+ H5O_proxy_t *oh_proxy = NULL; /* Object header proxy */
H5B2_t *bt2_name = NULL; /* v2 B-tree handle for name index */
H5B2_t *bt2_corder = NULL; /* v2 B-tree handle for creation order index */
H5O_ainfo_t ainfo; /* Attribute information for object */
@@ -372,6 +391,12 @@ H5O_attr_dense_info_test(hid_t oid, hsize_t *name_count, hsize_t *corder_count)
HGOTO_ERROR_TAG(H5E_ATTR, H5E_CANTGET, FAIL, "can't check for attribute info message")
} /* end if */
+ /* Check for SWMR writes to the file */
+ if(H5F_INTENT(loc->file) & H5F_ACC_SWMR_WRITE)
+ /* Pin the attribute's object header proxy */
+ if(NULL == (oh_proxy = H5O_pin_flush_dep_proxy_oh(loc->file, H5AC_ind_dxpl_id, oh)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPIN, FAIL, "unable to pin object header proxy")
+
/* Check for 'dense' attribute storage file addresses being defined */
if(!H5F_addr_defined(ainfo.fheap_addr))
HGOTO_DONE_TAG(FAIL, FAIL)
@@ -379,7 +404,7 @@ H5O_attr_dense_info_test(hid_t oid, hsize_t *name_count, hsize_t *corder_count)
HGOTO_DONE_TAG(FAIL, FAIL)
/* Open the name index v2 B-tree */
- if(NULL == (bt2_name = H5B2_open(loc->file, H5AC_ind_dxpl_id, ainfo.name_bt2_addr, NULL)))
+ if(NULL == (bt2_name = H5B2_open(loc->file, H5AC_ind_dxpl_id, ainfo.name_bt2_addr, NULL, oh_proxy)))
HGOTO_ERROR_TAG(H5E_OHDR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for name index")
/* Retrieve # of records in name index */
@@ -389,7 +414,7 @@ H5O_attr_dense_info_test(hid_t oid, hsize_t *name_count, hsize_t *corder_count)
/* Check if there is a creation order index */
if(H5F_addr_defined(ainfo.corder_bt2_addr)) {
/* Open the creation order index v2 B-tree */
- if(NULL == (bt2_corder = H5B2_open(loc->file, H5AC_ind_dxpl_id, ainfo.corder_bt2_addr, NULL)))
+ if(NULL == (bt2_corder = H5B2_open(loc->file, H5AC_ind_dxpl_id, ainfo.corder_bt2_addr, NULL, oh_proxy)))
HGOTO_ERROR_TAG(H5E_OHDR, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for creation order index")
/* Retrieve # of records in creation order index */
@@ -408,6 +433,8 @@ done:
HDONE_ERROR(H5E_OHDR, H5E_CANTCLOSEOBJ, FAIL, "can't close v2 B-tree for name index")
if(bt2_corder && H5B2_close(bt2_corder, H5AC_ind_dxpl_id) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTCLOSEOBJ, FAIL, "can't close v2 B-tree for creation order index")
+ if(oh_proxy && H5O_unpin_flush_dep_proxy(oh_proxy) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPIN, FAIL, "unable to unpin attribute object header proxy")
if(oh && H5O_unprotect(loc, H5AC_ind_dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header")
diff --git a/src/H5Pdapl.c b/src/H5Pdapl.c
index eab9337..9f7822e 100644
--- a/src/H5Pdapl.c
+++ b/src/H5Pdapl.c
@@ -72,6 +72,11 @@
#define H5D_ACS_VDS_PRINTF_GAP_DEF (hsize_t)0
#define H5D_ACS_VDS_PRINTF_GAP_ENC H5P__encode_hsize_t
#define H5D_ACS_VDS_PRINTF_GAP_DEC H5P__decode_hsize_t
+/* Definition for append flush */
+#define H5D_ACS_APPEND_FLUSH_SIZE sizeof(H5D_append_flush_t)
+#define H5D_ACS_APPEND_FLUSH_DEF {0,{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0},NULL,NULL}
+
+
/******************/
/* Local Typedefs */
@@ -133,7 +138,7 @@ const H5P_libclass_t H5P_CLS_DACC[1] = {{
/*******************/
/* Local Variables */
/*******************/
-
+static const H5D_append_flush_t H5D_def_append_flush_g = H5D_ACS_APPEND_FLUSH_DEF; /* Default setting for append flush */
/*-------------------------------------------------------------------------
@@ -187,6 +192,12 @@ H5P__dacc_reg_prop(H5P_genclass_t *pclass)
NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+ /* Register info for append flush */
+ /* (Note: this property should not have an encode/decode callback -QAK) */
+ if(H5P_register_real(pclass, H5D_ACS_APPEND_FLUSH_NAME, H5D_ACS_APPEND_FLUSH_SIZE, &H5D_def_append_flush_g,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5P__dacc_reg_prop() */
@@ -787,3 +798,122 @@ done:
FUNC_LEAVE_API(ret_value)
} /* end H5Pget_virtual_printf_gap() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pset_append_flush
+ *
+ * Purpose: Sets the boundary, callback function, and user data in the
+ * property list.
+ * "ndims": number of array elements for boundary
+ * "boundary": used to determine whether the current dimension hits
+ * a boundary; if so, invoke the callback function and
+ * flush the dataset.
+ * "func": the callback function to invoke when the boundary is hit
+ * "udata": the user data to pass as parameter with the callback function
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Dec 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_append_flush(hid_t plist_id, unsigned ndims, const hsize_t *boundary, H5D_append_cb_t func, void *udata)
+{
+ H5P_genplist_t *plist; /* property list pointer */
+ H5D_append_flush_t info;
+ unsigned u; /* local index variable */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE5("e", "iIu*hx*x", plist_id, ndims, boundary, func, udata);
+
+ /* Check arguments */
+ if(0 == ndims)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dimensionality cannot be zero")
+ if(ndims > H5S_MAX_RANK)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dimensionality is too large")
+ if(!boundary)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no boundary dimensions specified")
+
+ /* Check if the callback function is NULL and the user data is non-NULL.
+ * This is almost certainly an error as the user data will not be used. */
+ if(!func && udata)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "callback is NULL while user data is not")
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_ACCESS)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Set up values */
+ info.ndims = ndims;
+ info.func = func;
+ info.udata = udata;
+
+ HDmemset(info.boundary, 0, sizeof(info.boundary));
+ /* boundary can be 0 to indicate no boundary is set */
+ for(u = 0; u < ndims; u++) {
+ if(boundary[u] != (boundary[u] & 0xffffffff)) /* negative value (including H5S_UNLIMITED) */
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "all boundary dimensions must be less than 2^32")
+ info.boundary[u] = boundary[u]; /* Store user's boundary dimensions */
+ } /* end for */
+
+ /* Set values */
+ if(H5P_set(plist, H5D_ACS_APPEND_FLUSH_NAME, &info) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set append flush")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Pset_append_flush() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_append_flush()
+ *
+ * Purpose: Retrieves the boundary, callback function and user data set in
+ * property list.
+ * Note that the # of boundary sizes to retrieve will not exceed
+ * the parameter "ndims" and the ndims set previously via
+ * H5Pset_append_flush().
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Dec 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_append_flush(hid_t plist_id, unsigned ndims, hsize_t boundary[], H5D_append_cb_t *func, void **udata)
+{
+ H5P_genplist_t *plist; /* property list pointer */
+ H5D_append_flush_t info;
+ unsigned u; /* local index variable */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE5("e", "iIu*h*x**x", plist_id, ndims, boundary, func, udata);
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_ACCESS)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Retrieve info for append flush */
+ if(H5P_get(plist, H5D_ACS_APPEND_FLUSH_NAME, &info) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get object flush callback")
+
+ /* Assign return values */
+ if(boundary) {
+ HDmemset(boundary, 0, ndims * sizeof(hsize_t));
+ if(info.ndims > 0)
+ for(u = 0; u < info.ndims && u < ndims; u++)
+ boundary[u] = info.boundary[u];
+ } /* end if */
+ if(func)
+ *func = info.func;
+ if(udata)
+ *udata = info.udata;
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Pget_append_flush() */
+
diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c
index 145e1b5..c6a8f9a 100644
--- a/src/H5Pdcpl.c
+++ b/src/H5Pdcpl.c
@@ -29,14 +29,14 @@
/****************/
#include "H5Pmodule.h" /* This source code file is part of the H5P module */
+#define H5D_FRIEND /* Suppress error about including H5Dpkg */
/***********/
/* Headers */
/***********/
#include "H5private.h" /* Generic Functions */
-#include "H5ACprivate.h" /* Metadata cache */
-#include "H5Dprivate.h" /* Datasets */
+#include "H5Dpkg.h" /* Datasets */
#include "H5Eprivate.h" /* Error handling */
#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */
@@ -45,6 +45,7 @@
#include "H5Ppkg.h" /* Property lists */
#include "H5Sprivate.h" /* Dataspaces */
#include "H5Tprivate.h" /* Datatypes */
+#include "H5VMprivate.h" /* Vectors and arrays */
#include "H5Zprivate.h" /* Data filters */
@@ -55,28 +56,28 @@
/* Define default layout information */
#define H5D_DEF_STORAGE_COMPACT_INIT {(hbool_t)FALSE, (size_t)0, NULL}
#define H5D_DEF_STORAGE_CONTIG_INIT {HADDR_UNDEF, (hsize_t)0}
-#define H5D_DEF_STORAGE_CHUNK_INIT {H5D_CHUNK_IDX_BTREE, HADDR_UNDEF, NULL, {{HADDR_UNDEF, NULL}}}
-#define H5D_DEF_LAYOUT_CHUNK_INIT {(unsigned)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, (uint32_t)0, (hsize_t)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}}
+#define H5D_DEF_STORAGE_CHUNK_INIT {H5D_CHUNK_IDX_BTREE, HADDR_UNDEF, H5D_COPS_BTREE, {{HADDR_UNDEF, NULL}}}
+#define H5D_DEF_LAYOUT_CHUNK_INIT {H5D_CHUNK_IDX_BTREE, (uint8_t)0, (unsigned)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, (unsigned)0, (uint32_t)0, (hsize_t)0, (hsize_t)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {{{(uint8_t)0}}}}
#define H5D_DEF_STORAGE_VIRTUAL_INIT {{HADDR_UNDEF, 0}, 0, NULL, 0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, H5D_VDS_ERROR, HSIZE_UNDEF, -1, -1, FALSE}
#ifdef H5_HAVE_C99_DESIGNATED_INITIALIZER
#define H5D_DEF_STORAGE_COMPACT {H5D_COMPACT, { .compact = H5D_DEF_STORAGE_COMPACT_INIT }}
#define H5D_DEF_STORAGE_CONTIG {H5D_CONTIGUOUS, { .contig = H5D_DEF_STORAGE_CONTIG_INIT }}
#define H5D_DEF_STORAGE_CHUNK {H5D_CHUNKED, { .chunk = H5D_DEF_STORAGE_CHUNK_INIT }}
#define H5D_DEF_STORAGE_VIRTUAL {H5D_VIRTUAL, { .virt = H5D_DEF_STORAGE_VIRTUAL_INIT }}
-#define H5D_DEF_LAYOUT_COMPACT {H5D_COMPACT, H5O_LAYOUT_VERSION_3, NULL, {H5D_DEF_LAYOUT_CHUNK_INIT}, H5D_DEF_STORAGE_COMPACT}
-#define H5D_DEF_LAYOUT_CONTIG {H5D_CONTIGUOUS, H5O_LAYOUT_VERSION_3, NULL, {H5D_DEF_LAYOUT_CHUNK_INIT}, H5D_DEF_STORAGE_CONTIG}
-#define H5D_DEF_LAYOUT_CHUNK {H5D_CHUNKED, H5O_LAYOUT_VERSION_3, NULL, {H5D_DEF_LAYOUT_CHUNK_INIT}, H5D_DEF_STORAGE_CHUNK}
-#define H5D_DEF_LAYOUT_VIRTUAL {H5D_VIRTUAL, H5O_LAYOUT_VERSION_3, NULL, {H5D_DEF_LAYOUT_CHUNK_INIT}, H5D_DEF_STORAGE_VIRTUAL}
+#define H5D_DEF_LAYOUT_COMPACT {H5D_COMPACT, H5O_LAYOUT_VERSION_DEFAULT, H5D_LOPS_COMPACT, {H5D_DEF_LAYOUT_CHUNK_INIT}, H5D_DEF_STORAGE_COMPACT}
+#define H5D_DEF_LAYOUT_CONTIG {H5D_CONTIGUOUS, H5O_LAYOUT_VERSION_DEFAULT, H5D_LOPS_CONTIG, {H5D_DEF_LAYOUT_CHUNK_INIT}, H5D_DEF_STORAGE_CONTIG}
+#define H5D_DEF_LAYOUT_CHUNK {H5D_CHUNKED, H5O_LAYOUT_VERSION_DEFAULT, H5D_LOPS_CHUNK, {H5D_DEF_LAYOUT_CHUNK_INIT}, H5D_DEF_STORAGE_CHUNK}
+#define H5D_DEF_LAYOUT_VIRTUAL {H5D_VIRTUAL, H5O_LAYOUT_VERSION_4, H5D_LOPS_VIRTUAL, {H5D_DEF_LAYOUT_CHUNK_INIT}, H5D_DEF_STORAGE_VIRTUAL}
#else /* H5_HAVE_C99_DESIGNATED_INITIALIZER */
/* Note that the compact & chunked layout initialization values are using the
* contiguous layout initialization in the union, because the contiguous
* layout is first in the union. These values are overridden in the
* H5P__init_def_layout() routine. -QAK
*/
-#define H5D_DEF_LAYOUT_COMPACT {H5D_COMPACT, H5O_LAYOUT_VERSION_3, NULL, {H5D_DEF_LAYOUT_CHUNK_INIT}, {H5D_CONTIGUOUS, H5D_DEF_STORAGE_CONTIG_INIT}}
-#define H5D_DEF_LAYOUT_CONTIG {H5D_CONTIGUOUS, H5O_LAYOUT_VERSION_3, NULL, {H5D_DEF_LAYOUT_CHUNK_INIT}, {H5D_CONTIGUOUS, H5D_DEF_STORAGE_CONTIG_INIT}}
-#define H5D_DEF_LAYOUT_CHUNK {H5D_CHUNKED, H5O_LAYOUT_VERSION_3, NULL, {H5D_DEF_LAYOUT_CHUNK_INIT}, {H5D_CONTIGUOUS, H5D_DEF_STORAGE_CONTIG_INIT}}
-#define H5D_DEF_LAYOUT_VIRTUAL {H5D_VIRTUAL, H5O_LAYOUT_VERSION_3, NULL, {H5D_DEF_LAYOUT_CHUNK_INIT}, {H5D_CONTIGUOUS, H5D_DEF_STORAGE_CONTIG_INIT}}
+#define H5D_DEF_LAYOUT_COMPACT {H5D_COMPACT, H5O_LAYOUT_VERSION_DEFAULT, NULL, {H5D_DEF_LAYOUT_CHUNK_INIT}, {H5D_CONTIGUOUS, H5D_DEF_STORAGE_CONTIG_INIT}}
+#define H5D_DEF_LAYOUT_CONTIG {H5D_CONTIGUOUS, H5O_LAYOUT_VERSION_DEFAULT, NULL, {H5D_DEF_LAYOUT_CHUNK_INIT}, {H5D_CONTIGUOUS, H5D_DEF_STORAGE_CONTIG_INIT}}
+#define H5D_DEF_LAYOUT_CHUNK {H5D_CHUNKED, H5O_LAYOUT_VERSION_DEFAULT, NULL, {H5D_DEF_LAYOUT_CHUNK_INIT}, {H5D_CONTIGUOUS, H5D_DEF_STORAGE_CONTIG_INIT}}
+#define H5D_DEF_LAYOUT_VIRTUAL {H5D_VIRTUAL, H5O_LAYOUT_VERSION_4, NULL, {H5D_DEF_LAYOUT_CHUNK_INIT}, {H5D_CONTIGUOUS, H5D_DEF_STORAGE_CONTIG_INIT}}
#endif /* H5_HAVE_C99_DESIGNATED_INITIALIZER */
/* ======== Dataset creation properties ======== */
@@ -1997,6 +1998,7 @@ H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/])
H5P_genplist_t *plist; /* Property list pointer */
H5O_layout_t chunk_layout; /* Layout information for setting chunk info */
uint64_t chunk_nelmts; /* Number of elements in chunk */
+ unsigned max_enc_bytes_per_dim; /* Max. number of bytes required to encode this dimension */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2024,7 +2026,10 @@ H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/])
HDmemcpy(&chunk_layout, &H5D_def_layout_chunk_g, sizeof(H5D_def_layout_chunk_g));
HDmemset(&chunk_layout.u.chunk.dim, 0, sizeof(chunk_layout.u.chunk.dim));
chunk_nelmts = 1;
+ max_enc_bytes_per_dim = 0;
for(u = 0; u < (unsigned)ndims; u++) {
+ unsigned enc_bytes_per_dim; /* Number of bytes required to encode this dimension */
+
if(dim[u] == 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "all chunk dimensions must be positive")
if(dim[u] != (dim[u] & 0xffffffff))
@@ -2033,7 +2038,16 @@ H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/])
if(chunk_nelmts > (uint64_t)0xffffffff)
HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "number of elements in chunk must be < 4GB")
chunk_layout.u.chunk.dim[u] = (uint32_t)dim[u]; /* Store user's chunk dimensions */
+
+ /* Get encoded size of dim, in bytes */
+ enc_bytes_per_dim = (H5VM_log2_gen(dim[u]) + 8) / 8;
+
+ /* Check if this is the largest value so far */
+ if(enc_bytes_per_dim > max_enc_bytes_per_dim)
+ max_enc_bytes_per_dim = enc_bytes_per_dim;
} /* end for */
+ HDassert(max_enc_bytes_per_dim > 0 && max_enc_bytes_per_dim <= 8);
+ chunk_layout.u.chunk.enc_bytes_per_dim = max_enc_bytes_per_dim;
/* Get the plist structure */
if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE)))
@@ -2617,6 +2631,127 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5Pset_chunk_opts
+ *
+ * Purpose: Sets the options related to chunked storage for a dataset.
+ * The storage must already be set to chunked.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Thursday, January 21, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_chunk_opts(hid_t plist_id, unsigned options)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ H5O_layout_t layout; /* Layout information for setting chunk info */
+ uint8_t layout_flags = 0; /* "options" translated into layout message flags format */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "iIu", plist_id, options);
+
+ /* Check arguments */
+ if(options & ~(H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "unknown chunk options")
+
+#ifndef H5_HAVE_C99_DESIGNATED_INITIALIZER
+ /* If the compiler doesn't support C99 designated initializers, check if
+ * the default layout structs have been initialized yet or not. *ick* -QAK
+ */
+ if(!H5P_dcrt_def_layout_init_g)
+ if(H5P__init_def_layout() < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't initialize default layout info")
+#endif /* H5_HAVE_C99_DESIGNATED_INITIALIZER */
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Retrieve the layout property */
+ if(H5P_peek(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "can't get layout")
+ if(H5D_CHUNKED != layout.type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a chunked storage layout")
+
+ /* Translate options into flags that can be used with the layout message */
+ if(options & H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS)
+ layout_flags |= H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS;
+
+ /* Update the layout message, including the version (if necessary) */
+ /* This probably isn't the right way to do this, and should be changed once
+ * this branch gets the "real" way to set the layout version */
+ layout.u.chunk.flags = layout_flags;
+ if(layout.version < H5O_LAYOUT_VERSION_4)
+ layout.version = H5O_LAYOUT_VERSION_4;
+
+ /* Set layout value */
+ if(H5P_poke(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't set layout")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pset_chunk_opts() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_chunk_opts
+ *
+ * Purpose: Gets the options related to chunked storage for a dataset.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * Friday, January 22, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_chunk_opts(hid_t plist_id, unsigned *options)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ H5O_layout_t layout; /* Layout information for setting chunk info */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "i*Iu", plist_id, options);
+
+#ifndef H5_HAVE_C99_DESIGNATED_INITIALIZER
+ /* If the compiler doesn't support C99 designated initializers, check if
+ * the default layout structs have been initialized yet or not. *ick* -QAK
+ */
+ if(!H5P_dcrt_def_layout_init_g)
+ if(H5P__init_def_layout() < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't initialize default layout info")
+#endif /* H5_HAVE_C99_DESIGNATED_INITIALIZER */
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Retrieve the layout property */
+ if(H5P_peek(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "can't get layout")
+ if(H5D_CHUNKED != layout.type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a chunked storage layout")
+
+ if(options) {
+ /* Translate options from flags that can be used with the layout message
+ * to those known to the public */
+ *options = 0;
+ if(layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS)
+ *options |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+ } /* end if */
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pget_chunk_opts() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5Pset_external
*
* Purpose: Adds an external file to the list of external files. PLIST_ID
diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c
index d56a52a..a4d8293 100644
--- a/src/H5Pdxpl.c
+++ b/src/H5Pdxpl.c
@@ -260,7 +260,6 @@ static const void *H5D_def_vlen_alloc_info_g = H5D_XFER_VLEN_ALLOC_INFO_DEF; /
static const H5MM_free_t H5D_def_vlen_free_g = H5D_XFER_VLEN_FREE_DEF; /* Default value for vlen free function */
static const void *H5D_def_vlen_free_info_g = H5D_XFER_VLEN_FREE_INFO_DEF; /* Default value for vlen free information */
static const size_t H5D_def_hyp_vec_size_g = H5D_XFER_HYPER_VECTOR_SIZE_DEF; /* Default value for vector size */
-static const haddr_t H5D_def_metadata_tag_g = H5AC_METADATA_TAG_DEF; /* Default value for metadata tag */
static const H5FD_mpio_xfer_t H5D_def_io_xfer_mode_g = H5D_XFER_IO_XFER_MODE_DEF; /* Default value for I/O transfer mode */
static const H5FD_mpio_chunk_opt_t H5D_def_mpio_chunk_opt_mode_g = H5D_XFER_MPIO_CHUNK_OPT_HARD_DEF;
static const H5FD_mpio_collective_opt_t H5D_def_mpio_collective_opt_mode_g = H5D_XFER_MPIO_COLLECTIVE_OPT_DEF;
@@ -298,6 +297,7 @@ static const H5AC_ring_t H5D_ring_g = H5AC_XFER_RING_DEF; /* Default value for t
static herr_t
H5P__dxfr_reg_prop(H5P_genclass_t *pclass)
{
+ H5C_tag_t tag = H5C_TAG_DEF; /* Default value for cache entry tag */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -308,10 +308,8 @@ H5P__dxfr_reg_prop(H5P_genclass_t *pclass)
NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
- /* Register the metadata tag property */
- /* (Note: this property should not have an encode/decode callback -QAK) */
- if(H5P_register_real(pclass, H5AC_METADATA_TAG_NAME, H5AC_METADATA_TAG_SIZE, &H5D_def_metadata_tag_g,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
+ /* Register the cache tag property */
+ if(H5P_register_real(pclass, H5C_TAG_NAME, H5C_TAG_SIZE, &tag, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
/* Register the type conversion buffer property */
diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c
index 14e1c91..40a529b 100644
--- a/src/H5Pfapl.c
+++ b/src/H5Pfapl.c
@@ -178,6 +178,37 @@
#define H5F_ACS_CORE_WRITE_TRACKING_PAGE_SIZE_DEF 524288
#define H5F_ACS_CORE_WRITE_TRACKING_PAGE_SIZE_ENC H5P__encode_size_t
#define H5F_ACS_CORE_WRITE_TRACKING_PAGE_SIZE_DEC H5P__decode_size_t
+/* Definition for # of metadata read attempts */
+#define H5F_ACS_METADATA_READ_ATTEMPTS_SIZE sizeof(unsigned)
+#define H5F_ACS_METADATA_READ_ATTEMPTS_DEF 0
+#define H5F_ACS_METADATA_READ_ATTEMPTS_ENC H5P__encode_unsigned
+#define H5F_ACS_METADATA_READ_ATTEMPTS_DEC H5P__decode_unsigned
+/* Definition for object flush callback */
+#define H5F_ACS_OBJECT_FLUSH_CB_SIZE sizeof(H5F_object_flush_t)
+#define H5F_ACS_OBJECT_FLUSH_CB_DEF {NULL, NULL}
+/* Definition for status_flags in the superblock */
+#define H5F_ACS_CLEAR_STATUS_FLAGS_SIZE sizeof(hbool_t)
+#define H5F_ACS_CLEAR_STATUS_FLAGS_DEF FALSE
+/* Definition for 'use metadata cache logging' flag */
+#define H5F_ACS_USE_MDC_LOGGING_SIZE sizeof(hbool_t)
+#define H5F_ACS_USE_MDC_LOGGING_DEF FALSE
+#define H5F_ACS_USE_MDC_LOGGING_ENC H5P__encode_hbool_t
+#define H5F_ACS_USE_MDC_LOGGING_DEC H5P__decode_hbool_t
+/* Definition for 'mdc log location' flag */
+#define H5F_ACS_MDC_LOG_LOCATION_SIZE sizeof(char *)
+#define H5F_ACS_MDC_LOG_LOCATION_DEF NULL /* default is no log location */
+#define H5F_ACS_MDC_LOG_LOCATION_ENC H5P_facc_mdc_log_location_enc
+#define H5F_ACS_MDC_LOG_LOCATION_DEC H5P_facc_mdc_log_location_dec
+#define H5F_ACS_MDC_LOG_LOCATION_DEL H5P_facc_mdc_log_location_del
+#define H5F_ACS_MDC_LOG_LOCATION_COPY H5P_facc_mdc_log_location_copy
+#define H5F_ACS_MDC_LOG_LOCATION_CMP H5P_facc_mdc_log_location_cmp
+#define H5F_ACS_MDC_LOG_LOCATION_CLOSE H5P_facc_mdc_log_location_close
+/* Definition for 'start metadata cache logging on access' flag */
+#define H5F_ACS_START_MDC_LOG_ON_ACCESS_SIZE sizeof(hbool_t)
+#define H5F_ACS_START_MDC_LOG_ON_ACCESS_DEF FALSE
+#define H5F_ACS_START_MDC_LOG_ON_ACCESS_ENC H5P__encode_hbool_t
+#define H5F_ACS_START_MDC_LOG_ON_ACCESS_DEC H5P__decode_hbool_t
+
/******************/
/* Local Typedefs */
@@ -224,6 +255,14 @@ static herr_t H5P__facc_fclose_degree_dec(const void **pp, void *value);
static herr_t H5P__facc_multi_type_enc(const void *value, void **_pp, size_t *size);
static herr_t H5P__facc_multi_type_dec(const void **_pp, void *value);
+/* Metadata cache log location property callbacks */
+static herr_t H5P_facc_mdc_log_location_enc(const void *value, void **_pp, size_t *size);
+static herr_t H5P_facc_mdc_log_location_dec(const void **_pp, void *value);
+static herr_t H5P_facc_mdc_log_location_del(hid_t prop_id, const char *name, size_t size, void *value);
+static herr_t H5P_facc_mdc_log_location_copy(const char *name, size_t size, void *value);
+static int H5P_facc_mdc_log_location_cmp(const void *value1, const void *value2, size_t size);
+static herr_t H5P_facc_mdc_log_location_close(const char *name, size_t size, void *value);
+
/*********************/
/* Package Variables */
@@ -280,6 +319,12 @@ static const unsigned H5F_def_efc_size_g = H5F_ACS_EFC_SIZE_DEF;
static const H5FD_file_image_info_t H5F_def_file_image_info_g = H5F_ACS_FILE_IMAGE_INFO_DEF; /* Default file image info and callbacks */
static const hbool_t H5F_def_core_write_tracking_flag_g = H5F_ACS_CORE_WRITE_TRACKING_FLAG_DEF; /* Default setting for core VFD write tracking */
static const size_t H5F_def_core_write_tracking_page_size_g = H5F_ACS_CORE_WRITE_TRACKING_PAGE_SIZE_DEF; /* Default core VFD write tracking page size */
+static const unsigned H5F_def_metadata_read_attempts_g = H5F_ACS_METADATA_READ_ATTEMPTS_DEF; /* Default setting for the # of metadata read attempts */
+static const H5F_object_flush_t H5F_def_object_flush_cb_g = H5F_ACS_OBJECT_FLUSH_CB_DEF; /* Default setting for object flush callback */
+static const hbool_t H5F_def_clear_status_flags_g = H5F_ACS_CLEAR_STATUS_FLAGS_DEF; /* Default to clear the superblock status_flags */
+static const hbool_t H5F_def_use_mdc_logging_g = H5F_ACS_USE_MDC_LOGGING_DEF; /* Default metadata cache logging flag */
+static const char *H5F_def_mdc_log_location_g = H5F_ACS_MDC_LOG_LOCATION_DEF; /* Default mdc log location */
+static const hbool_t H5F_def_start_mdc_log_on_access_g = H5F_ACS_START_MDC_LOG_ON_ACCESS_DEF; /* Default mdc log start on access flag */
@@ -437,6 +482,39 @@ H5P__facc_reg_prop(H5P_genclass_t *pclass)
NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+ /* Register the # of read attempts */
+ if(H5P_register_real(pclass, H5F_ACS_METADATA_READ_ATTEMPTS_NAME, H5F_ACS_METADATA_READ_ATTEMPTS_SIZE, &H5F_def_metadata_read_attempts_g,
+ NULL, NULL, NULL, H5F_ACS_METADATA_READ_ATTEMPTS_ENC, H5F_ACS_METADATA_READ_ATTEMPTS_DEC,
+ NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
+ /* Register object flush callback */
+ /* (Note: this property should not have an encode/decode callback -QAK) */
+ if(H5P_register_real(pclass, H5F_ACS_OBJECT_FLUSH_CB_NAME, H5F_ACS_OBJECT_FLUSH_CB_SIZE, &H5F_def_object_flush_cb_g,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
+ /* Register the private property of whether to clear the superblock status_flags. It's used by h5clear only. */
+ if(H5P_register_real(pclass, H5F_ACS_CLEAR_STATUS_FLAGS_NAME, H5F_ACS_CLEAR_STATUS_FLAGS_SIZE, &H5F_def_clear_status_flags_g,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
+ /* Register the metadata cache logging flag. */
+ if(H5P_register_real(pclass, H5F_ACS_USE_MDC_LOGGING_NAME, H5F_ACS_USE_MDC_LOGGING_SIZE, &H5F_def_use_mdc_logging_g,
+ NULL, NULL, NULL, H5F_ACS_USE_MDC_LOGGING_ENC, H5F_ACS_USE_MDC_LOGGING_DEC, NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
+ /* Register the metadata cache log location. */
+ if(H5P_register_real(pclass, H5F_ACS_MDC_LOG_LOCATION_NAME, H5F_ACS_MDC_LOG_LOCATION_SIZE, &H5F_def_mdc_log_location_g,
+ NULL, NULL, NULL, H5F_ACS_MDC_LOG_LOCATION_ENC, H5F_ACS_MDC_LOG_LOCATION_DEC,
+ H5F_ACS_MDC_LOG_LOCATION_DEL, H5F_ACS_MDC_LOG_LOCATION_COPY, H5F_ACS_MDC_LOG_LOCATION_CMP, H5F_ACS_MDC_LOG_LOCATION_CLOSE) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
+ /* Register the flag that indicates whether mdc logging starts on file access. */
+ if(H5P_register_real(pclass, H5F_ACS_START_MDC_LOG_ON_ACCESS_NAME, H5F_ACS_START_MDC_LOG_ON_ACCESS_SIZE, &H5F_def_start_mdc_log_on_access_g,
+ NULL, NULL, NULL, H5F_ACS_START_MDC_LOG_ON_ACCESS_ENC, H5F_ACS_START_MDC_LOG_ON_ACCESS_DEC, NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5P__facc_reg_prop() */
@@ -2883,16 +2961,6 @@ H5P__facc_cache_config_cmp(const void *_config1, const void *_config2, size_t H5
if(config1->rpt_fcn_enabled < config2->rpt_fcn_enabled) HGOTO_DONE(-1);
if(config1->rpt_fcn_enabled > config2->rpt_fcn_enabled) HGOTO_DONE(1);
- if(config1->open_trace_file < config2->open_trace_file) HGOTO_DONE(-1);
- if(config1->open_trace_file > config2->open_trace_file) HGOTO_DONE(1);
-
- if(config1->close_trace_file < config2->close_trace_file) HGOTO_DONE(-1);
- if(config1->close_trace_file > config2->close_trace_file) HGOTO_DONE(1);
-
- if((ret_value = HDstrncmp(config1->trace_file_name, config2->trace_file_name,
- (size_t)(H5AC__MAX_TRACE_FILE_NAME_LEN + 1))) != 0)
- HGOTO_DONE(ret_value);
-
if(config1->evictions_enabled < config2->evictions_enabled) HGOTO_DONE(-1);
if(config1->evictions_enabled > config2->evictions_enabled) HGOTO_DONE(1);
@@ -3475,3 +3543,486 @@ done:
FUNC_LEAVE_API(ret_value)
}
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pset_metadata_read_attempts
+ *
+ * Purpose: Sets the # of read attempts in the file access property list
+ * when reading metadata with checksum.
+ * The # of read attempts set via this routine will only apply
+ * when opening a file with SWMR access.
+ * The # of read attempts set via this routine does not have
+ * any effect when opening a file with non-SWMR access; for this
+ *		case, the # of read attempts will always be 1.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Sept 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_metadata_read_attempts(hid_t plist_id, unsigned attempts)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "iIu", plist_id, attempts);
+
+ /* Cannot set the # of attempts to 0 */
+ if(attempts == 0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "number of metadatata read attempts must be greater than 0");
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_ACCESS)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Set values */
+ if(H5P_set(plist, H5F_ACS_METADATA_READ_ATTEMPTS_NAME, &attempts) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set # of metadata read attempts")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Pset_metadata_read_attempts() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_metadata_read_attempts
+ *
+ * Purpose: Returns the # of metadata read attempts set in the file access property list.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Sept 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_metadata_read_attempts(hid_t plist_id, unsigned *attempts/*out*/)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "ix", plist_id, attempts);
+
+ /* Get values */
+ if(attempts) {
+ H5P_genplist_t *plist; /* Property list pointer */
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_ACCESS)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Get the # of read attempts set */
+ if(H5P_get(plist, H5F_ACS_METADATA_READ_ATTEMPTS_NAME, attempts) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get the number of metadata read attempts")
+
+ /* If not set, return the default value */
+ if(*attempts == H5F_ACS_METADATA_READ_ATTEMPTS_DEF) /* 0 */
+ *attempts = H5F_METADATA_READ_ATTEMPTS;
+ } /* end if */
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pget_metadata_read_attempts() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:	H5Pset_object_flush_cb
+ *
+ * Purpose: Sets the callback function to invoke and the user data when an
+ * object flush occurs in the file.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Dec 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_object_flush_cb(hid_t plist_id, H5F_flush_cb_t func, void *udata)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ H5F_object_flush_t flush_info;
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE3("e", "ix*x", plist_id, func, udata);
+
+ /* Check if the callback function is NULL and the user data is non-NULL.
+ * This is almost certainly an error as the user data will not be used. */
+ if(!func && udata)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "callback is NULL while user data is not")
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_ACCESS)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Update property list */
+ flush_info.func = func;
+ flush_info.udata = udata;
+
+ /* Set values */
+ if(H5P_set(plist, H5F_ACS_OBJECT_FLUSH_CB_NAME, &flush_info) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set object flush callback")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Pset_object_flush_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:	H5Pget_object_flush_cb
+ *
+ * Purpose: Retrieves the callback function and user data set in the
+ * property list for an object flush.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Dec 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_object_flush_cb(hid_t plist_id, H5F_flush_cb_t *func, void **udata)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ H5F_object_flush_t flush_info;
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE3("e", "i*x**x", plist_id, func, udata);
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_ACCESS)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Retrieve the callback function and user data */
+ if(H5P_get(plist, H5F_ACS_OBJECT_FLUSH_CB_NAME, &flush_info) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get object flush callback")
+
+ /* Assign return value */
+ if(func)
+ *func = flush_info.func;
+ if(udata)
+ *udata = flush_info.udata;
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Pget_object_flush_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pset_mdc_log_options
+ *
+ * Purpose: Set metadata cache log options.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_mdc_log_options(hid_t plist_id, hbool_t is_enabled, const char *location,
+ hbool_t start_on_access)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ char * tmp_location; /* Working location pointer */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE4("e", "ib*sb", plist_id, is_enabled, location, start_on_access);
+
+ /* Check arguments */
+ if(H5P_DEFAULT == plist_id)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "can't modify default property list")
+ if(!location)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "location cannot be NULL")
+
+ /* Get the property list structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_ACCESS)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "plist_id is not a file access property list")
+
+ /* Get the current location string and free it */
+ if(H5P_get(plist, H5F_ACS_MDC_LOG_LOCATION_NAME, &tmp_location) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get current log location")
+ H5MM_xfree(tmp_location);
+
+ /* Make a copy of the passed-in location */
+ if(NULL == (tmp_location = H5MM_xstrdup(location)))
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTCOPY, FAIL, "can't copy passed-in log location")
+
+ /* Set values */
+ if(H5P_set(plist, H5F_ACS_USE_MDC_LOGGING_NAME, &is_enabled) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set is_enabled flag")
+ if(H5P_set(plist, H5F_ACS_MDC_LOG_LOCATION_NAME, &tmp_location) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set log location")
+ if(H5P_set(plist, H5F_ACS_START_MDC_LOG_ON_ACCESS_NAME, &start_on_access) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set start_on_access flag")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pset_mdc_log_options() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_mdc_log_options
+ *
+ * Purpose: Get metadata cache log options.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_mdc_log_options(hid_t plist_id, hbool_t *is_enabled, char *location,
+ size_t *location_size, hbool_t *start_on_access)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ char *location_ptr; /* Pointer to location string */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE5("e", "i*b*s*z*b", plist_id, is_enabled, location, location_size,
+ start_on_access);
+
+ /* Get the property list structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_ACCESS)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "plist_id is not a file access property list")
+
+ /* Get simple values */
+ if(is_enabled)
+ if(H5P_get(plist, H5F_ACS_USE_MDC_LOGGING_NAME, is_enabled) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get log location")
+ if(start_on_access)
+ if(H5P_get(plist, H5F_ACS_START_MDC_LOG_ON_ACCESS_NAME, start_on_access) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get start_on_access flag")
+
+ /* Get the location */
+ if(location || location_size)
+ if(H5P_get(plist, H5F_ACS_MDC_LOG_LOCATION_NAME, &location_ptr) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get log location")
+
+ /* Copy log location to output buffer */
+ if(location_ptr && location)
+ HDmemcpy(location, location_ptr, *location_size);
+
+ /* Get location size, including terminating NULL */
+ if(location_size) {
+ if(location_ptr)
+ *location_size = HDstrlen(location_ptr) + 1;
+ else
+ *location_size = 0;
+ }
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pget_mdc_log_options() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5P_facc_mdc_log_location_enc
+ *
+ * Purpose: Callback routine which is called whenever the metadata
+ * cache log location property in the file access property
+ * list is encoded.
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5P_facc_mdc_log_location_enc(const void *value, void **_pp, size_t *size)
+{
+ const char *log_location = *(const char * const *)value;
+ uint8_t **pp = (uint8_t **)_pp;
+ size_t len = 0;
+ uint64_t enc_value;
+ unsigned enc_size;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDcompile_assert(sizeof(size_t) <= sizeof(uint64_t));
+
+    /* calculate the length of the log location string */
+ if(NULL != log_location)
+ len = HDstrlen(log_location);
+
+ enc_value = (uint64_t)len;
+ enc_size = H5VM_limit_enc_size(enc_value);
+ HDassert(enc_size < 256);
+
+ if(NULL != *pp) {
+        /* encode the length of the log location string */
+ *(*pp)++ = (uint8_t)enc_size;
+ UINT64ENCODE_VAR(*pp, enc_value, enc_size);
+
+        /* encode the log location string */
+ if(NULL != log_location) {
+ HDmemcpy(*(char **)pp, log_location, len);
+ *pp += len;
+ } /* end if */
+ } /* end if */
+
+ *size += (1 + enc_size);
+ if(NULL != log_location)
+ *size += len;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5P_facc_mdc_log_location_enc() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5P_facc_mdc_log_location_dec
+ *
+ * Purpose: Callback routine which is called whenever the metadata
+ * cache log location property in the file access property
+ * list is decoded.
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5P_facc_mdc_log_location_dec(const void **_pp, void *_value)
+{
+ char **log_location = (char **)_value;
+ const uint8_t **pp = (const uint8_t **)_pp;
+ size_t len;
+ uint64_t enc_value; /* Decoded property value */
+ unsigned enc_size; /* Size of encoded property */
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ HDassert(pp);
+ HDassert(*pp);
+ HDassert(log_location);
+ HDcompile_assert(sizeof(size_t) <= sizeof(uint64_t));
+
+ /* Decode the size */
+ enc_size = *(*pp)++;
+ HDassert(enc_size < 256);
+
+ /* Decode the value */
+ UINT64DECODE_VAR(*pp, enc_value, enc_size);
+ len = enc_value;
+
+ if(0 != len) {
+        /* Make a copy of the log location string */
+ if(NULL == (*log_location = (char *)H5MM_malloc(len + 1)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "memory allocation failed for prefix")
+ HDstrncpy(*log_location, *(const char **)pp, len);
+ (*log_location)[len] = '\0';
+
+ *pp += len;
+ } /* end if */
+ else
+ *log_location = NULL;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5P_facc_mdc_log_location_dec() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5P_facc_mdc_log_location_del
+ *
+ * Purpose: Frees memory used to store the metadata cache log location.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5P_facc_mdc_log_location_del(hid_t H5_ATTR_UNUSED prop_id, const char H5_ATTR_UNUSED *name,
+ size_t H5_ATTR_UNUSED size, void *value)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(value);
+
+ H5MM_xfree(*(void **)value);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5P_facc_mdc_log_location_del() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5P_facc_mdc_log_location_copy
+ *
+ * Purpose: Creates a copy of the metadata cache log location string.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5P_facc_mdc_log_location_copy(const char H5_ATTR_UNUSED *name, size_t H5_ATTR_UNUSED size, void *value)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(value);
+
+ *(char **)value = H5MM_xstrdup(*(const char **)value);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5P_facc_mdc_log_location_copy() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5P_facc_mdc_log_location_cmp
+ *
+ * Purpose: Callback routine which is called whenever the metadata
+ *              cache log location property in the file access property
+ * list is compared.
+ *
+ * Return: zero if VALUE1 and VALUE2 are equal, non zero otherwise.
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5P_facc_mdc_log_location_cmp(const void *value1, const void *value2, size_t H5_ATTR_UNUSED size)
+{
+ const char *pref1 = *(const char * const *)value1;
+ const char *pref2 = *(const char * const *)value2;
+ int ret_value = 0;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ if(NULL == pref1 && NULL != pref2)
+ HGOTO_DONE(1);
+ if(NULL != pref1 && NULL == pref2)
+ HGOTO_DONE(-1);
+ if(NULL != pref1 && NULL != pref2)
+ ret_value = HDstrcmp(pref1, pref2);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5P_facc_mdc_log_location_cmp() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5P_facc_mdc_log_location_close
+ *
+ * Purpose: Frees memory used to store the metadata cache log location
+ * string
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5P_facc_mdc_log_location_close(const char H5_ATTR_UNUSED *name, size_t H5_ATTR_UNUSED size, void *value)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(value);
+
+ H5MM_xfree(*(void **)value);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5P_facc_mdc_log_location_close() */
+
diff --git a/src/H5Plapl.c b/src/H5Plapl.c
index b7c682c..ac7fabb 100644
--- a/src/H5Plapl.c
+++ b/src/H5Plapl.c
@@ -1150,7 +1150,9 @@ H5Pset_elink_acc_flags(hid_t lapl_id, unsigned flags)
H5TRACE2("e", "iIu", lapl_id, flags);
/* Check that flags are valid */
- if((flags != H5F_ACC_RDWR) && (flags != H5F_ACC_RDONLY) && (flags != H5F_ACC_DEFAULT))
+ if(( flags != H5F_ACC_RDWR) && (flags != (H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE))
+ && (flags != H5F_ACC_RDONLY) && (flags != (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ))
+ && (flags != H5F_ACC_DEFAULT))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file open flags")
/* Get the plist structure */
diff --git a/src/H5Ppkg.h b/src/H5Ppkg.h
index 105baf7..6dffcec 100644
--- a/src/H5Ppkg.h
+++ b/src/H5Ppkg.h
@@ -230,6 +230,7 @@ H5_DLL herr_t H5P_get_filter(const struct H5Z_filter_info_t *filter,
H5_DLL char *H5P_get_class_path_test(hid_t pclass_id);
H5_DLL hid_t H5P_open_class_path_test(const char *path);
H5_DLL herr_t H5P_reset_external_file_test(hid_t dcpl_id);
+H5_DLL herr_t H5P_reset_layout_test(hid_t dcpl_id);
#endif /* H5P_TESTING */
#endif /* _H5Ppkg_H */
diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h
index 932472c..1cfba40 100644
--- a/src/H5Ppublic.h
+++ b/src/H5Ppublic.h
@@ -347,6 +347,12 @@ H5_DLL herr_t H5Pget_file_image_callbacks(hid_t fapl_id,
H5FD_file_image_callbacks_t *callbacks_ptr);
H5_DLL herr_t H5Pset_core_write_tracking(hid_t fapl_id, hbool_t is_enabled, size_t page_size);
H5_DLL herr_t H5Pget_core_write_tracking(hid_t fapl_id, hbool_t *is_enabled, size_t *page_size);
+H5_DLL herr_t H5Pset_metadata_read_attempts(hid_t plist_id, unsigned attempts);
+H5_DLL herr_t H5Pget_metadata_read_attempts(hid_t plist_id, unsigned *attempts);
+H5_DLL herr_t H5Pset_object_flush_cb(hid_t plist_id, H5F_flush_cb_t func, void *udata);
+H5_DLL herr_t H5Pget_object_flush_cb(hid_t plist_id, H5F_flush_cb_t *func, void **udata);
+H5_DLL herr_t H5Pset_mdc_log_options(hid_t plist_id, hbool_t is_enabled, const char *location, hbool_t start_on_access);
+H5_DLL herr_t H5Pget_mdc_log_options(hid_t plist_id, hbool_t *is_enabled, char *location, size_t *location_size, hbool_t *start_on_access);
/* Dataset creation property list (DCPL) routines */
H5_DLL herr_t H5Pset_layout(hid_t plist_id, H5D_layout_t layout);
@@ -364,6 +370,8 @@ H5_DLL ssize_t H5Pget_virtual_dsetname(hid_t dcpl_id, size_t index,
char *name/*out*/, size_t size);
H5_DLL herr_t H5Pset_external(hid_t plist_id, const char *name, off_t offset,
hsize_t size);
+H5_DLL herr_t H5Pset_chunk_opts(hid_t plist_id, unsigned opts);
+H5_DLL herr_t H5Pget_chunk_opts(hid_t plist_id, unsigned *opts);
H5_DLL int H5Pget_external_count(hid_t plist_id);
H5_DLL herr_t H5Pget_external(hid_t plist_id, unsigned idx, size_t name_size,
char *name/*out*/, off_t *offset/*out*/,
@@ -396,6 +404,10 @@ H5_DLL herr_t H5Pset_virtual_view(hid_t plist_id, H5D_vds_view_t view);
H5_DLL herr_t H5Pget_virtual_view(hid_t plist_id, H5D_vds_view_t *view);
H5_DLL herr_t H5Pset_virtual_printf_gap(hid_t plist_id, hsize_t gap_size);
H5_DLL herr_t H5Pget_virtual_printf_gap(hid_t plist_id, hsize_t *gap_size);
+H5_DLL herr_t H5Pset_append_flush(hid_t plist_id, unsigned ndims,
+ const hsize_t boundary[], H5D_append_cb_t func, void *udata);
+H5_DLL herr_t H5Pget_append_flush(hid_t plist_id, unsigned dims,
+ hsize_t boundary[], H5D_append_cb_t *func, void **udata);
/* Dataset xfer property list (DXPL) routines */
H5_DLL herr_t H5Pset_data_transform(hid_t plist_id, const char* expression);
diff --git a/src/H5Ptest.c b/src/H5Ptest.c
index 8240f6a..63708e7 100644
--- a/src/H5Ptest.c
+++ b/src/H5Ptest.c
@@ -171,3 +171,49 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5P_reset_external_file_test() */
+
+/*--------------------------------------------------------------------------
+ NAME
+ H5P_reset_layout_test
+ PURPOSE
+ Routine to reset layout message
+ USAGE
+ herr_t H5P_reset_layout_test(plist)
+ hid_t dcpl_id; IN: the property list
+
+ RETURNS
+ Non-negative on success/Negative on failure
+
+ PROGRAMMER
+ Quincey Koziol
+ April 5, 2012
+--------------------------------------------------------------------------*/
+herr_t
+H5P_reset_layout_test(hid_t dcpl_id)
+{
+ H5O_layout_t layout; /* Layout message */
+ H5P_genplist_t *plist; /* Property list */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Check arguments */
+ if(NULL == (plist = (H5P_genplist_t *)H5I_object(dcpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list")
+
+ /* Get layout message */
+ if(H5P_peek(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get layout")
+
+ /* Clean up any values set for the layout */
+ if(H5O_msg_reset(H5O_LAYOUT_ID, &layout) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't release layout info")
+
+ /* Set layout message */
+ if(H5P_poke(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set layout")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5P_reset_layout_test() */
+
diff --git a/src/H5SM.c b/src/H5SM.c
index 4a562dd..908463e 100644
--- a/src/H5SM.c
+++ b/src/H5SM.c
@@ -500,7 +500,7 @@ H5SM_create_index(H5F_t *f, H5SM_index_header_t *header, hid_t dxpl_id)
bt2_cparam.rrec_size = (uint32_t)H5SM_SOHM_ENTRY_SIZE(f);
bt2_cparam.split_percent = H5SM_B2_SPLIT_PERCENT;
bt2_cparam.merge_percent = H5SM_B2_MERGE_PERCENT;
- if(NULL == (bt2 = H5B2_create(f, dxpl_id, &bt2_cparam, f)))
+ if(NULL == (bt2 = H5B2_create(f, dxpl_id, &bt2_cparam, f, NULL)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTCREATE, FAIL, "B-tree creation failed for SOHM index")
/* Retrieve the v2 B-tree's address in the file */
@@ -600,7 +600,7 @@ H5SM_delete_index(H5F_t *f, H5SM_index_header_t *header, hid_t dxpl_id,
HDassert(header->index_type == H5SM_BTREE);
/* Delete the B-tree. */
- if(H5B2_delete(f, dxpl_id, header->index_addr, f, NULL, NULL) < 0)
+ if(H5B2_delete(f, dxpl_id, header->index_addr, f, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_CANTDELETE, FAIL, "unable to delete B-tree")
/* Revert to list unless B-trees can have zero records */
@@ -743,7 +743,7 @@ H5SM_convert_list_to_btree(H5F_t *f, H5SM_index_header_t *header,
bt2_cparam.rrec_size = (uint32_t)H5SM_SOHM_ENTRY_SIZE(f);
bt2_cparam.split_percent = H5SM_B2_SPLIT_PERCENT;
bt2_cparam.merge_percent = H5SM_B2_MERGE_PERCENT;
- if(NULL == (bt2 = H5B2_create(f, dxpl_id, &bt2_cparam, f)))
+ if(NULL == (bt2 = H5B2_create(f, dxpl_id, &bt2_cparam, f, NULL)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTCREATE, FAIL, "B-tree creation failed for SOHM index")
/* Retrieve the v2 B-tree's address in the file */
@@ -856,7 +856,7 @@ H5SM_convert_btree_to_list(H5F_t * f, H5SM_index_header_t * header, hid_t dxpl_i
/* Delete the B-tree and have messages copy themselves to the
* list as they're deleted
*/
- if(H5B2_delete(f, dxpl_id, btree_addr, f, H5SM_bt2_convert_to_list_op, list) < 0)
+ if(H5B2_delete(f, dxpl_id, btree_addr, f, NULL, H5SM_bt2_convert_to_list_op, list) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_CANTDELETE, FAIL, "unable to delete B-tree")
done:
@@ -1339,7 +1339,7 @@ H5SM_write_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
HDassert(header->index_type == H5SM_BTREE);
/* Open the index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, header->index_addr, f)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, header->index_addr, f, NULL)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for SOHM index")
if(defer) {
@@ -1467,7 +1467,7 @@ H5SM_write_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
/* Open the index v2 B-tree, if it isn't already */
if(NULL == bt2) {
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, header->index_addr, f)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, header->index_addr, f, NULL)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for SOHM index")
} /* end if */
@@ -1836,7 +1836,7 @@ H5SM_delete_from_index(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
HDassert(header->index_type == H5SM_BTREE);
/* Open the index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, header->index_addr, f)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, header->index_addr, f, NULL)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for SOHM index")
/* If this returns failure, it means that the message wasn't found.
@@ -1866,7 +1866,7 @@ H5SM_delete_from_index(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
else {
/* Open the index v2 B-tree, if it isn't already */
if(NULL == bt2) {
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, header->index_addr, f)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, header->index_addr, f, NULL)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for SOHM index")
} /* end if */
@@ -2218,7 +2218,7 @@ H5SM_get_refcount(H5F_t *f, hid_t dxpl_id, unsigned type_id,
HDassert(header->index_type == H5SM_BTREE);
/* Open the index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, header->index_addr, f)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, header->index_addr, f, NULL)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for SOHM index")
/* Look up the message in the v2 B-tree */
@@ -2752,7 +2752,7 @@ H5SM_ih_size(H5F_t *f, hid_t dxpl_id, hsize_t *hdr_size, H5_ih_info_t *ih_info)
if(table->indexes[u].index_type == H5SM_BTREE) {
if(H5F_addr_defined(table->indexes[u].index_addr)) {
/* Open the index v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl_id, table->indexes[u].index_addr, f)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl_id, table->indexes[u].index_addr, f, NULL)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTOPENOBJ, FAIL, "unable to open v2 B-tree for SOHM index")
if(H5B2_size(bt2, dxpl_id, &(ih_info->index_size)) < 0)
diff --git a/src/H5SMcache.c b/src/H5SMcache.c
index bbd7e87..7b94743 100644
--- a/src/H5SMcache.c
+++ b/src/H5SMcache.c
@@ -58,7 +58,10 @@
/********************/
/* Metadata cache (H5AC) callbacks */
-static herr_t H5SM__cache_table_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5SM__cache_table_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5SM__cache_table_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5SM__cache_table_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5SM__cache_table_image_len(const void *thing, size_t *image_len,
@@ -67,7 +70,10 @@ static herr_t H5SM__cache_table_serialize(const H5F_t *f, void *image,
size_t len, void *thing);
static herr_t H5SM__cache_table_free_icr(void *thing);
-static herr_t H5SM__cache_list_get_load_size(const void *udata, size_t *image_len);
+static herr_t H5SM__cache_list_get_load_size(const void *image_ptr, void *udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
+static htri_t H5SM__cache_list_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *H5SM__cache_list_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5SM__cache_list_image_len(const void *thing, size_t *image_len,
@@ -88,6 +94,7 @@ const H5AC_class_t H5AC_SOHM_TABLE[1] = {{
H5FD_MEM_SOHM_TABLE, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5SM__cache_table_get_load_size, /* 'get_load_size' callback */
+ H5SM__cache_table_verify_chksum, /* 'verify_chksum' callback */
H5SM__cache_table_deserialize, /* 'deserialize' callback */
H5SM__cache_table_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -104,6 +111,7 @@ const H5AC_class_t H5AC_SOHM_LIST[1] = {{
H5FD_MEM_SOHM_TABLE, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5SM__cache_list_get_load_size, /* 'get_load_size' callback */
+ H5SM__cache_list_verify_chksum, /* 'verify_chksum' callback */
H5SM__cache_list_deserialize, /* 'deserialize' callback */
H5SM__cache_list_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
@@ -142,9 +150,12 @@ const H5AC_class_t H5AC_SOHM_LIST[1] = {{
*-------------------------------------------------------------------------
*/
static herr_t
-H5SM__cache_table_get_load_size(const void *_udata, size_t *image_len)
+H5SM__cache_table_get_load_size(const void *_image, void *_udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5SM_table_cache_ud_t *udata = (const H5SM_table_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ const H5SM_table_cache_ud_t *udata = (const H5SM_table_cache_ud_t *)_udata; /* User data for callback */
FUNC_ENTER_STATIC_NOERR
@@ -153,13 +164,54 @@ H5SM__cache_table_get_load_size(const void *_udata, size_t *image_len)
HDassert(udata->f);
HDassert(image_len);
- *image_len = H5SM_TABLE_SIZE(udata->f);
+ if(image == NULL)
+ *image_len = H5SM_TABLE_SIZE(udata->f);
+ else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5SM__cache_table_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5SM__cache_table_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+htri_t
+H5SM__cache_table_verify_chksum(const void *_image, size_t len, void H5_ATTR_UNUSED *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, len, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5SM__cache_table_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5SM__cache_table_deserialize
*
* Purpose: Given a buffer containing the on disk representation of the
@@ -184,7 +236,6 @@ H5SM__cache_table_deserialize(const void *_image, size_t len, void *_udata,
H5SM_table_cache_ud_t *udata = (H5SM_table_cache_ud_t *)_udata; /* Pointer to user data */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into input buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
size_t u; /* Counter variable for index headers */
void *ret_value = NULL; /* Return value */
@@ -258,21 +309,17 @@ H5SM__cache_table_deserialize(const void *_image, size_t len, void *_udata,
/* Compute the size of a list index for this SOHM index */
table->indexes[u].list_size = H5SM_LIST_SIZE(f, table->indexes[u].list_max);
+ table->indexes[u].list_size = H5SM_LIST_SIZE(f, table->indexes[u].list_max);
} /* end for */
+ /* checksum verification already done in verify_chksum cb */
+
/* Read in checksum */
UINT32DECODE(image, stored_chksum);
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) == table->table_size);
- /* Compute checksum on entire header */
- computed_chksum = H5_checksum_metadata(_image, (table->table_size - H5SM_SIZEOF_CHECKSUM), 0);
-
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_SOHM, H5E_BADVALUE, NULL, "incorrect metadata checksum for shared message table")
-
/* Set return value */
ret_value = table;
@@ -466,9 +513,12 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5SM__cache_list_get_load_size(const void *_udata, size_t *image_len)
+H5SM__cache_list_get_load_size(const void *_image, void *_udata,
+ size_t *image_len, size_t *actual_len,
+ hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
- const H5SM_list_cache_ud_t *udata = (const H5SM_list_cache_ud_t *)_udata; /* User data for callback */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ const H5SM_list_cache_ud_t *udata = (const H5SM_list_cache_ud_t *)_udata; /* User data for callback */
FUNC_ENTER_STATIC_NOERR
@@ -478,13 +528,60 @@ H5SM__cache_list_get_load_size(const void *_udata, size_t *image_len)
HDassert(udata->header->list_size > 0);
HDassert(image_len);
- *image_len = udata->header->list_size;
+ if(image == NULL)
+ *image_len = udata->header->list_size;
+ else {
+ HDassert(actual_len);
+ HDassert(*actual_len == *image_len);
+ }
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5SM__cache_list_get_load_size() */
/*-------------------------------------------------------------------------
+ * Function: H5SM__cache_list_verify_chksum
+ *
+ * Purpose: Verify the computed checksum of the data structure is the
+ * same as the stored chksum.
+ *
+ * Return: Success: TRUE/FALSE
+ * Failure: Negative
+ *
+ * Programmer: Vailin Choi; Aug 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+htri_t
+H5SM__cache_list_verify_chksum(const void *_image, size_t H5_ATTR_UNUSED len, void *_udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5SM_list_cache_ud_t *udata = (H5SM_list_cache_ud_t *)_udata; /* User data for callback */
+ size_t chk_size; /* Exact size of the node with checksum at the end */
+ uint32_t stored_chksum; /* Stored metadata checksum value */
+ uint32_t computed_chksum; /* Computed metadata checksum value */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check arguments */
+ HDassert(image);
+ HDassert(udata);
+
+ /* Exact size with checksum at the end */
+ chk_size = H5SM_LIST_SIZE(udata->f, udata->header->num_messages);
+
+ /* Get stored and computed checksums */
+ H5F_get_checksums(image, chk_size, &stored_chksum, &computed_chksum);
+
+ if(stored_chksum != computed_chksum)
+ ret_value = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5SM__cache_list_verify_chksum() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5SM__cache_list_deserialize
*
* Purpose: Given a buffer containing the on disk image of a list of
@@ -508,7 +605,6 @@ H5SM__cache_list_deserialize(const void *_image, size_t len, void *_udata,
H5SM_bt2_ctx_t ctx; /* Message encoding context */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into input buffer */
uint32_t stored_chksum; /* Stored metadata checksum value */
- uint32_t computed_chksum; /* Computed metadata checksum value */
size_t u; /* Counter variable for messages in list */
void *ret_value = NULL; /* Return value */
@@ -546,19 +642,14 @@ H5SM__cache_list_deserialize(const void *_image, size_t len, void *_udata,
image += H5SM_SOHM_ENTRY_SIZE(udata->f);
} /* end for */
+ /* checksum verification already done in verify_chksum cb */
+
/* Read in checksum */
UINT32DECODE(image, stored_chksum);
/* Sanity check */
HDassert((size_t)(image - (const uint8_t *)_image) <= udata->header->list_size);
- /* Compute checksum on entire header */
- computed_chksum = H5_checksum_metadata(_image, ((size_t)(image - (const uint8_t *)_image) - H5SM_SIZEOF_CHECKSUM), 0);
-
- /* Verify checksum */
- if(stored_chksum != computed_chksum)
- HGOTO_ERROR(H5E_SOHM, H5E_BADVALUE, NULL, "incorrect metadata checksum for shared message list")
-
/* Initialize the rest of the array */
for(u = udata->header->num_messages; u < udata->header->list_max; u++)
list->messages[u].location = H5SM_NO_LOC;
diff --git a/src/H5T.c b/src/H5T.c
index fd362eb..030f5a7 100644
--- a/src/H5T.c
+++ b/src/H5T.c
@@ -3602,6 +3602,7 @@ done:
herr_t
H5T_close(H5T_t *dt)
{
+ hbool_t corked; /* Whether the named datatype is corked or not */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -3612,6 +3613,16 @@ H5T_close(H5T_t *dt)
dt->shared->fo_count--;
if(dt->shared->state != H5T_STATE_OPEN || dt->shared->fo_count == 0) {
+ /* Uncork cache entries with object address tag for named datatype only */
+ if(dt->shared->state == H5T_STATE_OPEN && dt->shared->fo_count == 0) {
+ if(H5AC_cork(dt->oloc.file, dt->oloc.addr, H5AC__GET_CORKED, &corked) < 0)
+ HGOTO_ERROR(H5E_ATOM, H5E_SYSTEM, FAIL, "unable to retrieve an object's cork status")
+ if(corked) {
+ if(H5AC_cork(dt->oloc.file, dt->oloc.addr, H5AC__UNCORK, NULL) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_SYSTEM, FAIL, "unable to uncork an object")
+ } /* end if */
+ } /* end if */
+
if(H5T__free(dt) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTFREE, FAIL, "unable to free datatype");
@@ -5453,3 +5464,75 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5T_patch_file() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5Tflush
+ *
+ * Purpose: Flushes all buffers associated with a named datatype to disk.
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Mike McGreevy
+ * May 19, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Tflush(hid_t type_id)
+{
+ H5T_t *dt; /* Datatype for this operation */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", type_id);
+
+ /* Check args */
+ if(NULL == (dt = (H5T_t *)H5I_object_verify(type_id, H5I_DATATYPE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ if(!H5T_is_named(dt))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a committed datatype")
+
+ /* To flush metadata and invoke flush callback if there is */
+ if(H5O_flush_common(&dt->oloc, type_id, H5AC_dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTFLUSH, FAIL, "unable to flush datatype and object flush callback")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Tflush */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Trefresh
+ *
+ * Purpose: Refreshes all buffers associated with a named datatype.
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Mike McGreevy
+ * July 21, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Trefresh(hid_t type_id)
+{
+ H5T_t * dt = NULL;
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", type_id);
+
+ /* Check args */
+ if(NULL == (dt = (H5T_t *)H5I_object_verify(type_id, H5I_DATATYPE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ if(!H5T_is_named(dt))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a committed datatype")
+
+ /* Call private function to refresh datatype object */
+ if ((H5O_refresh_metadata(type_id, dt->oloc, H5AC_dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTLOAD, FAIL, "unable to refresh datatype")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Trefresh */
+
diff --git a/src/H5Tcommit.c b/src/H5Tcommit.c
index 5ff1c73..783d9d8 100644
--- a/src/H5Tcommit.c
+++ b/src/H5Tcommit.c
@@ -355,7 +355,7 @@ H5T__commit(H5F_t *file, H5T_t *type, hid_t tcpl_id, hid_t dxpl_id)
loc_init = TRUE;
/* Set the latest format, if requested */
- if(H5F_USE_LATEST_FORMAT(file))
+ if(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_DATATYPE))
if(H5T_set_latest_version(type) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set latest version of datatype")
diff --git a/src/H5Tpublic.h b/src/H5Tpublic.h
index d646ef1..df7ad41 100644
--- a/src/H5Tpublic.h
+++ b/src/H5Tpublic.h
@@ -511,6 +511,8 @@ H5_DLL hid_t H5Tget_create_plist(hid_t type_id);
H5_DLL htri_t H5Tcommitted(hid_t type_id);
H5_DLL herr_t H5Tencode(hid_t obj_id, void *buf, size_t *nalloc);
H5_DLL hid_t H5Tdecode(const void *buf);
+H5_DLL herr_t H5Tflush(hid_t type_id);
+H5_DLL herr_t H5Trefresh(hid_t type_id);
/* Operations defined on compound datatypes */
H5_DLL herr_t H5Tinsert(hid_t parent_id, const char *name, size_t offset,
diff --git a/src/H5VMprivate.h b/src/H5VMprivate.h
index 7d3361b..cbe108a 100644
--- a/src/H5VMprivate.h
+++ b/src/H5VMprivate.h
@@ -49,6 +49,39 @@ typedef herr_t (*H5VM_opvv_func_t)(hsize_t dst_off, hsize_t src_off,
#define H5VM_vector_zero(N,DST) HDmemset(DST,0,(N)*sizeof(*(DST)))
+/* Given a coordinate offset array (COORDS) of type TYPE, move the unlimited
+ * dimension (UNLIM_DIM) value to offset 0, sliding any intermediate values down
+ * one position. */
+#define H5VM_swizzle_coords(TYPE,COORDS,UNLIM_DIM) { \
+ /* COORDS must be an array of type TYPE */ \
+ HDassert(sizeof(COORDS[0]) == sizeof(TYPE)); \
+ \
+ /* Nothing to do when unlimited dimension is at position 0 */ \
+ if(0 != (UNLIM_DIM)) { \
+ TYPE _tmp = (COORDS)[UNLIM_DIM]; \
+ \
+ HDmemmove(&(COORDS)[1], &(COORDS)[0], sizeof(TYPE) * (UNLIM_DIM)); \
+ (COORDS)[0] = _tmp; \
+ } /* end if */ \
+}
+
+/* Given a coordinate offset array (COORDS) of type TYPE, move the value at
+ * offset 0 to offset of the unlimied dimension (UNLIM_DIM), sliding any
+ * intermediate values up one position. Undoes the "swizzle_coords" operation.
+ */
+#define H5VM_unswizzle_coords(TYPE,COORDS,UNLIM_DIM) { \
+ /* COORDS must be an array of type TYPE */ \
+ HDassert(sizeof(COORDS[0]) == sizeof(TYPE)); \
+ \
+ /* Nothing to do when unlimited dimension is at position 0 */ \
+ if(0 != (UNLIM_DIM)) { \
+ TYPE _tmp = (COORDS)[0]; \
+ \
+ HDmemmove(&(COORDS)[0], &(COORDS)[1], sizeof(TYPE) * (UNLIM_DIM)); \
+ (COORDS)[UNLIM_DIM] = _tmp; \
+ } /* end if */ \
+}
+
/* A null pointer is equivalent to a zero vector */
#define H5VM_ZERO NULL
diff --git a/src/H5err.txt b/src/H5err.txt
index e0ebf5e..a156316 100644
--- a/src/H5err.txt
+++ b/src/H5err.txt
@@ -175,6 +175,9 @@ MINOR, CACHE, H5E_CANTRESIZE, Unable to resize a metadata cache entry
MINOR, CACHE, H5E_CANTDEPEND, Unable to create a flush dependency
MINOR, CACHE, H5E_CANTUNDEPEND, Unable to destroy a flush dependency
MINOR, CACHE, H5E_CANTNOTIFY, Unable to notify object about action
+MINOR, CACHE, H5E_LOGFAIL, Failure in the cache logging framework
+MINOR, CACHE, H5E_CANTCORK, Unable to cork an object
+MINOR, CACHE, H5E_CANTUNCORK, Unable to uncork an object
# B-tree related errors
MINOR, BTREE, H5E_NOTFOUND, Object not found
diff --git a/src/H5public.h b/src/H5public.h
index 5b95fb7..c4441be 100644
--- a/src/H5public.h
+++ b/src/H5public.h
@@ -95,9 +95,9 @@ extern "C" {
#define H5_VERS_MAJOR 1 /* For major interface/format changes */
#define H5_VERS_MINOR 9 /* For minor interface/format changes */
#define H5_VERS_RELEASE 233 /* For tweaks, bug-fixes, or development */
-#define H5_VERS_SUBRELEASE "" /* For pre-releases like snap0 */
+#define H5_VERS_SUBRELEASE "swmr3" /* For pre-releases like snap0 */
/* Empty string for real releases. */
-#define H5_VERS_INFO "HDF5 library version: 1.9.233" /* Full version string */
+#define H5_VERS_INFO "HDF5 library version: 1.9.233-swmr3" /* Full version string */
#define H5check() H5check_version(H5_VERS_MAJOR,H5_VERS_MINOR, \
H5_VERS_RELEASE)
diff --git a/src/H5trace.c b/src/H5trace.c
index 2fd75ac..44b2ed5 100644
--- a/src/H5trace.c
+++ b/src/H5trace.c
@@ -495,6 +495,52 @@ H5_trace(const double *returning, const char *func, const char *type, ...)
} /* end else */
break;
+ case 'k':
+ if(ptr) {
+ if(vp)
+ fprintf(out, "0x%lx", (unsigned long)vp);
+ else
+ fprintf(out, "NULL");
+ } /* end if */
+ else {
+ H5D_chunk_index_t idx = (H5D_chunk_index_t)va_arg(ap, int);
+
+ switch(idx) {
+ case H5D_CHUNK_IDX_BTREE:
+ fprintf(out, "H5D_CHUNK_IDX_BTREE");
+ break;
+
+ case H5D_CHUNK_IDX_NONE:
+ fprintf(out, "H5D_CHUNK_IDX_NONE");
+ break;
+
+ case H5D_CHUNK_IDX_FARRAY:
+ fprintf(out, "H5D_CHUNK_IDX_FARRAY");
+ break;
+
+ case H5D_CHUNK_IDX_EARRAY:
+ fprintf(out, "H5D_CHUNK_IDX_EARRAY");
+ break;
+
+ case H5D_CHUNK_IDX_BT2:
+ fprintf(out, "H5D_CHUNK_IDX_BT2");
+ break;
+
+ case H5D_CHUNK_IDX_SINGLE:
+ fprintf(out, "H5D_CHUNK_IDX_SINGLE");
+ break;
+
+ case H5D_CHUNK_IDX_NTYPES:
+ fprintf(out, "ERROR: H5D_CHUNK_IDX_NTYPES (invalid value)");
+ break;
+
+ default:
+ fprintf(out, "UNKNOWN VALUE: %ld", (long)idx);
+ break;
+ } /* end switch */
+ } /* end else */
+ break;
+
case 'l':
if(ptr) {
if(vp)
diff --git a/src/Makefile.am b/src/Makefile.am
index d6a5909..7f8babd 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -42,15 +42,15 @@ DISTCLEANFILES=H5pubconf.h
# library sources
libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \
H5A.c H5Abtree2.c H5Adense.c H5Adeprec.c H5Aint.c H5Atest.c \
- H5AC.c \
+ H5AC.c H5AClog.c \
H5B.c H5Bcache.c H5Bdbg.c \
H5B2.c H5B2cache.c H5B2dbg.c H5B2hdr.c H5B2int.c H5B2stat.c H5B2test.c \
H5C.c \
H5CS.c \
- H5D.c H5Dbtree.c H5Dchunk.c H5Dcompact.c H5Dcontig.c H5Ddbg.c \
- H5Ddeprec.c H5Defl.c H5Dfill.c H5Dint.c \
- H5Dio.c H5Dlayout.c \
- H5Doh.c H5Dscatgath.c H5Dselect.c H5Dtest.c H5Dvirtual.c \
+ H5D.c H5Dbtree.c H5Dbtree2.c H5Dchunk.c H5Dcompact.c H5Dcontig.c H5Ddbg.c \
+ H5Ddeprec.c H5Dearray.c H5Defl.c H5Dfarray.c H5Dsingle.c H5Dfill.c H5Dint.c \
+ H5Dio.c H5Dlayout.c H5Dnone.c H5Doh.c H5Dscatgath.c \
+ H5Dselect.c H5Dtest.c H5Dvirtual.c \
H5E.c H5Edeprec.c H5Eint.c \
H5EA.c H5EAcache.c H5EAdbg.c H5EAdblkpage.c H5EAdblock.c H5EAhdr.c \
H5EAiblock.c H5EAint.c H5EAsblock.c H5EAstat.c H5EAtest.c \
@@ -59,19 +59,20 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \
H5Fmount.c H5Fquery.c \
H5Fsfile.c H5Fsuper.c H5Fsuper_cache.c H5Ftest.c \
H5FA.c H5FAcache.c H5FAdbg.c H5FAdblock.c H5FAdblkpage.c H5FAhdr.c \
- H5FAstat.c H5FAtest.c \
+ H5FAint.c H5FAstat.c H5FAtest.c \
H5FD.c H5FDcore.c \
H5FDfamily.c H5FDint.c H5FDlog.c \
- H5FDmulti.c H5FDsec2.c H5FDspace.c H5FDstdio.c \
- H5FL.c H5FO.c H5FS.c H5FScache.c H5FSdbg.c H5FSsection.c H5FSstat.c H5FStest.c \
+ H5FDmulti.c H5FDsec2.c H5FDspace.c H5FDstdio.c H5FDtest.c \
+ H5FL.c H5FO.c H5FS.c H5FScache.c H5FSdbg.c H5FSint.c H5FSsection.c \
+ H5FSstat.c H5FStest.c \
H5G.c H5Gbtree2.c H5Gcache.c \
H5Gcompact.c H5Gdense.c H5Gdeprec.c H5Gent.c \
H5Gint.c H5Glink.c \
H5Gloc.c H5Gname.c H5Gnode.c H5Gobj.c H5Goh.c H5Groot.c H5Gstab.c H5Gtest.c \
H5Gtraverse.c \
H5HF.c H5HFbtree2.c H5HFcache.c H5HFdbg.c H5HFdblock.c H5HFdtable.c \
- H5HFhdr.c H5HFhuge.c H5HFiblock.c H5HFiter.c H5HFman.c H5HFsection.c \
- H5HFspace.c H5HFstat.c H5HFtest.c H5HFtiny.c \
+ H5HFhdr.c H5HFhuge.c H5HFiblock.c H5HFint.c H5HFiter.c H5HFman.c \
+ H5HFsection.c H5HFspace.c H5HFstat.c H5HFtest.c H5HFtiny.c \
H5HG.c H5HGcache.c H5HGdbg.c H5HGquery.c \
H5HL.c H5HLcache.c H5HLdbg.c H5HLint.c H5HLprfx.c H5HLdblk.c\
H5HP.c H5I.c H5Itest.c H5L.c H5Lexternal.c H5lib_settings.c \
@@ -80,12 +81,14 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \
H5O.c H5Oainfo.c H5Oalloc.c H5Oattr.c \
H5Oattribute.c H5Obogus.c H5Obtreek.c H5Ocache.c H5Ochunk.c \
H5Ocont.c H5Ocopy.c H5Odbg.c H5Odrvinfo.c H5Odtype.c H5Oefl.c \
- H5Ofill.c H5Ofsinfo.c H5Oginfo.c \
+ H5Ofill.c H5Oflush.c H5Ofsinfo.c H5Oginfo.c \
H5Olayout.c \
H5Olinfo.c H5Olink.c H5Omessage.c H5Omtime.c \
- H5Oname.c H5Onull.c H5Opline.c H5Orefcount.c \
- H5Osdspace.c H5Oshared.c H5Ostab.c \
- H5Oshmesg.c H5Otest.c H5Ounknown.c \
+ H5Oname.c H5Onull.c H5Opline.c H5Oproxy.c H5Orefcount.c \
+ H5Osdspace.c H5Oshared.c \
+ H5Oshmesg.c \
+ H5Ostab.c \
+ H5Otest.c H5Ounknown.c \
H5P.c H5Pacpl.c H5Pdapl.c H5Pdcpl.c \
H5Pdeprec.c H5Pdxpl.c H5Pencdec.c \
H5Pfapl.c H5Pfcpl.c H5Pfmpl.c \
@@ -126,7 +129,7 @@ include_HEADERS = hdf5.h H5api_adpt.h H5overflow.h H5pubconf.h H5public.h H5vers
H5Cpublic.h H5Dpublic.h \
H5Epubgen.h H5Epublic.h H5Fpublic.h \
H5FDpublic.h H5FDcore.h H5FDdirect.h \
- H5FDfamily.h H5FDlog.h H5FDmpi.h H5FDmpio.h \
+ H5FDfamily.h H5FDlog.h H5FDmpi.h H5FDmpio.h \
H5FDmulti.h H5FDsec2.h H5FDstdio.h \
H5Gpublic.h H5Ipublic.h H5Lpublic.h \
H5MMpublic.h H5Opublic.h H5Ppublic.h \
diff --git a/test/AtomicWriterReader.txt b/test/AtomicWriterReader.txt
new file mode 100644
index 0000000..dc0a3bd
--- /dev/null
+++ b/test/AtomicWriterReader.txt
@@ -0,0 +1,48 @@
+Atomic Tests Instructions
+=========================
+
+Purpose:
+--------
+This documents how to build and run the Atomic Writer and Reader tests.
+The atomic test is to verify if atomic read-write operation on a system works.
+The two programs are atomic_writer.c and atomic_reader.c.
+atomic_writer.c: is the "write" part of the test; and
+atomic_reader.c: is the "read" part of the test.
+
+Building the Tests
+------------------
+The two test parts are automatically built during the configure and make process.
+But to build them individually, you can run the following in the test/ directory:
+$ gcc -o atomic_writer atomic_writer.c
+$ gcc -o atomic_reader atomic_reader.c
+
+Running the Tests
+-----------------
+$ atomic_writer -n <number of integers to write> -i <number of iterations for writer>
+$ atomic_reader -n <number of integers to read> -i <number of iterations for reader>
+
+Note**
+(1) "atomic_data" is the data file used by both the writer/reader in the
+ current directory.
+(2) The value for -n should be the same for both the writer and the reader.
+(3) The values for options n and i should be positive integers.
+(4) For this version, the user has to provide both options -n and -i to run
+ the writer and the reader.
+(5) If the user wants to run the writer for a long time, just provides a
+ large number for -i.
+
+Examples
+--------
+$ ./atomic_writer -n 10000 -i 5
+  Try to atomically write a pattern of 10000 integers, and iterate the whole
+  write process 5 times.
+
+$ ./atomic_reader -n 10000 -i 2
+  Try to atomically read a pattern of 10000 integers, and iterate the whole
+  read process 2 times.
+ A summary is posted at the end. If all atomic reads are correct, it will not
+ show any read beyond "0 re-tries", that is all reads have succeeded in the
+ first read attempt.
+
+Remark:
+You usually want the writer to iterate more times than the reader so that
+the writing will not finish before reading is done.
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index d2dd282..3a69371 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -209,6 +209,8 @@ set (H5_TESTS
ohdr
stab
gheap
+ cache_logging
+ cork
pool
istore
bittests
@@ -216,6 +218,7 @@ set (H5_TESTS
dtypes
dsets
cmpd_dset
+ chunk_info
filter_fail
extend
external
@@ -232,6 +235,7 @@ set (H5_TESTS
app_ref
enum
set_extent
+ space_overflow
getname
vfd
ntypes
@@ -252,6 +256,7 @@ set (H5_TESTS
enc_dec_plist
enc_dec_plist_cross_platform
unregister
+ swmr
)
foreach (test ${H5_TESTS})
@@ -402,6 +407,21 @@ if (BUILD_SHARED_LIBS)
set_target_properties (links_env-shared PROPERTIES FOLDER test)
endif (BUILD_SHARED_LIBS)
+#-- Adding test for accum_swmr_reader
+# This has to be copied to the test directory for execve() to find it
+# and it can't be renamed (i.e., no <foo>-shared).
+add_executable (accum_swmr_reader ${HDF5_TEST_SOURCE_DIR}/accum_swmr_reader.c)
+TARGET_NAMING (accum_swmr_reader STATIC)
+TARGET_C_PROPERTIES (accum_swmr_reader STATIC " " " ")
+target_link_libraries (accum_swmr_reader ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+set_target_properties (accum_swmr_reader PROPERTIES FOLDER test)
+
+#-- Set accum dependencies
+set_target_properties (accum PROPERTIES DEPENDS accum_swmr_reader)
+if (BUILD_SHARED_LIBS)
+ set_target_properties (accum-shared PROPERTIES DEPENDS accum_swmr_reader-shared)
+endif (BUILD_SHARED_LIBS)
+
#-- Adding test for libinfo
set (GREP_RUNNER ${PROJECT_BINARY_DIR}/GrepRunner.cmake)
file (WRITE ${GREP_RUNNER}
diff --git a/test/CMakeTests.cmake b/test/CMakeTests.cmake
index 64031eb..fc8fdc3 100644
--- a/test/CMakeTests.cmake
+++ b/test/CMakeTests.cmake
@@ -43,6 +43,21 @@ set (HDF5_TEST_FILES
tnullspace.h5
)
+add_custom_command (
+ TARGET accum_swmr_reader
+ POST_BUILD
+ COMMAND ${CMAKE_COMMAND}
+ ARGS -E copy_if_different "$<TARGET_FILE:accum_swmr_reader>" "${PROJECT_BINARY_DIR}/H5TEST/accum_swmr_reader"
+)
+if (BUILD_SHARED_LIBS)
+ add_custom_command (
+ TARGET accum_swmr_reader
+ POST_BUILD
+ COMMAND ${CMAKE_COMMAND}
+ ARGS -E copy_if_different "$<TARGET_FILE:accum_swmr_reader>" "${PROJECT_BINARY_DIR}/H5TEST-shared/accum_swmr_reader"
+ )
+endif (BUILD_SHARED_LIBS)
+
foreach (h5_tfile ${HDF5_TEST_FILES})
set (dest "${PROJECT_BINARY_DIR}/H5TEST/${h5_tfile}")
add_custom_command (
@@ -229,6 +244,8 @@ set (HDF5_REFERENCE_TEST_FILES
be_data.h5
be_extlink1.h5
be_extlink2.h5
+ btree_idx_1_6.h5
+ btree_idx_1_8.h5
corrupt_stab_msg.h5
deflate.h5
family_v16_00000.h5
@@ -942,7 +959,7 @@ if (HDF5_TEST_VFD)
set (H5_VFD_TESTS
testhdf5
- accum
+# accum
lheap
ohdr
stab
diff --git a/test/Makefile.am b/test/Makefile.am
index d9c53d4..59ba43b 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -23,9 +23,23 @@ include $(top_srcdir)/config/commence.am
AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_builddir)/src
-# Test script for error_test and err_compat
-TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh
-SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT)
+# Test scripts--
+# testerror.sh: err_compat, error_test
+# testlibinfo.sh:
+# testcheck_version.sh: tcheck_version
+#   testlinks_env.sh: links_env
+# testflushrefresh.sh: flushrefresh
+# test_usecases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes
+# testswmr.sh: swmr*
+# testvdsswmr.sh: vds_swmr*
+TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh \
+ testswmr.sh testvdsswmr.sh testflushrefresh.sh test_usecases.sh
+SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT) \
+ flushrefresh$(EXEEXT) use_append_chunk$(EXEEXT) use_append_mchunks$(EXEEXT) use_disable_mdc_flushes$(EXEEXT) \
+ swmr_generator$(EXEEXT) swmr_reader$(EXEEXT) swmr_writer$(EXEEXT) \
+ swmr_remove_reader$(EXEEXT) swmr_remove_writer$(EXEEXT) swmr_addrem_writer$(EXEEXT) \
+ swmr_sparse_reader$(EXEEXT) swmr_sparse_writer$(EXEEXT) swmr_start_write$(EXEEXT) \
+ vds_swmr_gen$(EXEEXT) vds_swmr_reader$(EXEEXT) vds_swmr_writer$(EXEEXT)
if HAVE_SHARED_CONDITIONAL
TEST_SCRIPT += test_plugin.sh
SCRIPT_DEPEND += plugin$(EXEEXT)
@@ -43,18 +57,31 @@ check_SCRIPTS = $(TEST_SCRIPT)
TEST_PROG= testhdf5 lheap ohdr stab gheap cache cache_api cache_tagging \
pool accum hyperslab istore bittests dt_arith \
dtypes dsets cmpd_dset filter_fail extend external efc objcopy links unlink \
- big mtime fillval mount flush1 flush2 app_ref enum \
+ twriteorder big mtime fillval mount flush1 flush2 app_ref enum \
set_extent ttsafe enc_dec_plist enc_dec_plist_cross_platform\
getname vfd ntypes dangle dtransform reserved cross_read \
- freespace mf vds farray earray btree2 fheap file_image unregister
+ freespace mf vds farray earray btree2 fheap file_image unregister \
+ cache_logging cork swmr
-# List programs to be built when testing here. error_test and err_compat are
-# built at the same time as the other tests, but executed by testerror.sh.
+# List programs to be built when testing here.
+# error_test and err_compat are built at the same time as the other tests, but executed by testerror.sh.
# tcheck_version is used by testcheck_version.sh.
+# accum_swmr_reader is used by accum.c.
+# atomic_writer and atomic_reader are standalone programs.
+# links_env is used by testlinks_env.sh
+# flushrefresh is used by testflushrefresh.sh.
+# use_append_chunk, use_append_mchunks and use_disable_mdc_flushes are used by test_usecases.sh
+# swmr_* files (besides swmr.c) are used by testswmr.sh.
+# vds_swmr_* files are used by testvdsswmr.sh
# 'make check' doesn't run them directly, so they are not included in TEST_PROG.
# Also build testmeta, which is used for timings test. It builds quickly,
# and this lets automake keep all its test programs in one place.
-check_PROGRAMS=$(TEST_PROG) error_test err_compat tcheck_version testmeta links_env
+check_PROGRAMS=$(TEST_PROG) error_test err_compat tcheck_version \
+ testmeta accum_swmr_reader atomic_writer atomic_reader \
+ links_env flushrefresh use_append_chunk use_append_mchunks use_disable_mdc_flushes \
+ swmr_generator swmr_start_write swmr_reader swmr_writer swmr_remove_reader \
+ swmr_remove_writer swmr_addrem_writer swmr_sparse_reader swmr_sparse_writer \
+ swmr_check_compat_vfd vds_swmr_gen vds_swmr_reader vds_swmr_writer
if HAVE_SHARED_CONDITIONAL
check_PROGRAMS+= plugin
endif
@@ -66,7 +93,7 @@ endif
# --enable-build-all at configure time.
# The gen_old_* files can only be compiled with older versions of the library
# so do not appear in this list.
-BUILD_ALL_PROGS=gen_bad_ohdr gen_bogus gen_cross gen_deflate gen_filters gen_new_array \
+BUILD_ALL_PROGS=gen_bad_ohdr gen_bogus gen_cross gen_deflate gen_filters gen_idx gen_new_array \
gen_new_fill gen_new_group gen_new_mtime gen_new_super gen_noencoder \
gen_nullspace gen_udlinks space_overflow gen_filespace gen_specmetaread \
gen_sizes_lheap gen_file_image gen_plist
@@ -94,7 +121,7 @@ else
noinst_LTLIBRARIES=libh5test.la
endif
-libh5test_la_SOURCES=h5test.c testframe.c cache_common.c
+libh5test_la_SOURCES=h5test.c testframe.c cache_common.c swmr_common.c
# Use libhd5test.la to compile all of the tests
LDADD=libh5test.la $(LIBHDF5)
@@ -126,9 +153,11 @@ flush2.chkexe_: flush1.chkexe_
# specifying a file prefix or low-level driver. Changing the file
# prefix or low-level driver with environment variables will influence
# the temporary file name in ways that the makefile is not aware of.
-CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offset.h5 \
+CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 \
+ dset_offset.h5 chunk_fixed.h5 cache_logging.out \
max_compact_dataset.h5 simple.h5 set_local.h5 random_chunks.h5 \
- huge_chunks.h5 chunk_cache.h5 big_chunk.h5 chunk_expand.h5 \
+ huge_chunks.h5 chunk_cache.h5 big_chunk.h5 chunk_fast.h5 \
+ chunk_expand.h5 layout_extend.h5 swmr_fail.h5 partial_chunks.h5 \
copy_dcpl_newfile.h5 extend.h5 istore.h5 extlinks*.h5 frspace.h5 links*.h5 \
sys_file1 tfile[1-7].h5 th5s[1-4].h5 lheap.h5 fheap.h5 ohdr.h5 \
stab.h5 extern_[1-3].h5 extern_[1-4][ab].raw gheap[0-4].h5 \
@@ -137,7 +166,7 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse
stdio.h5 sec2.h5 dtypes[0-9].h5 dtypes1[0].h5 dt_arith[1-2].h5 tattr.h5 \
tselect.h5 mtime.h5 unlink.h5 unicode.h5 coord.h5 \
fillval_[0-9].h5 fillval.raw mount_[0-9].h5 testmeta.h5 ttime.h5 \
- trefer[1-3].h5 tvltypes.h5 tvlstr.h5 tvlstr2.h5 flush.h5 \
+ trefer[1-3].h5 tvltypes.h5 tvlstr.h5 tvlstr2.h5 twriteorder.dat flush.h5 \
enum1.h5 titerate.h5 ttsafe.h5 tarray1.h5 tgenprop.h5 \
tmisc[0-9]*.h5 set_extent[1-5].h5 ext[12].bin \
getname.h5 getname[1-3].h5 sec2_file.h5 direct_file.h5 \
@@ -151,7 +180,13 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse
new_multi_file_v16-r.h5 new_multi_file_v16-s.h5 \
split_get_file_image_test-m.h5 split_get_file_image_test-r.h5 \
file_image_core_test.h5.copy unregister_filter_1.h5 unregister_filter_2.h5 \
- vds_virt.h5 vds_src_[0-1].h5
+ vds_virt.h5 vds_src_[0-1].h5 \
+ swmr_data.h5 use_use_append_chunk.h5 use_append_mchunks.h5 use_disable_mdc_flushes.h5 \
+ flushrefresh.h5 flushrefresh_VERIFICATION_START \
+ flushrefresh_VERIFICATION_CHECKPOINT1 flushrefresh_VERIFICATION_CHECKPOINT2 \
+ flushrefresh_VERIFICATION_DONE atomic_data accum_swmr_big.h5 ohdr_swmr.h5 \
+ test_swmr*.h5 cache_logging.h5 vds_swmr.h5 vds_swmr_src_*.h5 \
+ earray_hdr_fd.h5
# Sources for testhdf5 executable
testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \
@@ -159,7 +194,13 @@ testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \
trefer.c trefstr.c tselect.c tskiplist.c tsohm.c ttime.c ttst.c tunicode.c \
tvlstr.c tvltypes.c
+# Sources for Use Cases
+use_append_chunk_SOURCES=use_append_chunk.c use_common.c
+use_append_mchunks_SOURCES=use_append_mchunks.c use_common.c
+use_disable_mdc_flushes_SOURCES=use_disable_mdc_flushes.c
+
# Temporary files.
-DISTCLEANFILES=testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh test_plugin.sh
+DISTCLEANFILES=testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh test_plugin.sh \
+ testswmr.sh testvdsswmr.sh test_usecases.sh testflushrefresh.sh
include $(top_srcdir)/config/conclude.am
diff --git a/test/POSIX_Order_Write_Test_Report.docx b/test/POSIX_Order_Write_Test_Report.docx
new file mode 100644
index 0000000..cf6d1dc
--- /dev/null
+++ b/test/POSIX_Order_Write_Test_Report.docx
Binary files differ
diff --git a/test/POSIX_Order_Write_Test_Report.pdf b/test/POSIX_Order_Write_Test_Report.pdf
new file mode 100644
index 0000000..0c678c4
--- /dev/null
+++ b/test/POSIX_Order_Write_Test_Report.pdf
Binary files differ
diff --git a/test/SWMR_POSIX_Order_UG.txt b/test/SWMR_POSIX_Order_UG.txt
new file mode 100644
index 0000000..2771af1
--- /dev/null
+++ b/test/SWMR_POSIX_Order_UG.txt
@@ -0,0 +1,94 @@
+POSIX Write Order Test Instructions
+===================================
+
+Purpose
+-------
+This document shows the requirements, implementation design, and instructions
+of building and running the POSIX Write Order test. The name of the
+test is twriteorder and it resides in the test/ directory.
+
+Requirements
+------------
+The test is to verify that the write order is strictly consistent.
+The SWMR feature requires that the order of write is strictly consistent.
+"Strict consistency in computer science is the most stringent consistency
+model. It says that a read operation has to return the result of the
+latest write operation which occurred on that data item."--
+(http://en.wikipedia.org/wiki/Linearizability#Definition_of_linearizability).
+This is also an alternative form of what POSIX write requires: after a
+write operation has returned success, all reads issued afterward should
+get the same data the write has written.
+
+Implementation Design
+---------------------
+The test simulates what SWMR does by writing chained blocks and see if
+they can be read back correctly.
+There is a writer process and a read process.
+The file is divided into 2KB partitions. Then writer writes 1 chained
+block, each of 1KB big, in each partition after the first partition.
+Each chained block has this structure:
+Byte 0-3: offset address of its child block. The last child uses 0 as NULL.
+Byte 4-1023: some artificial data.
+The child block address of Block 1 is NULL (0).
+The child block address of Block 2 is the offset address of Block 1.
+The child block address of Block n is the offset address of Block n-1.
+After all n blocks are written, the offset address of Block n is written
+to the offset 0 of the first partition.
+Therefore, by the time the offset address of Block n is written to this
+position, all n chain-linked blocks have been written.
+
+The other reader processes will try to read the address value at the
+offset 0. The value is initially NULL(0). When it changes to non-zero,
+it signifies the writer process has written all the chain-link blocks
+and they are ready for the reader processes to access.
+
+If the system in which the writer and reader processes run maintains
+strict write order, the readers will always get all chain-linked blocks
+correctly. If the order of writes is not maintained, some reader
+processes may find unexpected block data.
+
+Building the Tests
+------------------
+The name of the test is twriteorder in the test directory. It is added
+to the test suite and is built during the "make" process and is run by
+the test_usecases.sh test. Users may inspect test/test_usecases.sh.in
+to see the examples of testing.
+
+Running the Tests
+-----------------
+twriteorder test accepts the following options:
+$ ./twriteorder -h
+usage: twriteorder [OPTIONS]
+ OPTIONS
+ -h Print a usage message and exit
+ -l w|r launch writer or reader only. [default: launch both]
+ -b N Block size [default: 1024]
+ -p N Partition size [default: 2048]
+ -n N Number of linked blocks [default: 512]
+
+More Examples
+-------------
+
+# run test with default parameters and launch both writer and reader
+#processes.
+$ twriteorder
+
+# run test with blocksize of 1000 bytes (default is 1024 bytes).
+$ twriteorder -b 1000
+
+# run test with partition size of 3000 bytes (default is 2048 bytes).
+$ twriteorder -p 3000
+
+# run test with 2000 linked blocks (default is 512 blocks).
+$ twriteorder -n 2000
+
+# Launch only the writer process.
+$ twriteorder -l w
+
+# Launch only the reader process.
+$ twriteorder -l r
+
+Note that if you want to launch the writer and the reader processes
+manually (for example in different machines sharing a common file system),
+you need to start the writer process (-l w) first, and then the reader
+process (-l r).
+
diff --git a/test/SWMR_UseCase_UG.txt b/test/SWMR_UseCase_UG.txt
new file mode 100644
index 0000000..e29944a
--- /dev/null
+++ b/test/SWMR_UseCase_UG.txt
@@ -0,0 +1,223 @@
+1. Title:
+ User Guide for SWMR Use Case Programs
+
+2. Purpose:
+    This is a User Guide of the SWMR Use Case programs. It describes the use
+    case programs and explains how to run them.
+
+2.1. Author and Dates:
+ Version 2: By Albert Cheng (acheng@hdfgroup.org), 2013/06/18.
+ Version 1: By Albert Cheng (acheng@hdfgroup.org), 2013/06/01.
+
+
+%%%%Use Case 1.7%%%%
+
+3. Use Case [1.7]:
+ Appending a single chunk
+
+3.1. Program name:
+ use_append_chunk
+
+3.2. Description:
+ Appending a single chunk of raw data to a dataset along an unlimited
+ dimension within a pre-created file and reading the new data back.
+
+ It first creates one 3d dataset using chunked storage, each chunk
+ is a (1, chunksize, chunksize) square. The dataset is (unlimited,
+ chunksize, chunksize). Data type is 2 bytes integer. It starts out
+ "empty", i.e., first dimension is 0.
+
+ The writer then appends planes, each of (1,chunksize,chunksize)
+    to the dataset.  Fills each plane with the plane number and then writes
+ it at the nth plane. Increases the plane number and repeats till
+ the end of dataset, when it reaches chunksize long. End product is
+ a chunksize^3 cube.
+
+ The reader is a separated process, running in parallel with
+ the writer. It reads planes from the dataset. It expects the
+ dataset is being changed (growing). It checks the unlimited dimension
+ (dimension[0]). When it increases, it will read in the new planes, one
+    by one, and verify the data correctness.  (The nth plane should contain
+ all "n".) When the unlimited dimension grows to the chunksize (it
+ becomes a cube), that is the expected end of data, the reader exits.
+
+3.3. How to run the program:
+ Simplest way is
+ $ use_append_chunk
+
+ It creates a skeleton dataset (0,256,256) of shorts. Then fork off
+ a process, which becomes the reader process to read planes from the
+ dataset, while the original process continues as the writer process
+ to append planes onto the dataset.
+
+ Other possible options:
+
+ 1. -z option: different chunksize. Default is 256.
+ $ use_append_chunk -z 1024
+
+ It uses (1,1024,1024) chunks to produce a 1024^3 cube, about 2GB big.
+
+
+ 2. -f filename: different dataset file name
+ $ use_append_chunk -f /gpfs/tmp/append_data.h5
+
+ The data file is /gpfs/tmp/append_data.h5. This allows two independent
+ processes in separated compute nodes to access the datafile on the
+ shared /gpfs file system.
+
+
+ 3. -l option: launch only the reader or writer process.
+ $ use_append_chunk -f /gpfs/tmp/append_data.h5 -l w # in node X
+ $ use_append_chunk -f /gpfs/tmp/append_data.h5 -l r # in node Y
+
+ In node X, launch the writer process, which creates the data file
+ and appends to it.
+ In node Y, launch the read process to read the data file.
+
+ Note that you need to time the read process to start AFTER the write
+ process has created the skeleton data file. Otherwise, the reader
+ will encounter errors such as data file not found.
+
+ 4. -n option: number of planes to write/read. Default is same as the
+ chunk size as specified by option -z.
+       $ use_append_chunk -n 1000	# 1000 planes are written and read.
+
+ 5. -s option: use SWMR file access mode or not. Default is yes.
+ $ use_append_chunk -s 0
+
+ It opens the HDF5 data file without the SWMR access mode (0 means
+ off). This likely will result in error. This option is provided for
+       users to see the effect of the needed SWMR access mode for concurrent
+ access.
+
+3.4. Test Shell Script:
+ The Use Case program is installed in the test/ directory and is
+ compiled as part of the make process. A test script (test_usecases.sh)
+ is installed in the same directory to test the use case programs. The
+ test script is rather basic and is more for demonstrating how to
+ use the program.
+
+
+%%%%Use Case 1.8%%%%
+
+4. Use Case [1.8]:
+ Appending a hyperslab of multiple chunks.
+
+4.1. Program name:
+ use_append_mchunks
+
+4.2. Description:
+ Appending a hyperslab that spans several chunks of a dataset with
+ unlimited dimensions within a pre-created file and reading the new
+ data back.
+
+ It first creates one 3d dataset using chunked storage, each chunk is a (1,
+ chunksize, chunksize) square. The dataset is (unlimited, 2*chunksize,
+ 2*chunksize). Data type is 2 bytes integer. Therefore, each plane
+ consists of 4 chunks. It starts out "empty", i.e., first dimension is 0.
+
+ The writer then appends planes, each of (1,2*chunksize,2*chunksize)
+    to the dataset.  Fills each plane with the plane number and then writes
+ it at the nth plane. Increases the plane number and repeats till
+ the end of dataset, when it reaches chunksize long. End product is
+ a (2*chunksize)^3 cube.
+
+ The reader is a separated process, running in parallel with
+ the writer. It reads planes from the dataset. It expects the
+ dataset is being changed (growing). It checks the unlimited dimension
+ (dimension[0]). When it increases, it will read in the new planes, one
+    by one, and verify the data correctness.  (The nth plane should contain
+ all "n".) When the unlimited dimension grows to the 2*chunksize (it
+ becomes a cube), that is the expected end of data, the reader exits.
+
+4.3. How to run the program:
+ Simplest way is
+ $ use_append_mchunks
+
+ It creates a skeleton dataset (0,512,512) of shorts. Then fork off
+ a process, which becomes the reader process to read planes from the
+ dataset, while the original process continues as the writer process
+ to append planes onto the dataset.
+
+ Other possible options:
+
+ 1. -z option: different chunksize. Default is 256.
+ $ use_append_mchunks -z 512
+
+ It uses (1,512,512) chunks to produce a 1024^3 cube, about 2GB big.
+
+
+ 2. -f filename: different dataset file name
+ $ use_append_mchunks -f /gpfs/tmp/append_data.h5
+
+ The data file is /gpfs/tmp/append_data.h5. This allows two independent
+ processes in separated compute nodes to access the datafile on the
+ shared /gpfs file system.
+
+
+ 3. -l option: launch only the reader or writer process.
+ $ use_append_mchunks -f /gpfs/tmp/append_data.h5 -l w # in node X
+ $ use_append_mchunks -f /gpfs/tmp/append_data.h5 -l r # in node Y
+
+ In node X, launch the writer process, which creates the data file
+ and appends to it.
+ In node Y, launch the read process to read the data file.
+
+ Note that you need to time the read process to start AFTER the write
+ process has created the skeleton data file. Otherwise, the reader
+ will encounter errors such as data file not found.
+
+ 4. -n option: number of planes to write/read. Default is same as the
+ chunk size as specified by option -z.
+       $ use_append_mchunks -n 1000	# 1000 planes are written and read.
+
+ 5. -s option: use SWMR file access mode or not. Default is yes.
+ $ use_append_mchunks -s 0
+
+ It opens the HDF5 data file without the SWMR access mode (0 means
+ off). This likely will result in error. This option is provided for
+       users to see the effect of the needed SWMR access mode for concurrent
+ access.
+
+4.4. Test Shell Script:
+ The Use Case program is installed in the test/ directory and is
+ compiled as part of the make process. A test script (test_usecases.sh)
+ is installed in the same directory to test the use case programs. The
+ test script is rather basic and is more for demonstrating how to
+ use the program.
+
+
+%%%%Use Case 1.9%%%%
+
+5. Use Case [1.9]:
+ Appending n-1 dimensional planes
+
+5.1. Program names:
+ use_append_chunk and use_append_mchunks
+
+5.2. Description:
+ Appending n-1 dimensional planes or regions to a chunked dataset where
+ the data does not fill the chunk.
+
+ This means the chunks have multiple planes and when a plane is written,
+ only one of the planes in each chunk is written. This use case is
+ achieved by extending the previous use cases 1.7 and 1.8 by defining the
+ chunks to have more than 1 plane. The -y option is implemented for both
+ use_append_chunk and use_append_mchunks.
+
+5.3. How to run the program:
+ Simplest way is
+ $ use_append_mchunks -y 5
+
+ It creates a skeleton dataset (0,512,512), with storage chunks (5,512,512)
+ of shorts. It then proceeds like use case 1.8 by forking off a reader
+ process. The original process continues as the writer process that
+ writes 1 plane at a time, updating parts of the chunks involved. The
+ reader reads 1 plane at a time, retrieving data from partial chunks.
+
+ The other possible options will work just like the two use cases.
+
+5.4. Test Shell Script:
+ Commands are added with -y options to demonstrate how the two use case
+ programs can be used as for this use case.
+
diff --git a/test/accum.c b/test/accum.c
index 268b7e1..751e5df 100644
--- a/test/accum.c
+++ b/test/accum.c
@@ -18,13 +18,20 @@
#include "h5test.h"
#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_TESTING
#include "H5Fpkg.h"
-#include "H5FDprivate.h"
+#include "H5FDpkg.h"
#include "H5Iprivate.h"
/* Filename */
#define FILENAME "accum.h5"
+/* The file name is the same as the define in accum_swmr_reader.c */
+#define SWMR_FILENAME "accum_swmr_big.h5"
+/* The reader forked by test_swmr_write_big() */
+#define SWMR_READER "accum_swmr_reader"
+
/* "big" I/O test values */
#define BIG_BUF_SIZE (6 * 1024 * 1024)
@@ -50,6 +57,7 @@ unsigned test_read_after(const H5F_io_info_t *fio_info);
unsigned test_free(const H5F_io_info_t *fio_info);
unsigned test_big(const H5F_io_info_t *fio_info);
unsigned test_random_write(const H5F_io_info_t *fio_info);
+unsigned test_swmr_write_big(hbool_t newest_format);
/* Helper Function Prototypes */
void accum_printf(void);
@@ -124,6 +132,10 @@ main(void)
if(H5Fclose(fid) < 0) TEST_ERROR
HDremove(FILENAME);
+ /* This test uses a different file */
+ nerrors += test_swmr_write_big(TRUE);
+ nerrors += test_swmr_write_big(FALSE);
+
if(nerrors)
goto error;
puts("All metadata accumulator tests passed.");
@@ -164,7 +176,7 @@ test_write_read(const H5F_io_info_t *fio_info)
/* Allocate buffers */
write_buf = (int *)HDmalloc(1024 * sizeof(int));
HDassert(write_buf);
- read_buf = (int *)HDcalloc(1024, sizeof(int));
+ read_buf = (int *)HDcalloc((size_t)1024, sizeof(int));
HDassert(read_buf);
/* Fill buffer with data, zero out read buffer */
@@ -175,7 +187,7 @@ test_write_read(const H5F_io_info_t *fio_info)
/* Write 1KB at Address 0 */
if(accum_write(0, 1024, write_buf) < 0) FAIL_STACK_ERROR;
if(accum_read(0, 1024, read_buf) < 0) FAIL_STACK_ERROR;
- if(HDmemcmp(write_buf, read_buf, 1024) != 0) TEST_ERROR;
+ if(HDmemcmp(write_buf, read_buf, (size_t)1024) != 0) TEST_ERROR;
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -220,7 +232,7 @@ test_write_read_nonacc_front(const H5F_io_info_t *fio_info)
/* Allocate buffers */
write_buf = (int *)HDmalloc(2048 * sizeof(int));
HDassert(write_buf);
- read_buf = (int *)HDcalloc(2048, sizeof(int));
+ read_buf = (int *)HDcalloc((size_t)2048, sizeof(int));
HDassert(read_buf);
/* Fill buffer with data, zero out read buffer */
@@ -234,7 +246,7 @@ test_write_read_nonacc_front(const H5F_io_info_t *fio_info)
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
if(accum_write(1024, 1024, write_buf) < 0) FAIL_STACK_ERROR;
if(accum_read(0, 1024, read_buf) < 0) FAIL_STACK_ERROR;
- if(HDmemcmp(write_buf, read_buf, 1024) != 0) TEST_ERROR;
+ if(HDmemcmp(write_buf, read_buf, (size_t)1024) != 0) TEST_ERROR;
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -279,7 +291,7 @@ test_write_read_nonacc_end(const H5F_io_info_t *fio_info)
/* Allocate buffers */
write_buf = (int *)HDmalloc(2048 * sizeof(int));
HDassert(write_buf);
- read_buf = (int *)HDcalloc(2048, sizeof(int));
+ read_buf = (int *)HDcalloc((size_t)2048, sizeof(int));
HDassert(read_buf);
/* Fill buffer with data, zero out read buffer */
@@ -293,7 +305,7 @@ test_write_read_nonacc_end(const H5F_io_info_t *fio_info)
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
if(accum_write(0, 1024, write_buf) < 0) FAIL_STACK_ERROR;
if(accum_read(1024, 1024, read_buf) < 0) FAIL_STACK_ERROR;
- if(HDmemcmp(write_buf, read_buf, 1024) != 0) TEST_ERROR;
+ if(HDmemcmp(write_buf, read_buf, (size_t)1024) != 0) TEST_ERROR;
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -525,7 +537,7 @@ test_accum_overlap(const H5F_io_info_t *fio_info)
/* Allocate buffers */
wbuf = (int32_t *)HDmalloc(4096 * sizeof(int32_t));
HDassert(wbuf);
- rbuf = (int32_t *)HDcalloc(4096, sizeof(int32_t));
+ rbuf = (int32_t *)HDcalloc((size_t)4096, sizeof(int32_t));
HDassert(rbuf);
/* Case 1: No metadata in accumulator */
@@ -697,7 +709,7 @@ test_accum_overlap_clean(const H5F_io_info_t *fio_info)
/* Allocate buffers */
wbuf = (int32_t *)HDmalloc(4096 * sizeof(int32_t));
HDassert(wbuf);
- rbuf = (int32_t *)HDcalloc(4096, sizeof(int32_t));
+ rbuf = (int32_t *)HDcalloc((size_t)4096, sizeof(int32_t));
HDassert(rbuf);
/* Case 1: No metadata in accumulator */
@@ -876,7 +888,7 @@ test_accum_non_overlap_size(const H5F_io_info_t *fio_info)
/* Allocate buffers */
wbuf = (int *)HDmalloc(4096 * sizeof(int32_t));
HDassert(wbuf);
- rbuf = (int *)HDcalloc(4096, sizeof(int32_t));
+ rbuf = (int *)HDcalloc((size_t)4096, sizeof(int32_t));
HDassert(rbuf);
/* Case 1: No metadata in accumulator */
@@ -943,7 +955,7 @@ test_accum_overlap_size(const H5F_io_info_t *fio_info)
/* Allocate buffers */
wbuf = (int32_t *)HDmalloc(4096 * sizeof(int32_t));
HDassert(wbuf);
- rbuf = (int32_t *)HDcalloc(4096, sizeof(int32_t));
+ rbuf = (int32_t *)HDcalloc((size_t)4096, sizeof(int32_t));
HDassert(rbuf);
/* Case 1: No metadata in accumulator */
@@ -1050,11 +1062,11 @@ test_accum_adjust(const H5F_io_info_t *fio_info)
/* Read back and verify first write */
if(accum_read((1024 * 1024), (1024 * 1024) - 1, rbuf) < 0) FAIL_STACK_ERROR;
- if(HDmemcmp(wbuf, rbuf, (1024 * 1024) - 1) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)((1024 * 1024) - 1)) != 0) TEST_ERROR;
/* Read back and verify second write */
if(accum_read((1024 * 1024) - 1024, 1024, rbuf) < 0) FAIL_STACK_ERROR;
- if(HDmemcmp(wbuf, rbuf, 1024) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)1024) != 0) TEST_ERROR;
/* Reset accumulator for next case */
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1080,10 +1092,10 @@ test_accum_adjust(const H5F_io_info_t *fio_info)
/* Read back and verify both pieces of data */
if(accum_read(1048576, 1048575, rbuf) < 0) FAIL_STACK_ERROR;
- if(HDmemcmp(wbuf, rbuf, 1048576) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)1048576) != 0) TEST_ERROR;
if(accum_read(5, 1048571, rbuf) < 0) FAIL_STACK_ERROR;
- if(HDmemcmp(wbuf, rbuf, 1048571) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)1048571) != 0) TEST_ERROR;
/* Reset accumulator for next case */
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1118,7 +1130,7 @@ test_accum_adjust(const H5F_io_info_t *fio_info)
/* Read in the piece we wrote to disk above, and then verify that
the data is as expected */
if(accum_read((1024 * 1024) - 1, 1024, rbuf) < 0) FAIL_STACK_ERROR;
- if(HDmemcmp(wbuf, rbuf, 1024) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)1024) != 0) TEST_ERROR;
/* Reset accumulator for next case */
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1158,7 +1170,7 @@ test_accum_adjust(const H5F_io_info_t *fio_info)
/* Read in the piece we wrote to disk above, and then verify that
the data is as expected */
if(accum_read(1048571, 349523, rbuf) < 0) FAIL_STACK_ERROR;
- if(HDmemcmp(wbuf, rbuf, 349523) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)349523) != 0) TEST_ERROR;
/* Reset accumulator for next case */
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1195,7 +1207,7 @@ test_accum_adjust(const H5F_io_info_t *fio_info)
/* Read in the piece we wrote to disk above, and then verify that
the data is as expected */
if(accum_read((1024 * 1024) - 5, 10, rbuf) < 0) FAIL_STACK_ERROR;
- if(HDmemcmp(wbuf, rbuf, 10) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)10) != 0) TEST_ERROR;
/* Reset accumulator for next case */
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1227,7 +1239,7 @@ test_accum_adjust(const H5F_io_info_t *fio_info)
/* Read in the piece we wrote to disk above, and then verify that
the data is as expected */
if(accum_read(1048571, 349523, rbuf) < 0) FAIL_STACK_ERROR;
- if(HDmemcmp(wbuf, rbuf, 349523) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)349523) != 0) TEST_ERROR;
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1310,7 +1322,7 @@ test_read_after(const H5F_io_info_t *fio_info)
/* Read in the piece we wrote to disk above, and then verify that
the data is as expected */
if(accum_read(512, 512, rbuf) < 0) FAIL_STACK_ERROR;
- if(HDmemcmp(wbuf, rbuf, 128) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)128) != 0) TEST_ERROR;
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1352,13 +1364,13 @@ test_big(const H5F_io_info_t *fio_info)
unsigned u; /* Local index variable */
/* Allocate space for the write & read buffers */
- wbuf = (uint8_t *)HDmalloc(BIG_BUF_SIZE);
+ wbuf = (uint8_t *)HDmalloc((size_t)BIG_BUF_SIZE);
HDassert(wbuf);
- wbuf2 = (uint8_t *)HDmalloc(BIG_BUF_SIZE);
+ wbuf2 = (uint8_t *)HDmalloc((size_t)BIG_BUF_SIZE);
HDassert(wbuf2);
- rbuf = (uint8_t *)HDcalloc(BIG_BUF_SIZE + 1536, 1);
+ rbuf = (uint8_t *)HDcalloc((size_t)(BIG_BUF_SIZE + 1536), (size_t)1);
HDassert(rbuf);
- zbuf = (uint8_t *)HDcalloc(BIG_BUF_SIZE + 1536, 1);
+ zbuf = (uint8_t *)HDcalloc((size_t)(BIG_BUF_SIZE + 1536), (size_t)1);
HDassert(zbuf);
/* Initialize write buffers */
@@ -1376,12 +1388,12 @@ test_big(const H5F_io_info_t *fio_info)
if(accum_read(0, BIG_BUF_SIZE, rbuf) < 0) FAIL_STACK_ERROR;
/* Verify data read */
- if(HDmemcmp(wbuf, rbuf, BIG_BUF_SIZE) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)BIG_BUF_SIZE) != 0) TEST_ERROR;
/* Reset data in file back to zeros & reset the read buffer */
if(accum_write(0, BIG_BUF_SIZE, zbuf) < 0) FAIL_STACK_ERROR;
- HDmemset(rbuf, 0, BIG_BUF_SIZE);
+ HDmemset(rbuf, 0, (size_t)BIG_BUF_SIZE);
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1393,14 +1405,14 @@ test_big(const H5F_io_info_t *fio_info)
if(accum_read(0, BIG_BUF_SIZE, rbuf) < 0) FAIL_STACK_ERROR;
/* Verify data read */
- if(HDmemcmp(zbuf, rbuf, 1024) != 0) TEST_ERROR;
- if(HDmemcmp(wbuf, rbuf + 1024, 1024) != 0) TEST_ERROR;
- if(HDmemcmp(zbuf, rbuf + 2048, (BIG_BUF_SIZE - 2048)) != 0) TEST_ERROR;
+ if(HDmemcmp(zbuf, rbuf, (size_t)1024) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf + 1024, (size_t)1024) != 0) TEST_ERROR;
+ if(HDmemcmp(zbuf, rbuf + 2048, (size_t)(BIG_BUF_SIZE - 2048)) != 0) TEST_ERROR;
/* Reset data in file back to zeros & reset the read buffer */
if(accum_write(1024, 1024, zbuf) < 0) FAIL_STACK_ERROR;
- HDmemset(rbuf, 0, BIG_BUF_SIZE);
+ HDmemset(rbuf, 0, (size_t)BIG_BUF_SIZE);
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1412,13 +1424,13 @@ test_big(const H5F_io_info_t *fio_info)
if(accum_read(0, BIG_BUF_SIZE, rbuf) < 0) FAIL_STACK_ERROR;
/* Verify data read */
- if(HDmemcmp(zbuf, rbuf, (BIG_BUF_SIZE - 512)) != 0) TEST_ERROR;
- if(HDmemcmp(wbuf, rbuf + (BIG_BUF_SIZE - 512), 512) != 0) TEST_ERROR;
+ if(HDmemcmp(zbuf, rbuf, (size_t)(BIG_BUF_SIZE - 512)) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf + (BIG_BUF_SIZE - 512), (size_t)512) != 0) TEST_ERROR;
/* Reset data in file back to zeros & reset the read buffer */
if(accum_write(BIG_BUF_SIZE - 512, 1024, zbuf) < 0) FAIL_STACK_ERROR;
- HDmemset(rbuf, 0, BIG_BUF_SIZE);
+ HDmemset(rbuf, 0, (size_t)BIG_BUF_SIZE);
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1430,13 +1442,13 @@ test_big(const H5F_io_info_t *fio_info)
if(accum_read(512, BIG_BUF_SIZE, rbuf) < 0) FAIL_STACK_ERROR;
/* Verify data read */
- if(HDmemcmp(wbuf + 512, rbuf, 512) != 0) TEST_ERROR;
- if(HDmemcmp(zbuf, rbuf + 512, (BIG_BUF_SIZE - 512)) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf + 512, rbuf, (size_t)512) != 0) TEST_ERROR;
+ if(HDmemcmp(zbuf, rbuf + 512, (size_t)(BIG_BUF_SIZE - 512)) != 0) TEST_ERROR;
/* Reset data in file back to zeros & reset the read buffer */
if(accum_write(0, 1024, zbuf) < 0) FAIL_STACK_ERROR;
- HDmemset(rbuf, 0, BIG_BUF_SIZE);
+ HDmemset(rbuf, 0, (size_t)BIG_BUF_SIZE);
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1453,12 +1465,12 @@ test_big(const H5F_io_info_t *fio_info)
if(accum_read(0, BIG_BUF_SIZE, rbuf) < 0) FAIL_STACK_ERROR;
/* Verify data read */
- if(HDmemcmp(wbuf2, rbuf, BIG_BUF_SIZE) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf2, rbuf, (size_t)BIG_BUF_SIZE) != 0) TEST_ERROR;
/* Reset data in file back to zeros & reset the read buffer */
if(accum_write(0, BIG_BUF_SIZE, zbuf) < 0) FAIL_STACK_ERROR;
- HDmemset(rbuf, 0, BIG_BUF_SIZE);
+ HDmemset(rbuf, 0, (size_t)BIG_BUF_SIZE);
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1475,13 +1487,13 @@ test_big(const H5F_io_info_t *fio_info)
if(accum_read(0, BIG_BUF_SIZE + 512, rbuf) < 0) FAIL_STACK_ERROR;
/* Verify data read */
- if(HDmemcmp(wbuf2, rbuf, BIG_BUF_SIZE) != 0) TEST_ERROR;
- if(HDmemcmp(wbuf + 512, rbuf + BIG_BUF_SIZE, 512) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf2, rbuf, (size_t)BIG_BUF_SIZE) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf + 512, rbuf + BIG_BUF_SIZE, (size_t)512) != 0) TEST_ERROR;
/* Reset data in file back to zeros & reset the read buffer */
if(accum_write(0, BIG_BUF_SIZE + 512, zbuf) < 0) FAIL_STACK_ERROR;
- HDmemset(rbuf, 0, BIG_BUF_SIZE + 512);
+ HDmemset(rbuf, 0, (size_t)(BIG_BUF_SIZE + 512));
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1502,14 +1514,14 @@ test_big(const H5F_io_info_t *fio_info)
if(accum_read(0, BIG_BUF_SIZE + 1024, rbuf) < 0) FAIL_STACK_ERROR;
/* Verify data read */
- if(HDmemcmp(wbuf2, rbuf, BIG_BUF_SIZE) != 0) TEST_ERROR;
- if(HDmemcmp(zbuf, rbuf + BIG_BUF_SIZE, 512) != 0) TEST_ERROR;
- if(HDmemcmp(wbuf, rbuf + BIG_BUF_SIZE + 512, 512) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf2, rbuf, (size_t)BIG_BUF_SIZE) != 0) TEST_ERROR;
+ if(HDmemcmp(zbuf, rbuf + BIG_BUF_SIZE, (size_t)512) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf + BIG_BUF_SIZE + 512, (size_t)512) != 0) TEST_ERROR;
/* Reset data in file back to zeros & reset the read buffer */
if(accum_write(0, BIG_BUF_SIZE + 1536, zbuf) < 0) FAIL_STACK_ERROR;
- HDmemset(rbuf, 0, BIG_BUF_SIZE + 1024);
+ HDmemset(rbuf, 0, (size_t)(BIG_BUF_SIZE + 1024));
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1531,13 +1543,13 @@ test_big(const H5F_io_info_t *fio_info)
if(accum_read(0, BIG_BUF_SIZE + 1536, rbuf) < 0) FAIL_STACK_ERROR;
/* Verify data read */
- if(HDmemcmp(zbuf, rbuf, 1536) != 0) TEST_ERROR;
- if(HDmemcmp(wbuf2, rbuf + 1536, BIG_BUF_SIZE) != 0) TEST_ERROR;
+ if(HDmemcmp(zbuf, rbuf, (size_t)1536) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf2, rbuf + 1536, (size_t)BIG_BUF_SIZE) != 0) TEST_ERROR;
/* Reset data in file back to zeros & reset the read buffer */
if(accum_write(1536, BIG_BUF_SIZE, zbuf) < 0) FAIL_STACK_ERROR;
- HDmemset(rbuf, 0, BIG_BUF_SIZE + 1536);
+ HDmemset(rbuf, 0, (size_t)(BIG_BUF_SIZE + 1536));
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1558,13 +1570,13 @@ test_big(const H5F_io_info_t *fio_info)
if(accum_read(0, BIG_BUF_SIZE + 512, rbuf) < 0) FAIL_STACK_ERROR;
/* Verify data read */
- if(HDmemcmp(zbuf, rbuf, 512) != 0) TEST_ERROR;
- if(HDmemcmp(wbuf2, rbuf + 512, BIG_BUF_SIZE) != 0) TEST_ERROR;
+ if(HDmemcmp(zbuf, rbuf, (size_t)512) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf2, rbuf + 512, (size_t)BIG_BUF_SIZE) != 0) TEST_ERROR;
/* Reset data in file back to zeros & reset the read buffer */
if(accum_write(512, BIG_BUF_SIZE, zbuf) < 0) FAIL_STACK_ERROR;
- HDmemset(rbuf, 0, BIG_BUF_SIZE + 512);
+ HDmemset(rbuf, 0, (size_t)(BIG_BUF_SIZE + 512));
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1585,14 +1597,14 @@ test_big(const H5F_io_info_t *fio_info)
if(accum_read(0, BIG_BUF_SIZE + 1536, rbuf) < 0) FAIL_STACK_ERROR;
/* Verify data read */
- if(HDmemcmp(wbuf, rbuf, 1024) != 0) TEST_ERROR;
- if(HDmemcmp(zbuf, rbuf + 1024, 512) != 0) TEST_ERROR;
- if(HDmemcmp(wbuf2, rbuf + 1536, BIG_BUF_SIZE) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)1024) != 0) TEST_ERROR;
+ if(HDmemcmp(zbuf, rbuf + 1024, (size_t)512) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf2, rbuf + 1536, (size_t)BIG_BUF_SIZE) != 0) TEST_ERROR;
/* Reset data in file back to zeros & reset the read buffer */
if(accum_write(0, BIG_BUF_SIZE + 1536, zbuf) < 0) FAIL_STACK_ERROR;
- HDmemset(rbuf, 0, BIG_BUF_SIZE + 1536);
+ HDmemset(rbuf, 0, (size_t)(BIG_BUF_SIZE + 1536));
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1613,8 +1625,8 @@ test_big(const H5F_io_info_t *fio_info)
if(accum_read(0, BIG_BUF_SIZE + 512, rbuf) < 0) FAIL_STACK_ERROR;
/* Verify data read */
- if(HDmemcmp(wbuf, rbuf, 512) != 0) TEST_ERROR;
- if(HDmemcmp(wbuf2, rbuf + 512, BIG_BUF_SIZE) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)512) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf2, rbuf + 512, (size_t)BIG_BUF_SIZE) != 0) TEST_ERROR;
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1666,9 +1678,9 @@ test_random_write(const H5F_io_info_t *fio_info)
unsigned u; /* Local index variable */
/* Allocate space for the write & read buffers */
- wbuf = (uint8_t *)HDmalloc(RANDOM_BUF_SIZE);
+ wbuf = (uint8_t *)HDmalloc((size_t)RANDOM_BUF_SIZE);
HDassert(wbuf);
- rbuf = (uint8_t *)HDcalloc(RANDOM_BUF_SIZE, 1);
+ rbuf = (uint8_t *)HDcalloc((size_t)RANDOM_BUF_SIZE, (size_t)1);
HDassert(rbuf);
/* Initialize write buffer */
@@ -1752,7 +1764,7 @@ HDfprintf(stderr, "Random # seed was: %u\n", seed);
if(accum_read(RANDOM_BASE_OFF, RANDOM_BUF_SIZE, rbuf) < 0) FAIL_STACK_ERROR;
/* Verify data read back in */
- if(HDmemcmp(wbuf, rbuf, RANDOM_BUF_SIZE) != 0) TEST_ERROR;
+ if(HDmemcmp(wbuf, rbuf, (size_t)RANDOM_BUF_SIZE) != 0) TEST_ERROR;
if(accum_reset(fio_info) < 0) FAIL_STACK_ERROR;
@@ -1777,6 +1789,216 @@ error:
return 1;
} /* end test_random_write() */
+/*-------------------------------------------------------------------------
+ * Function: test_swmr_write_big
+ *
+ * Purpose: A SWMR test: verifies that writing "large" metadata to a file
+ * opened with SWMR_WRITE will flush the existing metadata in the
+ * accumulator to disk first before writing the "large" metadata
+ * to disk.
+ * This test will fork and exec a reader "accum_swmr_reader" which
+ * opens the same file with SWMR_READ and verifies that the correct
+ * metadata is read from disk.
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi; April 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+unsigned
+test_swmr_write_big(hbool_t newest_format)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t fapl = -1; /* File access property list */
+ H5F_t *rf = NULL; /* File pointer */
+ uint8_t *wbuf2 = NULL, *rbuf = NULL; /* Buffers for reading & writing */
+ uint8_t wbuf[1024]; /* Buffer for reading & writing */
+ unsigned u; /* Local index variable */
+#ifdef H5_HAVE_UNISTD_H
+ pid_t pid; /* Process ID */
+#endif /* H5_HAVE_UNISTD_H */
+ int status; /* Status returned from child process */
+ H5F_io_info_t fio_info; /* I/O info for operation */
+ char *new_argv[] = {NULL};
+ char *driver = NULL; /* VFD string (from env variable) */
+
+ if(newest_format) {
+ TESTING("SWMR write of large metadata: with latest format");
+ } else {
+ TESTING("SWMR write of large metadata: with non-latest-format");
+ } /* end if */
+
+#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID))
+
+ SKIPPED();
+ HDputs(" Test skipped due to fork or waitpid not defined.");
+ return 0;
+
+#else /* defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) */
+
+ /* Skip this test if SWMR I/O is not supported for the VFD specified
+ * by the environment variable.
+ */
+ driver = HDgetenv("HDF5_DRIVER");
+ if (!H5FD_supports_swmr_test(driver)) {
+ SKIPPED();
+ HDputs(" Test skipped due to VFD not supporting SWMR I/O.");
+ return 0;
+ } /* end if */
+
+ /* File access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ FAIL_STACK_ERROR
+
+ /* Both cases will result in v3 superblock and version 2 object header for SWMR */
+ if(newest_format) { /* latest format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ if((fid = H5Fcreate(SWMR_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+ } else { /* non-latest-format */
+ if((fid = H5Fcreate(SWMR_FILENAME, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+ } /* end if */
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with SWMR_WRITE */
+ if((fid = H5Fopen(SWMR_FILENAME, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get H5F_t * to internal file structure */
+ if(NULL == (rf = (H5F_t *)H5I_object(fid))) FAIL_STACK_ERROR
+
+ /* Set up I/O info for operation */
+ fio_info.f = rf;
+ if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(H5P_DATASET_XFER_DEFAULT)))
+ FAIL_STACK_ERROR
+
+ /* We'll be writing lots of garbage data, so extend the
+ file a ways. 10MB should do. */
+ if(H5FD_set_eoa(rf->shared->lf, H5FD_MEM_DEFAULT, (haddr_t)(1024*1024*10)) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Reset metadata accumulator for the file */
+ if(accum_reset(&fio_info) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Allocate space for the write & read buffers */
+ if((wbuf2 = (uint8_t *)HDmalloc((size_t)BIG_BUF_SIZE)) == NULL)
+ FAIL_STACK_ERROR;
+ if((rbuf = (uint8_t *)HDmalloc((size_t)BIG_BUF_SIZE)) == NULL)
+ FAIL_STACK_ERROR;
+
+ /* Initialize wbuf with "0, 1, 2...1024"*/
+ for(u = 0; u < 1024; u++)
+ wbuf[u] = (uint8_t)u;
+
+ /* Write [1024, 1024] bytes with wbuf */
+ if(H5F_block_write(rf, H5FD_MEM_DEFAULT, (haddr_t)1024, (size_t)1024, H5P_DATASET_XFER_DEFAULT, wbuf) < 0)
+ FAIL_STACK_ERROR;
+ /* Read the data */
+ if(H5F_block_read(rf, H5FD_MEM_DEFAULT, (haddr_t)1024, (size_t)1024, H5P_DATASET_XFER_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+ /* Verify the data read is correct */
+ if(HDmemcmp(wbuf, rbuf, (size_t)1024) != 0)
+ TEST_ERROR;
+ /* Flush the data to disk */
+ if(accum_reset(&fio_info) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Initialize wbuf with all 1s */
+ for(u = 0; u < 1024; u++)
+ wbuf[u] = (uint8_t)1;
+
+ /* Initialize wbuf2 */
+ for(u = 0; u < BIG_BUF_SIZE; u++)
+ wbuf2[u] = (uint8_t)(u + 1);
+
+ /* Write [1024,1024] with wbuf--all 1s */
+ if(H5F_block_write(rf, H5FD_MEM_DEFAULT, (haddr_t)1024, (size_t)1024, H5P_DATASET_XFER_DEFAULT, wbuf) < 0)
+ FAIL_STACK_ERROR;
+ /* Read the data */
+ if(H5F_block_read(rf, H5FD_MEM_DEFAULT, (haddr_t)1024, (size_t)1024, H5P_DATASET_XFER_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+ /* Verify the data read is correct */
+ if(HDmemcmp(wbuf, rbuf, (size_t)1024) != 0)
+ TEST_ERROR;
+ /* The data stays in the accumulator */
+
+ /* Write a large piece of metadata [2048, BIG_BUF_SIZE] with wbuf2 */
+ if(H5F_block_write(rf, H5FD_MEM_DEFAULT, (haddr_t)2048, (size_t)BIG_BUF_SIZE, H5P_DATASET_XFER_DEFAULT, wbuf2) < 0)
+ FAIL_STACK_ERROR;
+ /* Read the data */
+ if(H5F_block_read(rf, H5FD_MEM_DEFAULT, (haddr_t)2048, (size_t)BIG_BUF_SIZE, H5P_DATASET_XFER_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+ /* Verify the data read is correct */
+ if(HDmemcmp(wbuf2, rbuf, (size_t)BIG_BUF_SIZE) != 0)
+ TEST_ERROR;
+
+ /* Fork child process to verify that the data at [1024, 1024] does get written to disk */
+ if((pid = HDfork()) < 0) {
+ HDperror("fork");
+ FAIL_STACK_ERROR;
+ } else if(0 == pid) { /* Child process */
+ /* Run the reader */
+ status = HDexecv(SWMR_READER, new_argv);
+ printf("errno from execv = %s\n", strerror(errno));
+ FAIL_STACK_ERROR;
+ } /* end if */
+
+ /* Parent process -- wait for the child process to complete */
+ while(pid != HDwaitpid(pid, &status, 0))
+ /*void*/;
+
+ /* Check if child process terminates normally and its return value */
+ if(WIFEXITED(status) && !WEXITSTATUS(status)) {
+ /* Flush the accumulator */
+ if(accum_reset(&fio_info) < 0)
+ FAIL_STACK_ERROR;
+ /* Close the property list */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Close and remove the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+ HDremove(SWMR_FILENAME);
+
+ /* Release memory */
+ if(wbuf2)
+ HDfree(wbuf2);
+ if(rbuf)
+ HDfree(rbuf);
+ PASSED();
+ return 0;
+ } /* end if */
+
+error:
+ /* Closing and remove the file */
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ HDremove(SWMR_FILENAME);
+ /* Release memory */
+ if(wbuf2)
+ HDfree(wbuf2);
+ if(rbuf)
+ HDfree(rbuf);
+
+ return 1;
+
+#endif
+
+} /* end test_swmr_write_big() */
+
/*-------------------------------------------------------------------------
* Function: accum_printf
diff --git a/test/accum_swmr_reader.c b/test/accum_swmr_reader.c
new file mode 100644
index 0000000..102845f
--- /dev/null
+++ b/test/accum_swmr_reader.c
@@ -0,0 +1,99 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#include "h5test.h"
+
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_TESTING
+
+#include "H5Fpkg.h"
+#include "H5FDpkg.h"
+#include "H5Iprivate.h"
+
+/* Filename: this is the same as the define in accum.c used by test_swmr_write_big() */
+#define SWMR_FILENAME "accum_swmr_big.h5"
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: This is the reader forked/execved by "test_swmr_write_big()"
+ * test in accum.c. The reader reads at address 1024 from the file
+ * and verifies that the metadata in the accumulator at address
+ * 1024 does get written to disk.
+ *
+ * Return: Success: EXIT_SUCCESS
+ * Failure: EXIT_FAILURE
+ *
+ * Programmer: Vailin Choi; June 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t fapl = -1; /* file access property list ID */
+ H5F_t *f = NULL; /* File pointer */
+ unsigned u; /* Local index variable */
+ uint8_t rbuf[1024]; /* Buffer for reading */
+ uint8_t buf[1024]; /* Buffer for holding the expected data */
+ char *driver = NULL; /* VFD string (from env variable) */
+
+ /* Skip this test if SWMR I/O is not supported for the VFD specified
+ * by the environment variable.
+ */
+ driver = HDgetenv("HDF5_DRIVER");
+ if(!H5FD_supports_swmr_test(driver)) {
+ return EXIT_SUCCESS;
+ }
+
+ /* Initialize buffers */
+ for(u = 0; u < 1024; u++) {
+ rbuf[u] = 0; /* The buffer for reading */
+ buf[u] = 1; /* The expected data should be all 1s */
+ }
+
+ if((fapl = h5_fileaccess()) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with SWMR_READ */
+ if((fid = H5Fopen(SWMR_FILENAME, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get H5F_t * to internal file structure */
+ if(NULL == (f = (H5F_t *)H5I_object(fid)))
+ FAIL_STACK_ERROR
+
+ /* Should read in [1024, 1024] with buf data */
+ if(H5F_block_read(f, H5FD_MEM_DEFAULT, (haddr_t)1024, (size_t)1024, H5P_DATASET_XFER_DEFAULT, rbuf) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data read is correct */
+ if(HDmemcmp(buf, rbuf, (size_t)1024) != 0)
+ TEST_ERROR;
+
+ /* Close the file */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ return EXIT_SUCCESS;
+
+error:
+ H5Fclose(fid);
+ return EXIT_FAILURE;
+} /* end main() */
+
diff --git a/test/atomic_reader.c b/test/atomic_reader.c
new file mode 100644
index 0000000..5d9ea68
--- /dev/null
+++ b/test/atomic_reader.c
@@ -0,0 +1,347 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/*-------------------------------------------------------------------------
+ *
+ * Created: atomic_reader.c
+ *
+ * Purpose: This is the "reader" part of the standalone test to check
+ * atomic read-write operation on a system.
+ * a) atomic_reader.c--the reader (this file)
+ * b) atomic_writer.c--the writer
+ * c) atomic_data--the name of the data file used by writer and reader
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define FILENAME "atomic_data"
+#define READ_TRIES 20
+#define OPEN_TRIES 50
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static void usage(void);
+int verify(int fd, unsigned int k);
+void print_info(int *info, unsigned int lastr, unsigned iteration);
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: usage
+ *
+ * Purpose: To print the command line options
+ *
+ * Parameters: None
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("Usage: atomic_reader -n <number of integers to read> -i <number of iterations for reader>\n");
+ printf(" Note**The number of integers for option n has to be positive\n");
+ printf(" Note**The number of integers for option i has to be positive\n");
+ printf("\n");
+} /* usage() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify
+ *
+ * Purpose: To verify that the data read is the pattern expected.
+ * Each integer read should be the same as the index.
+ * When a difference is encountered, the remaining integers
+ * read should be the same as the previous index.
+ * For example, the pattern expected should be either:
+ * a) 01234567....n-1
+ * or
+ * b) if at index 4, a difference is encountered,
+ * the remaining integers should be all "3"s as:
+ * 012333333333333
+ *
+ * Parameters:
+ * fd--the file descriptor
+ * k--the number of integers to read
+ *
+ * Return:
+ * positive on success
+ * negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+verify(int fd, unsigned int k)
+{
+ unsigned int i; /* local index variable */
+ ssize_t bytes_read; /* the number of bytes read */
+ unsigned int *buf = NULL; /* buffer to hold data read */
+
+ /* Allocate buffer for data read */
+ if((buf = (unsigned int *)malloc(k * sizeof(unsigned int))) == NULL) {
+ printf("READER: error from malloc\n");
+ goto error;
+ }
+
+ /* Position the file at the beginning */
+ if(lseek(fd, (off_t)0, SEEK_SET) < 0) {
+ printf("READER: error from lseek\n");
+ goto error;
+ }
+
+ /* Read the whole file */
+ if((bytes_read = read(fd, buf, (k * sizeof(unsigned int)))) < 0) {
+ printf("READER: error from read\n");
+ goto error;
+ }
+
+ /* Verify the bytes read are correct */
+ if(bytes_read != (ssize_t)(k*sizeof(unsigned int))) {
+ printf("READER: error from bytes read=%lu\n", (unsigned long)bytes_read);
+ goto error;
+ }
+
+ /* Verify data read */
+ for(i=0; i < k; i++) {
+ if(buf[i] != i)
+ break;
+ }
+
+ if(i < k) {
+ /* Compare the beginning and ending sentinel values */
+ if(buf[k-1] != (i-1)) {
+ printf("FAIL IN READER: ...beginning sentinel value=%u, i=%u\n", (i-1), i);
+ printf("FAIL IN READER: buf[%u]=%u\n", i-1, buf[i-1]);
+ printf("FAIL IN READER: buf[%u]=%u\n", i, buf[i]);
+ printf("FAIL IN READER: buf[%u]=%u\n", i+1, buf[i+1]);
+ printf("FAIL IN READER: ...ending sentinel value=%u\n", buf[k-1]);
+ goto error;
+ }
+ }
+
+ /* Free the buffer */
+ if(buf) free(buf);
+ return(0);
+
+error:
+ /* Free the buffer */
+ if(buf) free(buf);
+ return(-1);
+} /* verify() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: print_info
+ *
+ * Purpose: To print the statistics gathered for re-reads
+ *
+ * Parameters:
+ * info--the array storing the statistics for re-reads
+ * lastr--the last read completed
+ * iteration--the current iteration
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+print_info(int *info, unsigned int lastr, unsigned iteration)
+{
+ unsigned j; /* local index variable */
+
+ printf("--------statistics for %u reads (iteration %u)--------\n", lastr, iteration);
+
+ for(j = 0; j <= READ_TRIES; j++)
+ printf("# of %u re-tries = %u\n", j, info[j]);
+
+ printf("--------end statistics for %u reads (iteration %u)--------\n", lastr, iteration);
+} /* print_info() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: To verify that the data read is the pattern expected.
+ * (1) Make sure the file opens successfully and the # of bytes read is as expected
+ * (2) Iterate the reader with i iterations
+ * (3) Read and verify n integers for each iteration
+ * (4) On verification error, re-read the data at most READ_TRIES
+ * times to see if correct data can be obtained
+ * (5) Print out statistics for the number of re-retries for each iteration
+ *
+ * Note:
+ * (a) The # of integers (via -n option) used by the writer and reader should be the same.
+ * (b) The data file used by the writer and reader should be the same.
+ *
+ * Future enhancement:
+ * 1) Provide default values for n and i and allow user to run with either 0 or 1 option
+ * 2) Use HDF library HD<system calls> instead of the system calls
+ * 3) Handle large sized buffer (gigabytes) if needed
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(int argc, char *argv[])
+{
+ int fd = -1; /* file descriptor */
+ unsigned int j=0, i=0, m=0; /* local index variables */
+ int temp; /* temporary variable */
+ unsigned int iterations = 0; /* the input for "-i" */
+ unsigned num = 0; /* the input for "-n" */
+ int opt = 0; /* option char */
+ int info[READ_TRIES+1]; /* re-tries statistics */
+
+ /* Ensure the expected # of arguments */
+ if(argc != 5) {
+ usage();
+ exit(-1);
+ }
+
+ /* Parse command line options */
+ while((opt = getopt(argc, argv, "n:i:")) != -1) {
+ switch(opt) {
+ case 'n':
+ if((temp = atoi(optarg)) < 0) {
+ usage();
+ exit(-1);
+ }
+ num = (unsigned int)temp;
+ break;
+ case 'i':
+ if((temp = atoi(optarg)) < 0) {
+ usage();
+ exit(-1);
+ }
+ iterations = (unsigned int)temp;
+ break;
+ default:
+ printf("Invalid option encountered\n");
+ break;
+ }
+ }
+
+ printf("READER: number of integers to read = %u; # of iterations = %d\n", num, iterations);
+
+ printf("\n");
+ for(i = 1; i <= iterations; i++) { /* iteration loop */
+ unsigned opens = OPEN_TRIES;
+
+ printf("READER: *****start iteration %u*****\n", i);
+
+ /* Ensure open and file size are done properly */
+ while(opens--) { /* open loop */
+ struct stat sinfo;
+
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ if((fd = open(FILENAME, O_RDONLY, 0644)) < 0) {
+ printf("READER: error from open--retry open again\n");
+ } else {
+ printf("READER: open succeed\n");
+
+ if((fstat(fd, &sinfo) == 0) &&
+ (sinfo.st_size == (off_t)(num * sizeof(unsigned int)))) {
+ printf("READER: file size is correct--%u\n", (unsigned int)sinfo.st_size);
+ break;
+ }
+
+ printf("READER: error from fstat or file size of %u is incorrect--retry open again\n", (unsigned int)sinfo.st_size);
+ if(close(fd) < 0) {
+ printf("READER: error from close\n");
+ return(-1);
+ }
+ fd = -1;
+ }
+
+ } /* end while */
+
+ if(fd < 0) {
+ printf("READER: *****open failure/incorrect file size for all %u tries, continue next iteration*****\n\n", OPEN_TRIES);
+ continue;
+ }
+
+ memset(info, 0, sizeof(info));
+
+ /* Read and verify data */
+ for(j = 1; j <= num; j++) { /* read loop */
+
+ printf("READER: doing read %u\n", j);
+ if(verify(fd, num) < 0) {
+ printf("READER: error from read %u\n", j);
+
+ /* Perform re-read to see if correct data is obtained */
+ for(m = 1; m <= READ_TRIES; m++) { /* re-read loop */
+ printf("READER: ===============going to do re-read try %u\n", m);
+ if(verify(fd, num) < 0)
+ printf("READER: ===============error from re-read try %u\n", m);
+ else {
+ ++info[m];
+ printf("READER: ===============SUCCESS from re-read try %u\n", m);
+ break;
+ }
+ } /* end for */
+
+ if(m > READ_TRIES) {
+ printf("READER: ===============error from all re-read tries: %u\n", READ_TRIES);
+ printf("READER:*****ERROR--stop on read %u\n", j);
+ break;
+ }
+ } else {
+ ++info[0];
+ printf("READER: success from read %u\n", j);
+ }
+
+ } /* end for */
+
+ /* Print the statistics for re-reads */
+ print_info(info, j-1, i);
+
+ /* Close the file */
+ if(close(fd) < 0) {
+ printf("READER: error from close\n");
+ return(-1);
+ }
+
+ printf("READER: *****end iteration %u*****\n\n", i);
+
+ } /* end for */
+
+ return(0);
+}
diff --git a/test/atomic_writer.c b/test/atomic_writer.c
new file mode 100644
index 0000000..9f37ecc
--- /dev/null
+++ b/test/atomic_writer.c
@@ -0,0 +1,230 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: atomic_writer.c
+ *
+ * Purpose: This is the "writer" part of the standalone test to check
+ * atomic read-write operation on a system.
+ * a) atomic_writer.c--the writer (this file)
+ * b) atomic_reader.c--the reader
+ * c) atomic_data--the name of the data file used by writer and reader
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define FILENAME "atomic_data"
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: usage
+ *
+ * Purpose: To print information about the command line options
+ *
+ * Parameters: None
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("Usage: atomic_writer -n <number of integers to write> -i <number of iterations for writer>\n");
+ printf(" Note**The number of integers for option n has to be positive\n");
+ printf(" Note**The number of integers for option i has to be positive\n");
+ printf("\n");
+} /* usage() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: To write a series of integers to a file for the reader to verify the data.
+ * A write is atomic if the whole amount written in one operation is not interleaved
+ * with data from any other process.
+ * (1) Iterate with i iterations
+ * (2) Write a series of integers (0 to n-1) to the file with this pattern:
+ * offset 0: 0000000000000000000000000000000
+ * offset 1: 111111111111111111111111111111
+ * offset 2: 22222222222222222222222222222
+ * offset 3: 3333333333333333333333333333
+ * ...
+ * ...
+ * offset n-1: (n-1)
+ *
+ * At the end of the writes, the data in the file will be:
+ * 01234567........(n-1)
+ *
+ * Note:
+ * (a) The # of integers (via -n option) used by the writer and reader should be the same.
+ * (b) The data file used by the writer and reader should be the same.
+ *
+ * Future enhancement:
+ * 1) Provide default values for n and i and allow user to run with either 0 or 1 option
+ * 2) Use HDF library HD<system calls> instead of the system calls
+ * 3) Handle large sized buffer (gigabytes) if needed
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(int argc, char *argv[])
+{
+ int fd = -1; /* file descriptor */
+ ssize_t bytes_wrote; /* the nubmer of bytes written */
+ unsigned int *buf = NULL; /* buffer to hold written data */
+ unsigned int n, u, i; /* local index variable */
+ int temp; /* temporary variable */
+ unsigned int iterations = 0; /* the input for "-i" */
+ unsigned int num = 0; /* the input for "-n" */
+ int opt = 0; /* option char */
+
+ /* Ensure the # of arguments is as expected */
+ if(argc != 5) {
+ usage();
+ exit(-1);
+ }
+
+ /* Parse command line options */
+ while((opt = getopt(argc, argv, "n:i:")) != -1) {
+ switch(opt) {
+ case 'n':
+ if((temp = atoi(optarg)) < 0) {
+ usage();
+ exit(-1);
+ }
+ num = (unsigned int)temp;
+ break;
+ case 'i':
+ if((temp = atoi(optarg)) < 0) {
+ usage();
+ exit(-1);
+ }
+ iterations = (unsigned int)temp;
+ break;
+ default:
+ printf("Invalid option encountered\n");
+ break;
+ }
+ }
+
+ printf("WRITER: # of integers to write = %u; # of iterations = %d\n", num, iterations);
+
+ /* Remove existing data file if needed */
+ if(remove(FILENAME) < 0) {
+ if(errno == ENOENT)
+ printf("WRITER: remove %s--%s\n", FILENAME, strerror(errno));
+ else {
+ printf("WRITER: error from remove: %d--%s\n", errno, strerror(errno));
+ goto error;
+ }
+ } else
+ printf("WRITER: %s is removed\n", FILENAME);
+
+ /* Create the data file */
+ if((fd = open(FILENAME, O_RDWR|O_TRUNC|O_CREAT, 0664)) < 0) {
+ printf("WRITER: error from open\n");
+ goto error;
+ }
+
+ /* Allocate buffer for holding data to be written */
+ if((buf = (unsigned int *)malloc(num * sizeof(unsigned int))) == NULL) {
+ printf("WRITER: error from malloc\n");
+ if(fd >= 0 && close(fd) < 0)
+ printf("WRITER: error from close\n");
+ goto error;
+ }
+
+ printf("\n");
+
+ for(i = 1; i <= iterations; i++) { /* iteration loop */
+ printf("WRITER: *****start iteration %u*****\n", i);
+
+ /* Write the series of integers to the file */
+ for(n = 0; n < num; n++) { /* write loop */
+
+ /* Set up data to be written */
+ for(u=0; u < num; u++)
+ buf[u] = n;
+
+ /* Position the file to the proper location */
+ if(lseek(fd, (off_t)(n*sizeof(unsigned int)), SEEK_SET) < 0) {
+ printf("WRITER: error from lseek\n");
+ goto error;
+ }
+
+ /* Write the data */
+ if((bytes_wrote = write(fd, buf, ((num-n) * sizeof(unsigned int)))) < 0) {
+ printf("WRITER: error from write\n");
+ goto error;
+ }
+
+ /* Verify the bytes written is correct */
+ if(bytes_wrote != (ssize_t)((num-n) * sizeof(unsigned int))) {
+ printf("WRITER: error from bytes written\n");
+ goto error;
+ }
+ } /* end for */
+
+ printf("WRITER: *****end iteration %u*****\n\n", i);
+
+ } /* end for */
+
+ /* Close the file */
+ if(close(fd) < 0) {
+ printf("WRITER: error from close\n");
+ goto error;
+ }
+
+ /* Free the buffer */
+ if(buf) free(buf);
+
+ return(0);
+
+error:
+ return(-1);
+} /* main() */
diff --git a/test/bad_compound.h5 b/test/bad_compound.h5
index 1834a2e..1c96318 100644
--- a/test/bad_compound.h5
+++ b/test/bad_compound.h5
Binary files differ
diff --git a/test/btree2.c b/test/btree2.c
index 8f84135..83c079b 100644
--- a/test/btree2.c
+++ b/test/btree2.c
@@ -138,7 +138,7 @@ create_btree(H5F_t *f, hid_t dxpl, const H5B2_create_t *cparam,
H5B2_t **bt2, haddr_t *bt2_addr)
{
/* Create the v2 B-tree & get its address */
- if(NULL == (*bt2 = H5B2_create(f, dxpl, cparam, f)))
+ if(NULL == (*bt2 = H5B2_create(f, dxpl, cparam, f, NULL)))
FAIL_STACK_ERROR
if(H5B2_get_addr(*bt2, bt2_addr/*out*/) < 0)
FAIL_STACK_ERROR
@@ -177,7 +177,7 @@ reopen_btree(H5F_t *f, hid_t dxpl, H5B2_t **bt2, haddr_t bt2_addr,
FAIL_STACK_ERROR
/* Re-open v2 B-tree */
- if(NULL == (*bt2 = H5B2_open(f, dxpl, bt2_addr, f)))
+ if(NULL == (*bt2 = H5B2_open(f, dxpl, bt2_addr, f, NULL)))
FAIL_STACK_ERROR
} /* end if */
@@ -2854,7 +2854,7 @@ HDfprintf(stderr,"curr_time=%lu\n",(unsigned long)curr_time);
}
/* Re-open v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl, bt2_addr, f)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl, bt2_addr, f, NULL)))
FAIL_STACK_ERROR
/* Check up on B-tree after re-open */
@@ -6551,7 +6551,7 @@ HDfprintf(stderr, "curr_time = %lu\n", (unsigned long)curr_time);
} /* end for */
/* Re-open v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl, bt2_addr, f)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl, bt2_addr, f, NULL)))
FAIL_STACK_ERROR
/* Remove all records */
@@ -6636,7 +6636,7 @@ HDfprintf(stderr, "curr_time = %lu\n", (unsigned long)curr_time);
}
/* Re-open v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl, bt2_addr, f)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl, bt2_addr, f, NULL)))
FAIL_STACK_ERROR
/* Remove all records */
@@ -6646,7 +6646,7 @@ HDfprintf(stderr, "curr_time = %lu\n", (unsigned long)curr_time);
rrecord = HSIZET_MAX;
/* Remove random record */
- if(H5B2_remove_by_idx(bt2, dxpl, H5_ITER_INC, (hsize_t)rem_idx, remove_cb, &rrecord) < 0)
+ if(H5B2_remove_by_idx(bt2, dxpl, H5_ITER_INC, (hsize_t)rem_idx, NULL, remove_cb, &rrecord) < 0)
FAIL_STACK_ERROR
/* Make certain that the record value is correct */
@@ -6725,14 +6725,14 @@ HDfprintf(stderr, "curr_time = %lu\n", (unsigned long)curr_time);
}
/* Re-open v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl, bt2_addr, f)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl, bt2_addr, f, NULL)))
FAIL_STACK_ERROR
/* Remove all records */
for(u = 0; u < INSERT_MANY; u++) {
/* Remove first record */
rrecord = HSIZET_MAX;
- if(H5B2_remove_by_idx(bt2, dxpl, H5_ITER_INC, (hsize_t)0, remove_cb, &rrecord) < 0)
+ if(H5B2_remove_by_idx(bt2, dxpl, H5_ITER_INC, (hsize_t)0, NULL, remove_cb, &rrecord) < 0)
FAIL_STACK_ERROR
/* Make certain that the record value is correct */
@@ -6811,14 +6811,14 @@ HDfprintf(stderr, "curr_time = %lu\n", (unsigned long)curr_time);
}
/* Re-open v2 B-tree */
- if(NULL == (bt2 = H5B2_open(f, dxpl, bt2_addr, f)))
+ if(NULL == (bt2 = H5B2_open(f, dxpl, bt2_addr, f, NULL)))
FAIL_STACK_ERROR
/* Remove all records */
for(u = 0; u < INSERT_MANY; u++) {
/* Remove last record */
rrecord = HSIZET_MAX;
- if(H5B2_remove_by_idx(bt2, dxpl, H5_ITER_DEC, (hsize_t)0, remove_cb, &rrecord) < 0)
+ if(H5B2_remove_by_idx(bt2, dxpl, H5_ITER_DEC, (hsize_t)0, NULL, remove_cb, &rrecord) < 0)
FAIL_STACK_ERROR
/* Make certain that the record value is correct */
@@ -7173,7 +7173,7 @@ test_delete(hid_t fapl, const H5B2_create_t *cparam)
/*
* Delete v2 B-tree
*/
- if(H5B2_delete(f, dxpl, bt2_addr, f, NULL, NULL) < 0)
+ if(H5B2_delete(f, dxpl, bt2_addr, f, NULL, NULL, NULL) < 0)
FAIL_STACK_ERROR
/* Close the file */
@@ -7231,7 +7231,7 @@ test_delete(hid_t fapl, const H5B2_create_t *cparam)
/*
* Delete v2 B-tree
*/
- if(H5B2_delete(f, H5P_DATASET_XFER_DEFAULT, bt2_addr, f, NULL, NULL) < 0)
+ if(H5B2_delete(f, H5P_DATASET_XFER_DEFAULT, bt2_addr, f, NULL, NULL, NULL) < 0)
FAIL_STACK_ERROR
/* Close file */
@@ -7289,7 +7289,7 @@ test_delete(hid_t fapl, const H5B2_create_t *cparam)
/*
* Delete v2 B-tree
*/
- if(H5B2_delete(f, dxpl, bt2_addr, f, NULL, NULL) < 0)
+ if(H5B2_delete(f, dxpl, bt2_addr, f, NULL, NULL, NULL) < 0)
FAIL_STACK_ERROR
/* Close file */
@@ -7347,7 +7347,7 @@ test_delete(hid_t fapl, const H5B2_create_t *cparam)
/*
* Delete v2 B-tree
*/
- if(H5B2_delete(f, dxpl, bt2_addr, f, NULL, NULL) < 0)
+ if(H5B2_delete(f, dxpl, bt2_addr, f, NULL, NULL, NULL) < 0)
FAIL_STACK_ERROR
/* Close file */
diff --git a/test/btree_idx_1_6.h5 b/test/btree_idx_1_6.h5
new file mode 100644
index 0000000..92f939b
--- /dev/null
+++ b/test/btree_idx_1_6.h5
Binary files differ
diff --git a/test/btree_idx_1_8.h5 b/test/btree_idx_1_8.h5
new file mode 100644
index 0000000..36662b2
--- /dev/null
+++ b/test/btree_idx_1_8.h5
Binary files differ
diff --git a/test/cache.c b/test/cache.c
index 38adbda..3a4a0da 100644
--- a/test/cache.c
+++ b/test/cache.c
@@ -187,7 +187,7 @@ static unsigned check_resize_entry_errs(void);
static unsigned check_unprotect_ro_dirty_err(void);
static unsigned check_protect_ro_rw_err(void);
static unsigned check_check_evictions_enabled_err(void);
-static unsigned check_auto_cache_resize(void);
+static unsigned check_auto_cache_resize(hbool_t cork_ageout);
static unsigned check_auto_cache_resize_disable(void);
static unsigned check_auto_cache_resize_epoch_markers(void);
static unsigned check_auto_cache_resize_input_errs(void);
@@ -197,6 +197,7 @@ static unsigned check_flush_deps(void);
static unsigned check_flush_deps_err(void);
static unsigned check_flush_deps_order(void);
static unsigned check_notify_cb(void);
+static unsigned check_metadata_cork(hbool_t fill_via_insertion);
static unsigned check_entry_deletions_during_scans(void);
static void cedds__expunge_dirty_entry_in_flush_test(H5F_t * file_ptr);
static void cedds__H5C_make_space_in_cache(H5F_t * file_ptr);
@@ -2885,7 +2886,7 @@ check_insert_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -9524,63 +9525,63 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
* array only processes as much of it as it is told to, we don't have to
* worry about maintaining the status of entries that we haven't used yet.
*/
- /* entry entry in at main flush dep flush dep child flush flush flush */
- /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par idx: dep ref.count: dep height: order: */
- { VARIABLE_ENTRY_TYPE, 0, VARIABLE_ENTRY_SIZE/4, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { VARIABLE_ENTRY_TYPE, 1, VARIABLE_ENTRY_SIZE/4, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { VARIABLE_ENTRY_TYPE, 2, VARIABLE_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { VARIABLE_ENTRY_TYPE, 3, VARIABLE_ENTRY_SIZE/4, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { VARIABLE_ENTRY_TYPE, 4, VARIABLE_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { VARIABLE_ENTRY_TYPE, 5, VARIABLE_ENTRY_SIZE/4, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { VARIABLE_ENTRY_TYPE, 6, VARIABLE_ENTRY_SIZE/2, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { VARIABLE_ENTRY_TYPE, 7, VARIABLE_ENTRY_SIZE/2, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { VARIABLE_ENTRY_TYPE, 8, VARIABLE_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { VARIABLE_ENTRY_TYPE, 9, VARIABLE_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 0, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 1, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 2, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 3, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 4, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 5, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 6, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 7, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 8, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 9, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 10, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 11, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 12, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 13, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 14, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 15, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 16, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 17, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 18, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 19, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 20, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 21, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 22, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 23, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 24, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 25, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 26, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 27, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 28, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 29, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 30, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 0, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 1, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 2, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 3, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 4, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 5, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 6, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 7, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 8, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 9, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 10, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 11, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 12, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { LARGE_ENTRY_TYPE, 13, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 }
+ /* entry entry in at main flush dep flush dep child flush flush flush */
+ /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type[]: par idx[]: dep npart: dep nchd: dep ndirty chd: order: corked */
+ { VARIABLE_ENTRY_TYPE, 0, VARIABLE_ENTRY_SIZE/4, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { VARIABLE_ENTRY_TYPE, 1, VARIABLE_ENTRY_SIZE/4, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { VARIABLE_ENTRY_TYPE, 2, VARIABLE_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { VARIABLE_ENTRY_TYPE, 3, VARIABLE_ENTRY_SIZE/4, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { VARIABLE_ENTRY_TYPE, 4, VARIABLE_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { VARIABLE_ENTRY_TYPE, 5, VARIABLE_ENTRY_SIZE/4, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { VARIABLE_ENTRY_TYPE, 6, VARIABLE_ENTRY_SIZE/2, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { VARIABLE_ENTRY_TYPE, 7, VARIABLE_ENTRY_SIZE/2, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { VARIABLE_ENTRY_TYPE, 8, VARIABLE_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { VARIABLE_ENTRY_TYPE, 9, VARIABLE_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 0, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 1, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 2, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 3, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 4, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 5, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 6, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 7, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 8, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 9, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 10, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 11, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 12, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 13, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 14, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 15, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 16, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 17, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 18, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 19, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 20, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 21, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 22, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 23, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 24, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 25, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 26, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 27, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 28, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 29, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 30, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 0, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 1, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 2, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 3, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 4, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 5, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 6, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 7, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 8, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 9, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 10, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 11, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 12, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { LARGE_ENTRY_TYPE, 13, LARGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE}
};
if ( pass ) {
@@ -12774,7 +12775,7 @@ check_get_entry_status(void)
*/
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -12802,7 +12803,7 @@ check_get_entry_status(void)
if ( pass ) {
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -12828,7 +12829,7 @@ check_get_entry_status(void)
if ( pass ) {
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -12854,7 +12855,7 @@ check_get_entry_status(void)
if ( pass ) {
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -12880,7 +12881,7 @@ check_get_entry_status(void)
if ( pass ) {
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -12906,7 +12907,7 @@ check_get_entry_status(void)
if ( pass ) {
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -12994,7 +12995,7 @@ check_expunge_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -13035,7 +13036,7 @@ check_expunge_entry(void)
if ( pass ) {
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -13078,7 +13079,7 @@ check_expunge_entry(void)
*/
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -13119,7 +13120,7 @@ check_expunge_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -13161,7 +13162,7 @@ check_expunge_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -13204,8 +13205,8 @@ check_expunge_entry(void)
*/
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected,
+ &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -14168,7 +14169,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL);
+ NULL, NULL, NULL);
if ( result < 0 ) {
@@ -14246,7 +14247,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL);
+ NULL, NULL, NULL);
if ( result < 0 ) {
@@ -14331,7 +14332,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL);
+ NULL, NULL, NULL);
if ( result < 0 ) {
@@ -14399,7 +14400,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL);
+ NULL, NULL, NULL);
if ( result < 0 ) {
@@ -14459,7 +14460,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL);
+ NULL, NULL, NULL);
if ( result < 0 ) {
@@ -14499,7 +14500,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -14613,7 +14614,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL);
+ NULL, NULL, NULL);
if ( result < 0 ) {
@@ -14693,7 +14694,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL);
+ NULL, NULL, NULL);
if ( result < 0 ) {
@@ -14778,7 +14779,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL);
+ NULL, NULL, NULL);
if ( result < 0 ) {
@@ -14848,7 +14849,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL);
+ NULL, NULL, NULL);
if ( result < 0 ) {
@@ -14908,7 +14909,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL);
+ NULL, NULL, NULL);
if ( result < 0 ) {
@@ -14948,7 +14949,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -15244,7 +15245,7 @@ check_evictions_enabled(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
NULL, &in_cache, NULL, NULL, NULL,
- NULL, NULL);
+ NULL, NULL, NULL);
if ( result < 0 ) {
@@ -15310,7 +15311,7 @@ check_evictions_enabled(void)
entry_ptr = &(base_addr[1]);
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL);
+ NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -15529,7 +15530,7 @@ check_evictions_enabled(void)
entry_ptr = &(base_addr[2]);
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL);
+ NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -15564,7 +15565,7 @@ check_evictions_enabled(void)
entry_ptr = &(base_addr[3]);
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL);
+ NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -15698,7 +15699,7 @@ check_evictions_enabled(void)
entry_ptr = &(base_addr[4]);
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL);
+ NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -17144,6 +17145,135 @@ check_protect_ro_rw_err(void)
/*-------------------------------------------------------------------------
+ * Function: check_protect_retries()
+ *
+ * Purpose: To exercise checksum verification retries for an entry with
+ * a speculative load.
+ *
+ * Return: 0 if the test passes; 1 otherwise.
+ *
+ * Programmer:
+ *
+ *-------------------------------------------------------------------------
+ */
+static unsigned
+check_protect_retries(void)
+{
+ H5F_t * file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ test_entry_t *base_addr = NULL;
+ test_entry_t *entry_ptr = NULL;
+ H5C_cache_entry_t * cache_entry_ptr = NULL;
+ int32_t type;
+ int32_t idx;
+
+ TESTING("protect an entry to verify retries");
+
+ pass = TRUE;
+
+ /* Set up the cache */
+ if(pass) {
+
+ reset_entries();
+
+ file_ptr = setup_cache((size_t)(2 * 1024),
+ (size_t)(1 * 1024));
+
+ /* Set up read attempts for verifying checksum */
+ file_ptr->shared->read_attempts = 10;
+ file_ptr->shared->retries_nbins = 1;
+ }
+
+ /* Test only for this type which has a speculative load */
+ type = VARIABLE_ENTRY_TYPE;
+ idx = 0;
+
+ if(pass) {
+
+ cache_ptr = file_ptr->shared->cache;
+ base_addr = entries[type];
+ entry_ptr = &(base_addr[idx]);
+
+ /* test case (1):
+ * --actual_len is smaller than the initial length from get_load_size()
+ * --verify_chksum() returns TRUE after max_verify_ct is reached
+ *
+ */
+ entry_ptr->actual_len = entry_ptr->size/2;
+ entry_ptr->max_verify_ct = 3;
+ entry_ptr->verify_ct = 0;
+
+ cache_entry_ptr = (H5C_cache_entry_t *)H5C_protect(file_ptr, H5P_DATASET_XFER_DEFAULT,
+ &(types[type]), entry_ptr->addr, &entry_ptr->addr, H5C__READ_ONLY_FLAG);
+
+ if ( ( cache_entry_ptr != (void *)entry_ptr ) ||
+ ( !(entry_ptr->header.is_protected) ) ||
+ ( !(entry_ptr->header.is_read_only) ) ||
+ ( entry_ptr->header.ro_ref_count <= 0 ) ||
+ ( entry_ptr->header.type != &(types[type]) ) ||
+ ( entry_ptr->size != entry_ptr->header.size ) ||
+ ( entry_ptr->addr != entry_ptr->header.addr ) ||
+ ( entry_ptr->verify_ct != entry_ptr->max_verify_ct ) ) {
+
+ pass = FALSE;
+ failure_mssg = "error from H5C_protect().";
+
+ } else {
+
+ HDassert( ( entry_ptr->cache_ptr == NULL ) ||
+ ( entry_ptr->cache_ptr == cache_ptr ) );
+
+ entry_ptr->cache_ptr = cache_ptr;
+ entry_ptr->file_ptr = file_ptr;
+ entry_ptr->is_protected = TRUE;
+ entry_ptr->is_read_only = TRUE;
+ entry_ptr->ro_ref_count++;
+ }
+
+ HDassert( ((entry_ptr->header).type)->id == type );
+ }
+
+ if(pass)
+ unprotect_entry(file_ptr, VARIABLE_ENTRY_TYPE, idx, H5C__NO_FLAGS_SET);
+
+ if(pass) {
+ entry_ptr = &(base_addr[++idx]);
+
+ /* test case (2):
+ * --actual_len is greater than the initial length from get_load_size()
+ * --verify_chksum() returns FALSE even after all read attempts are exhausted
+ * (file_ptr->shared->read_attempts is smaller than max_verify_ct)
+ */
+ entry_ptr->actual_len = entry_ptr->size*2;
+ entry_ptr->max_verify_ct = 11;
+ entry_ptr->verify_ct = 0;
+
+ cache_entry_ptr = (H5C_cache_entry_t *)H5C_protect(file_ptr, H5P_DATASET_XFER_DEFAULT,
+ &(types[type]), entry_ptr->addr, &entry_ptr->addr, H5C__READ_ONLY_FLAG);
+
+ /* H5C_protect() should fail after all retries fail */
+ if(cache_entry_ptr != NULL)
+ pass = FALSE;
+ }
+
+
+ takedown_cache(file_ptr, FALSE, FALSE);
+ reset_entries();
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass ) {
+
+ HDfprintf(stdout, "%s: failure_msg = \"%s\".\n",
+ FUNC, failure_mssg);
+ }
+
+ return (unsigned)!pass;
+
+} /* check_protect_retries() */
+
+
+/*-------------------------------------------------------------------------
* Function: check_evictions_enabled_err()
*
* Purpose: Verify that H5C_get_evictions_enabled() and
@@ -17300,6 +17430,9 @@ check_check_evictions_enabled_err(void)
* Added a basic set of tests for the flash cache size
* increment code.
*
+ * Vailin Choi; Feb 2014
+ * Add the parameter to indicate "corking" the entry or not.
+ * More thorough testing of this is suggested.
*-------------------------------------------------------------------------
*/
@@ -17320,7 +17453,7 @@ static void test_rpt_fcn(H5_ATTR_UNUSED H5C_t * cache_ptr,
}
static unsigned
-check_auto_cache_resize(void)
+check_auto_cache_resize(hbool_t cork_ageout)
{
hbool_t show_progress = FALSE;
herr_t result;
@@ -18310,6 +18443,8 @@ check_auto_cache_resize(void)
if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+ if(cork_ageout)
+ cork_entry_type(file_ptr, MEDIUM_ENTRY_TYPE);
/* fill the cache with 1024 byte entries -- nothing should happen
* for three epochs while the markers are inserted into the cache
*
@@ -18653,6 +18788,9 @@ check_auto_cache_resize(void)
if ( show_progress ) HDfprintf(stderr, "check point %d\n", checkpoint++);
+ if(cork_ageout)
+ uncork_entry_type(file_ptr, MEDIUM_ENTRY_TYPE);
+
/* repeat the above test, but with max_decrement enabled to see
* if that features works as it should. Note that this will change
@@ -27930,162 +28068,164 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion)
*/
struct expected_entry_status expected[150] =
{
- /* entry entry in at main flush dep flush dep child flush flush flush */
- /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type: par idx: dep ref.count: dep height: order: */
- { HUGE_ENTRY_TYPE, 0, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 1, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 2, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 3, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 4, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 5, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 6, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 7, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 8, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 9, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 10, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 11, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 12, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 13, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 14, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 15, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 16, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 17, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 18, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 19, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 20, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 21, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 22, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 23, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 24, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 25, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 26, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 27, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 28, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 29, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 30, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 31, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 32, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 33, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 34, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 35, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 36, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 37, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 38, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 39, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 40, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 41, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 42, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 43, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 44, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 45, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 46, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 47, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 48, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 49, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 50, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 51, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 52, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 53, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 54, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 55, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 56, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 57, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 58, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 59, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 60, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 61, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 62, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 63, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 64, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 65, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 66, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 67, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 68, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 69, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 70, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 71, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 72, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 73, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 74, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 75, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 76, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 77, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 78, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 79, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 80, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 81, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 82, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 83, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 84, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 85, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 86, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 87, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 88, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 89, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 90, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 91, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 92, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 93, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 94, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 95, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 96, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 97, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 98, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 99, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 100, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 101, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 102, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 103, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 104, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 105, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 106, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 107, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 108, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 109, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 110, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 111, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 112, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 113, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 114, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 115, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 116, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 117, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 118, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 119, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 120, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 121, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 122, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 123, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 124, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 125, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 126, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 127, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 128, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 129, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 130, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 131, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 132, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 133, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 134, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 135, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 136, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 137, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 138, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 139, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 140, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 141, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 142, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 143, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 144, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 145, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 146, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 147, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 148, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 149, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 }
+ /* entry entry in at main flush dep flush dep child flush flush flush */
+ /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type[]: par idx[]: dep npart: dep nchd: dep ndirty chd: order: corked: */
+ { HUGE_ENTRY_TYPE, 0, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 1, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 2, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 3, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 4, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 5, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 6, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 7, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 8, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 9, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 10, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 11, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 12, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 13, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 14, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 15, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 16, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 17, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 18, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 19, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 20, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 21, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 22, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 23, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 24, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 25, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 26, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 27, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 28, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 29, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 30, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 31, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 32, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 33, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 34, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 35, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 36, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 37, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 38, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 39, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 40, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 41, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 42, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 43, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 44, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 45, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 46, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 47, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 48, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 49, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 50, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 51, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 52, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 53, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 54, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 55, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 56, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 57, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 58, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 59, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 60, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 61, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 62, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 63, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 64, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 65, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 66, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 67, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 68, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 69, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 70, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 71, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 72, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 73, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 74, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 75, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 76, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 77, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 78, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 79, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 80, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 81, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 82, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 83, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 84, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 85, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 86, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 87, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 88, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 89, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 90, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 91, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 92, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 93, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 94, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 95, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 96, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 97, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 98, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 99, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 100, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 101, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 102, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 103, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 104, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 105, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 106, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 107, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 108, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 109, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 110, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 111, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 112, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 113, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 114, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 115, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 116, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 117, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 118, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 119, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 120, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 121, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 122, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 123, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 124, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 125, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 126, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 127, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 128, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 129, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 130, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 131, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 132, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 133, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 134, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 135, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 136, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 137, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 138, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 139, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 140, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 141, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 142, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 143, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 144, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 145, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 146, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 147, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 148, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 149, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE}
};
pass = TRUE;
+ reset_entries();
+
if (fill_via_insertion) {
TESTING("to ensure metadata blizzard absence when inserting");
@@ -28761,12 +28901,12 @@ check_flush_deps(void)
struct expected_entry_status expected[5] =
{
/* entry entry in at main flush dep flush dep child flush flush flush */
- /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type: par idx: dep ref.count: dep height: order: */
- { PICO_ENTRY_TYPE, 0, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { PICO_ENTRY_TYPE, 1, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { PICO_ENTRY_TYPE, 2, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { PICO_ENTRY_TYPE, 3, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { PICO_ENTRY_TYPE, 4, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 }
+ /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type[]: par idx[]: dep npart: dep nchd: dep ndirty chd: order: corked */
+ { PICO_ENTRY_TYPE, 0, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { PICO_ENTRY_TYPE, 1, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { PICO_ENTRY_TYPE, 2, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { PICO_ENTRY_TYPE, 3, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { PICO_ENTRY_TYPE, 4, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE}
};
TESTING("flush dependencies");
@@ -28816,7 +28956,7 @@ check_flush_deps(void)
/* Check the parent's entry status */
entry_ptr = &(base_addr[1]);
if(H5C_get_entry_status(file_ptr, entry_ptr->addr, NULL, &in_cache,
- NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child) < 0)
+ NULL, NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child) < 0)
CACHE_ERROR("H5C_get_entry_status() failed")
if(!in_cache || is_flush_dep_parent || is_flush_dep_child)
CACHE_ERROR("invalid entry status")
@@ -28824,7 +28964,7 @@ check_flush_deps(void)
/* Check the child's entry status */
entry_ptr = &(base_addr[0]);
if(H5C_get_entry_status(file_ptr, entry_ptr->addr, NULL, &in_cache,
- NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child) < 0)
+ NULL, NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child) < 0)
CACHE_ERROR("H5C_get_entry_status() failed")
if(!in_cache || is_flush_dep_parent || is_flush_dep_child)
CACHE_ERROR("invalid entry status")
@@ -28835,7 +28975,7 @@ check_flush_deps(void)
/* Check the parent's entry status */
entry_ptr = &(base_addr[1]);
if(H5C_get_entry_status(file_ptr, entry_ptr->addr, NULL, &in_cache,
- NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child) < 0)
+ NULL, NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child) < 0)
CACHE_ERROR("H5C_get_entry_status() failed")
if(!in_cache || !is_flush_dep_parent || is_flush_dep_child)
CACHE_ERROR("invalid entry status")
@@ -28843,7 +28983,7 @@ check_flush_deps(void)
/* Check the child's entry status */
entry_ptr = &(base_addr[0]);
if(H5C_get_entry_status(file_ptr, entry_ptr->addr, NULL, &in_cache,
- NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child) < 0)
+ NULL, NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child) < 0)
CACHE_ERROR("H5C_get_entry_status() failed")
if(!in_cache || is_flush_dep_parent || !is_flush_dep_child)
CACHE_ERROR("invalid entry status")
@@ -28851,12 +28991,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -28880,12 +29021,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroy flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_protected = FALSE;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
+ expected[1].flush_dep_ndirty_chd =0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -28911,14 +29051,16 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 2;
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 2;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 2;
+ expected[0].flush_dep_npar = 1;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 2;
+ expected[1].flush_dep_npar = 1;
expected[2].is_protected = TRUE;
expected[2].is_pinned = TRUE;
- expected[2].child_flush_dep_height_rc[0] = 2;
- expected[2].flush_dep_height = 1;
+ expected[2].flush_dep_nchd = 2;
+ expected[2].flush_dep_ndirty_chd = 2;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -28945,14 +29087,12 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroy flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
+ expected[1].flush_dep_npar = 0;
expected[2].is_protected = FALSE;
expected[2].is_pinned = FALSE;
- expected[2].child_flush_dep_height_rc[0] = 0;
- expected[2].flush_dep_height = 0;
+ expected[2].flush_dep_nchd = 0;
+ expected[2].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -28977,12 +29117,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29000,12 +29141,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 2;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 2;
+ expected[1].flush_dep_npar = 1;
expected[2].is_protected = TRUE;
expected[2].is_pinned = TRUE;
- expected[2].child_flush_dep_height_rc[1] = 1;
- expected[2].flush_dep_height = 2;
+ expected[2].flush_dep_nchd = 1;
+ expected[2].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29023,12 +29165,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[2].flush_dep_par_type = entry_type;
- expected[2].flush_dep_par_idx = 3;
+ expected[2].flush_dep_par_type[0] = entry_type;
+ expected[2].flush_dep_par_idx[0] = 3;
+ expected[2].flush_dep_npar = 1;
expected[3].is_protected = TRUE;
expected[3].is_pinned = TRUE;
- expected[3].child_flush_dep_height_rc[2] = 1;
- expected[3].flush_dep_height = 3;
+ expected[3].flush_dep_nchd = 1;
+ expected[3].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29052,12 +29195,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[2].flush_dep_par_type = -1;
- expected[2].flush_dep_par_idx = -1;
+ expected[2].flush_dep_npar = 0;
expected[3].is_protected = FALSE;
expected[3].is_pinned = FALSE;
- expected[3].child_flush_dep_height_rc[2] = 0;
- expected[3].flush_dep_height = 0;
+ expected[3].flush_dep_nchd = 0;
+ expected[3].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29078,12 +29220,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[1].flush_dep_npar = 0;
expected[2].is_protected = FALSE;
expected[2].is_pinned = FALSE;
- expected[2].child_flush_dep_height_rc[1] = 0;
- expected[2].flush_dep_height = 0;
+ expected[2].flush_dep_nchd = 0;
+ expected[2].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29104,12 +29245,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_protected = FALSE;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
+ expected[1].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29120,7 +29260,7 @@ check_flush_deps(void)
}
/* Test Case #3a2 - Single chain of flush dependencies, 4 entries tall
- * created from the "bottom up" and destroyed from the "top down"
+ * created from the "bottom up" and destroyed from the "bottom up"
*/
/* Create flush dependency between entries (child) 0->1->2->3 (parent) */
@@ -29134,12 +29274,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29157,12 +29298,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 2;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 2;
+ expected[1].flush_dep_npar = 1;
expected[2].is_protected = TRUE;
expected[2].is_pinned = TRUE;
- expected[2].child_flush_dep_height_rc[1] = 1;
- expected[2].flush_dep_height = 2;
+ expected[2].flush_dep_nchd = 1;
+ expected[2].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29180,12 +29322,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[2].flush_dep_par_type = entry_type;
- expected[2].flush_dep_par_idx = 3;
+ expected[2].flush_dep_par_type[0] = entry_type;
+ expected[2].flush_dep_par_idx[0] = 3;
+ expected[2].flush_dep_npar = 1;
expected[3].is_protected = TRUE;
expected[3].is_pinned = TRUE;
- expected[3].child_flush_dep_height_rc[2] = 1;
- expected[3].flush_dep_height = 3;
+ expected[3].flush_dep_nchd = 1;
+ expected[3].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29209,18 +29352,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_protected = FALSE;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
- expected[2].child_flush_dep_height_rc[0] = 1;
- expected[2].child_flush_dep_height_rc[1] = 0;
- expected[2].flush_dep_height = 1;
- expected[3].child_flush_dep_height_rc[1] = 1;
- expected[3].child_flush_dep_height_rc[2] = 0;
- expected[3].flush_dep_height = 2;
+ expected[1].flush_dep_nchd = 0;
+ expected[1].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29241,15 +29377,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[1].flush_dep_npar = 0;
expected[2].is_protected = FALSE;
expected[2].is_pinned = FALSE;
- expected[2].child_flush_dep_height_rc[0] = 0;
- expected[2].flush_dep_height = 0;
- expected[3].child_flush_dep_height_rc[0] = 1;
- expected[3].child_flush_dep_height_rc[1] = 0;
- expected[3].flush_dep_height = 1;
+ expected[2].flush_dep_nchd = 0;
+ expected[2].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29270,12 +29402,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[2].flush_dep_par_type = -1;
- expected[2].flush_dep_par_idx = -1;
+ expected[2].flush_dep_npar = 0;
expected[3].is_protected = FALSE;
expected[3].is_pinned = FALSE;
- expected[3].child_flush_dep_height_rc[0] = 0;
- expected[3].flush_dep_height = 0;
+ expected[3].flush_dep_nchd = 0;
+ expected[3].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29300,12 +29431,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[2].flush_dep_par_type = entry_type;
- expected[2].flush_dep_par_idx = 3;
+ expected[2].flush_dep_par_type[0] = entry_type;
+ expected[2].flush_dep_par_idx[0] = 3;
+ expected[2].flush_dep_npar = 1;
expected[3].is_protected = TRUE;
expected[3].is_pinned = TRUE;
- expected[3].child_flush_dep_height_rc[0] = 1;
- expected[3].flush_dep_height = 1;
+ expected[3].flush_dep_nchd = 1;
+ expected[3].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29323,15 +29455,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 2;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 2;
+ expected[1].flush_dep_npar = 1;
expected[2].is_protected = TRUE;
expected[2].is_pinned = TRUE;
- expected[2].child_flush_dep_height_rc[0] = 1;
- expected[2].flush_dep_height = 1;
- expected[3].child_flush_dep_height_rc[0] = 0;
- expected[3].child_flush_dep_height_rc[1] = 1;
- expected[3].flush_dep_height = 2;
+ expected[2].flush_dep_nchd = 1;
+ expected[2].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29349,18 +29479,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
- expected[2].child_flush_dep_height_rc[0] = 0;
- expected[2].child_flush_dep_height_rc[1] = 1;
- expected[2].flush_dep_height = 2;
- expected[3].child_flush_dep_height_rc[1] = 0;
- expected[3].child_flush_dep_height_rc[2] = 1;
- expected[3].flush_dep_height = 3;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29384,12 +29509,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[2].flush_dep_par_type = -1;
- expected[2].flush_dep_par_idx = -1;
+ expected[2].flush_dep_npar = 0;
expected[3].is_protected = FALSE;
expected[3].is_pinned = FALSE;
- expected[3].child_flush_dep_height_rc[2] = 0;
- expected[3].flush_dep_height = 0;
+ expected[3].flush_dep_nchd = 0;
+ expected[3].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29410,12 +29534,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[1].flush_dep_npar = 0;
expected[2].is_protected = FALSE;
expected[2].is_pinned = FALSE;
- expected[2].child_flush_dep_height_rc[1] = 0;
- expected[2].flush_dep_height = 0;
+ expected[2].flush_dep_nchd = 0;
+ expected[2].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29436,12 +29559,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_protected = FALSE;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
+ expected[1].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29466,12 +29588,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[2].flush_dep_par_type = entry_type;
- expected[2].flush_dep_par_idx = 3;
+ expected[2].flush_dep_par_type[0] = entry_type;
+ expected[2].flush_dep_par_idx[0] = 3;
+ expected[2].flush_dep_npar = 1;
expected[3].is_protected = TRUE;
expected[3].is_pinned = TRUE;
- expected[3].child_flush_dep_height_rc[0] = 1;
- expected[3].flush_dep_height = 1;
+ expected[3].flush_dep_nchd = 1;
+ expected[3].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29489,15 +29612,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 2;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 2;
+ expected[1].flush_dep_npar = 1;
expected[2].is_protected = TRUE;
expected[2].is_pinned = TRUE;
- expected[2].child_flush_dep_height_rc[0] = 1;
- expected[2].flush_dep_height = 1;
- expected[3].child_flush_dep_height_rc[0] = 0;
- expected[3].child_flush_dep_height_rc[1] = 1;
- expected[3].flush_dep_height = 2;
+ expected[2].flush_dep_nchd = 1;
+ expected[2].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29515,18 +29636,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
- expected[2].child_flush_dep_height_rc[0] = 0;
- expected[2].child_flush_dep_height_rc[1] = 1;
- expected[2].flush_dep_height = 2;
- expected[3].child_flush_dep_height_rc[1] = 0;
- expected[3].child_flush_dep_height_rc[2] = 1;
- expected[3].flush_dep_height = 3;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29550,18 +29666,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_protected = FALSE;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
- expected[2].child_flush_dep_height_rc[0] = 1;
- expected[2].child_flush_dep_height_rc[1] = 0;
- expected[2].flush_dep_height = 1;
- expected[3].child_flush_dep_height_rc[1] = 1;
- expected[3].child_flush_dep_height_rc[2] = 0;
- expected[3].flush_dep_height = 2;
+ expected[1].flush_dep_nchd = 0;
+ expected[1].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29582,15 +29691,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[1].flush_dep_npar = 0;
expected[2].is_protected = FALSE;
expected[2].is_pinned = FALSE;
- expected[2].child_flush_dep_height_rc[0] = 0;
- expected[2].flush_dep_height = 0;
- expected[3].child_flush_dep_height_rc[0] = 1;
- expected[3].child_flush_dep_height_rc[1] = 0;
- expected[3].flush_dep_height = 1;
+ expected[2].flush_dep_nchd = 0;
+ expected[2].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29611,12 +29716,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[2].flush_dep_par_type = -1;
- expected[2].flush_dep_par_idx = -1;
+ expected[2].flush_dep_npar = 0;
expected[3].is_protected = FALSE;
expected[3].is_pinned = FALSE;
- expected[3].child_flush_dep_height_rc[0] = 0;
- expected[3].flush_dep_height = 0;
+ expected[3].flush_dep_nchd = 0;
+ expected[3].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29640,12 +29744,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[u].flush_dep_par_type = entry_type;
- expected[u].flush_dep_par_idx = 4;
+ expected[u].flush_dep_par_type[0] = entry_type;
+ expected[u].flush_dep_par_idx[0] = 4;
+ expected[u].flush_dep_npar = 1;
expected[4].is_protected = TRUE;
expected[4].is_pinned = TRUE;
- expected[4].child_flush_dep_height_rc[0] = u + 1;
- expected[4].flush_dep_height = 1;
+ expected[4].flush_dep_nchd++;
+ expected[4].flush_dep_ndirty_chd++;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29665,9 +29770,9 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[u].flush_dep_par_type = -1;
- expected[u].flush_dep_par_idx = -1;
- expected[4].child_flush_dep_height_rc[0] = 3 - u;
+ expected[u].flush_dep_npar = 0;
+ expected[4].flush_dep_nchd--;
+ expected[4].flush_dep_ndirty_chd--;
/* Check for destroying flush dependency on last entry */
if(3 == u) {
@@ -29682,7 +29787,6 @@ check_flush_deps(void)
*/
expected[4].is_protected = FALSE;
expected[4].is_pinned = FALSE;
- expected[4].flush_dep_height = 0;
} /* end if */
/* Verify the status */
@@ -29711,12 +29815,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29734,12 +29839,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[3].flush_dep_par_type = entry_type;
- expected[3].flush_dep_par_idx = 4;
+ expected[3].flush_dep_par_type[0] = entry_type;
+ expected[3].flush_dep_par_idx[0] = 4;
+ expected[3].flush_dep_npar = 1;
expected[4].is_protected = TRUE;
expected[4].is_pinned = TRUE;
- expected[4].child_flush_dep_height_rc[0] = 1;
- expected[4].flush_dep_height = 1;
+ expected[4].flush_dep_nchd = 1;
+ expected[4].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29757,15 +29863,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[4].flush_dep_par_type = entry_type;
- expected[4].flush_dep_par_idx = 0;
+ expected[4].flush_dep_par_type[0] = entry_type;
+ expected[4].flush_dep_par_idx[0] = 0;
+ expected[4].flush_dep_npar = 1;
expected[0].is_protected = TRUE;
expected[0].is_pinned = TRUE;
- expected[0].child_flush_dep_height_rc[1] = 1;
- expected[0].flush_dep_height = 2;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].child_flush_dep_height_rc[2] = 1;
- expected[1].flush_dep_height = 3;
+ expected[0].flush_dep_nchd = 1;
+ expected[0].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29790,15 +29894,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[4].flush_dep_par_type = -1;
- expected[4].flush_dep_par_idx = -1;
+ expected[4].flush_dep_npar = 0;
expected[0].is_protected = FALSE;
expected[0].is_pinned = FALSE;
- expected[0].child_flush_dep_height_rc[1] = 0;
- expected[0].flush_dep_height = 0;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].child_flush_dep_height_rc[2] = 0;
- expected[1].flush_dep_height = 1;
+ expected[0].flush_dep_nchd = 0;
+ expected[0].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29819,12 +29919,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[3].flush_dep_par_type = -1;
- expected[3].flush_dep_par_idx = -1;
+ expected[3].flush_dep_npar = 0;
expected[4].is_protected = FALSE;
expected[4].is_pinned = FALSE;
- expected[4].child_flush_dep_height_rc[0] = 0;
- expected[4].flush_dep_height = 0;
+ expected[4].flush_dep_nchd = 0;
+ expected[4].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29845,12 +29944,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_protected = FALSE;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
+ expected[1].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29877,12 +29975,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5F_t * cache_ptr */
@@ -29900,12 +29999,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 2;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 2;
+ expected[1].flush_dep_npar = 1;
expected[2].is_protected = TRUE;
expected[2].is_pinned = TRUE;
- expected[2].child_flush_dep_height_rc[1] = 1;
- expected[2].flush_dep_height = 2;
+ expected[2].flush_dep_nchd = 1;
+ expected[2].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29923,12 +30023,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[3].flush_dep_par_type = entry_type;
- expected[3].flush_dep_par_idx = 4;
+ expected[3].flush_dep_par_type[0] = entry_type;
+ expected[3].flush_dep_par_idx[0] = 4;
+ expected[3].flush_dep_npar = 1;
expected[4].is_protected = TRUE;
expected[4].is_pinned = TRUE;
- expected[4].child_flush_dep_height_rc[0] = 1;
- expected[4].flush_dep_height = 1;
+ expected[4].flush_dep_nchd = 1;
+ expected[4].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29943,13 +30044,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[4].flush_dep_par_type = entry_type;
- expected[4].flush_dep_par_idx = 1;
- expected[1].child_flush_dep_height_rc[1] = 1;
- expected[1].flush_dep_height = 2;
- expected[2].child_flush_dep_height_rc[1] = 0;
- expected[2].child_flush_dep_height_rc[2] = 1;
- expected[2].flush_dep_height = 3;
+ expected[4].flush_dep_par_type[0] = entry_type;
+ expected[4].flush_dep_par_idx[0] = 1;
+ expected[4].flush_dep_npar = 1;
+ expected[1].flush_dep_nchd = 2;
+ expected[1].flush_dep_ndirty_chd = 2;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29968,13 +30067,9 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[4].flush_dep_par_type = -1;
- expected[4].flush_dep_par_idx = -1;
- expected[1].child_flush_dep_height_rc[1] = 0;
- expected[1].flush_dep_height = 1;
- expected[2].child_flush_dep_height_rc[1] = 1;
- expected[2].child_flush_dep_height_rc[2] = 0;
- expected[2].flush_dep_height = 2;
+ expected[4].flush_dep_npar = 0;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -29995,12 +30090,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[3].flush_dep_par_type = -1;
- expected[3].flush_dep_par_idx = -1;
+ expected[3].flush_dep_npar = 0;
expected[4].is_protected = FALSE;
expected[4].is_pinned = FALSE;
- expected[4].child_flush_dep_height_rc[0] = 0;
- expected[4].flush_dep_height = 0;
+ expected[4].flush_dep_nchd = 0;
+ expected[4].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30021,12 +30115,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[1].flush_dep_npar = 0;
expected[2].is_protected = FALSE;
expected[2].is_pinned = FALSE;
- expected[2].child_flush_dep_height_rc[1] = 0;
- expected[2].flush_dep_height = 0;
+ expected[2].flush_dep_nchd = 0;
+ expected[2].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30047,12 +30140,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_protected = FALSE;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
+ expected[1].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30079,12 +30171,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30102,12 +30195,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 2;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 2;
+ expected[1].flush_dep_npar = 1;
expected[2].is_protected = TRUE;
expected[2].is_pinned = TRUE;
- expected[2].child_flush_dep_height_rc[1] = 1;
- expected[2].flush_dep_height = 2;
+ expected[2].flush_dep_nchd = 1;
+ expected[2].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30125,12 +30219,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[3].flush_dep_par_type = entry_type;
- expected[3].flush_dep_par_idx = 4;
+ expected[3].flush_dep_par_type[0] = entry_type;
+ expected[3].flush_dep_par_idx[0] = 4;
+ expected[3].flush_dep_npar = 1;
expected[4].is_protected = TRUE;
expected[4].is_pinned = TRUE;
- expected[4].child_flush_dep_height_rc[0] = 1;
- expected[4].flush_dep_height = 1;
+ expected[4].flush_dep_nchd = 1;
+ expected[4].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30145,9 +30240,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[4].flush_dep_par_type = entry_type;
- expected[4].flush_dep_par_idx = 2;
- expected[2].child_flush_dep_height_rc[1] = 2;
+ expected[4].flush_dep_par_type[0] = entry_type;
+ expected[4].flush_dep_par_idx[0] = 2;
+ expected[4].flush_dep_npar = 1;
+ expected[2].flush_dep_nchd = 2;
+ expected[2].flush_dep_ndirty_chd = 2;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30166,9 +30263,9 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[4].flush_dep_par_type = -1;
- expected[4].flush_dep_par_idx = -1;
- expected[2].child_flush_dep_height_rc[1] = 1;
+ expected[4].flush_dep_npar = 0;
+ expected[2].flush_dep_nchd = 1;
+ expected[2].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30189,12 +30286,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[3].flush_dep_par_type = -1;
- expected[3].flush_dep_par_idx = -1;
+ expected[3].flush_dep_npar = 0;
expected[4].is_protected = FALSE;
expected[4].is_pinned = FALSE;
- expected[4].child_flush_dep_height_rc[0] = 0;
- expected[4].flush_dep_height = 0;
+ expected[4].flush_dep_nchd = 0;
+ expected[4].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30215,12 +30311,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[1].flush_dep_npar = 0;
expected[2].is_protected = FALSE;
expected[2].is_pinned = FALSE;
- expected[2].child_flush_dep_height_rc[1] = 0;
- expected[2].flush_dep_height = 0;
+ expected[2].flush_dep_nchd = 0;
+ expected[2].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30241,12 +30336,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_protected = FALSE;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
+ expected[1].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30274,12 +30368,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30306,12 +30401,11 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroy flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_protected = FALSE;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
+ expected[1].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30339,12 +30433,13 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30378,12 +30473,121 @@ check_flush_deps(void)
/* Change expected values, and verify the status of the entries
* after destroy flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_protected = FALSE;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
+ expected[1].flush_dep_ndirty_chd = 0;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+ }
+
+/* Test Case #7 - Simple multiple parents
+ */
+
+ /* Create flush dependency between entries (child) 0 and 1, 2 (parents)
+ */
+ {
+ protect_entry(file_ptr, entry_type, 1);
+ if ( !pass ) CACHE_ERROR("protect_entry failed")
+
+ create_flush_dependency(entry_type, 1, entry_type, 0);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
+ expected[1].is_protected = TRUE;
+ expected[1].is_pinned = TRUE;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ protect_entry(file_ptr, entry_type, 2);
+ if ( !pass ) CACHE_ERROR("protect_entry failed")
+
+ create_flush_dependency(entry_type, 2, entry_type, 0);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[0].flush_dep_par_type[1] = entry_type;
+ expected[0].flush_dep_par_idx[1] = 2;
+ expected[0].flush_dep_npar = 2;
+ expected[2].is_protected = TRUE;
+ expected[2].is_pinned = TRUE;
+ expected[2].flush_dep_nchd = 1;
+ expected[2].flush_dep_ndirty_chd = 1;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+ }
+
+ /* Destroy flush dependency between entries */
+ {
+ destroy_flush_dependency(entry_type, 1, entry_type, 0);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 1, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ if ( !pass ) CACHE_ERROR("unprotect_entry failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 2;
+ expected[0].flush_dep_npar = 1;
+ expected[1].is_protected = FALSE;
+ expected[1].is_pinned = FALSE;
+ expected[1].flush_dep_nchd = 0;
+ expected[1].flush_dep_ndirty_chd = 0;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ destroy_flush_dependency(entry_type, 2, entry_type, 0);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 2, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ if ( !pass ) CACHE_ERROR("unprotect_entry failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[0].flush_dep_npar = 0;
+ expected[2].is_protected = FALSE;
+ expected[2].is_pinned = FALSE;
+ expected[2].flush_dep_nchd = 0;
+ expected[2].flush_dep_ndirty_chd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30436,7 +30640,7 @@ check_flush_deps_err(void)
/* Loop over test cases, check for various errors in configuring flush
* dependencies. Verify that all performs as expected.
*/
- for(test_count = 0; test_count < 9; test_count++) {
+ for(test_count = 0; test_count < 7; test_count++) {
unsigned u; /* Local index variable */
herr_t result; /* Generic return value */
@@ -30474,161 +30678,14 @@ check_flush_deps_err(void)
if ( !pass ) CACHE_ERROR("unprotect_entry failed")
break;
- /* Verify that a child entry can only have one flush dependency parent */
- case 2:
- protect_entry(file_ptr, entry_type, 0);
- if ( !pass ) CACHE_ERROR("protect_entry failed")
-
- create_flush_dependency(entry_type, 0, entry_type, 2);
- if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
-
- protect_entry(file_ptr, entry_type, 1);
- if ( !pass ) CACHE_ERROR("protect_entry failed")
-
- result = H5C_create_flush_dependency(&((entries[entry_type])[1]), &((entries[entry_type])[2]));
- if( result != FAIL ) CACHE_ERROR("Creating second flush dependency for child")
-
- destroy_flush_dependency(entry_type, 0, entry_type, 2);
- if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
-
- unprotect_entry(file_ptr, /* H5F_t * file_ptr */
- entry_type, /* int32_t type */
- 1, /* int32_t idx */
- H5C__NO_FLAGS_SET); /* unsigned int flags */
- if ( !pass ) CACHE_ERROR("unprotect_entry failed")
-
- unprotect_entry(file_ptr, /* H5F_t * file_ptr */
- entry_type, /* int32_t type */
- 0, /* int32_t idx */
- H5C__NO_FLAGS_SET); /* unsigned int flags */
- if ( !pass ) CACHE_ERROR("unprotect_entry failed")
- break;
-
- /* Verify that a flush dependency chain can't be higher than (H5C__NUM_FLUSH_DEP_HEIGHTS - 1) */
- case 3:
- protect_entry(file_ptr, entry_type, 0);
- if ( !pass ) CACHE_ERROR("protect_entry failed")
-
- create_flush_dependency(entry_type, 0, entry_type, 1);
- if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
-
- protect_entry(file_ptr, entry_type, 1);
- if ( !pass ) CACHE_ERROR("protect_entry failed")
-
- create_flush_dependency(entry_type, 1, entry_type, 2);
- if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
-
- protect_entry(file_ptr, entry_type, 2);
- if ( !pass ) CACHE_ERROR("protect_entry failed")
-
- create_flush_dependency(entry_type, 2, entry_type, 3);
- if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
-
- protect_entry(file_ptr, entry_type, 3);
- if ( !pass ) CACHE_ERROR("protect_entry failed")
-
- create_flush_dependency(entry_type, 3, entry_type, 4);
- if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
-
- protect_entry(file_ptr, entry_type, 4);
- if ( !pass ) CACHE_ERROR("protect_entry failed")
-
- create_flush_dependency(entry_type, 4, entry_type, 5);
- if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
-
- protect_entry(file_ptr, entry_type, 5);
- if ( !pass ) CACHE_ERROR("protect_entry failed")
-
- create_flush_dependency(entry_type, 5, entry_type, 6);
- if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
-
- protect_entry(file_ptr, entry_type, 6);
- if ( !pass ) CACHE_ERROR("protect_entry failed")
-
- result = H5C_create_flush_dependency(&((entries[entry_type])[6]), &((entries[entry_type])[7]));
- if( result != FAIL ) CACHE_ERROR("Creating flush dependency that's too tall")
-
- unprotect_entry(file_ptr, /* H5F_t * file_ptr */
- entry_type, /* int32_t type */
- 6, /* int32_t idx */
- H5C__NO_FLAGS_SET); /* unsigned int flags */
- if ( !pass ) CACHE_ERROR("unprotect_entry failed")
-
- protect_entry(file_ptr, entry_type, 7);
- if ( !pass ) CACHE_ERROR("protect_entry failed")
-
- result = H5C_create_flush_dependency(&((entries[entry_type])[7]), &((entries[entry_type])[0]));
- if( result != FAIL ) CACHE_ERROR("Creating flush dependency that's too tall")
-
- unprotect_entry(file_ptr, /* H5F_t * file_ptr */
- entry_type, /* int32_t type */
- 7, /* int32_t idx */
- H5C__NO_FLAGS_SET); /* unsigned int flags */
- if ( !pass ) CACHE_ERROR("unprotect_entry failed")
-
- destroy_flush_dependency(entry_type, 0, entry_type, 1);
- if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
-
- unprotect_entry(file_ptr, /* H5F_t * file_ptr */
- entry_type, /* int32_t type */
- 0, /* int32_t idx */
- H5C__NO_FLAGS_SET); /* unsigned int flags */
- if ( !pass ) CACHE_ERROR("unprotect_entry failed")
-
- destroy_flush_dependency(entry_type, 1, entry_type, 2);
- if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
-
- unprotect_entry(file_ptr, /* H5F_t * file_ptr */
- entry_type, /* int32_t type */
- 1, /* int32_t idx */
- H5C__NO_FLAGS_SET); /* unsigned int flags */
- if ( !pass ) CACHE_ERROR("unprotect_entry failed")
-
- destroy_flush_dependency(entry_type, 2, entry_type, 3);
- if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
-
- unprotect_entry(file_ptr, /* H5F_t * file_ptr */
- entry_type, /* int32_t type */
- 2, /* int32_t idx */
- H5C__NO_FLAGS_SET); /* unsigned int flags */
- if ( !pass ) CACHE_ERROR("unprotect_entry failed")
-
- destroy_flush_dependency(entry_type, 3, entry_type, 4);
- if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
-
- unprotect_entry(file_ptr, /* H5F_t * file_ptr */
- entry_type, /* int32_t type */
- 3, /* int32_t idx */
- H5C__NO_FLAGS_SET); /* unsigned int flags */
- if ( !pass ) CACHE_ERROR("unprotect_entry failed")
-
- destroy_flush_dependency(entry_type, 4, entry_type, 5);
- if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
-
- unprotect_entry(file_ptr, /* H5F_t * file_ptr */
- entry_type, /* int32_t type */
- 4, /* int32_t idx */
- H5C__NO_FLAGS_SET); /* unsigned int flags */
- if ( !pass ) CACHE_ERROR("unprotect_entry failed")
-
- destroy_flush_dependency(entry_type, 5, entry_type, 6);
- if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
-
- unprotect_entry(file_ptr, /* H5F_t * file_ptr */
- entry_type, /* int32_t type */
- 5, /* int32_t idx */
- H5C__NO_FLAGS_SET); /* unsigned int flags */
- if ( !pass ) CACHE_ERROR("unprotect_entry failed")
- break;
-
/* Verify that parent entry must be protected */
- case 4:
+ case 2:
result = H5C_destroy_flush_dependency(&((entries[entry_type])[0]), &((entries[entry_type])[1]));
if( result != FAIL ) CACHE_ERROR("Destroying [non-existant] dependency when parent isn't protected")
break;
/* Verify that parent entry has flush dependency */
- case 5:
+ case 3:
protect_entry(file_ptr, entry_type, 0);
if ( !pass ) CACHE_ERROR("protect_entry failed")
@@ -30643,7 +30700,7 @@ check_flush_deps_err(void)
break;
/* Verify that child entry is in flush dependency relationship */
- case 6:
+ case 4:
protect_entry(file_ptr, entry_type, 0);
if ( !pass ) CACHE_ERROR("protect_entry failed")
@@ -30663,8 +30720,8 @@ check_flush_deps_err(void)
if ( !pass ) CACHE_ERROR("unprotect_entry failed")
break;
- /* Verify that parent has child entries at this height */
- case 7:
+ /* Verify that parent has child as direct descendant */
+ case 5:
protect_entry(file_ptr, entry_type, 0);
if ( !pass ) CACHE_ERROR("protect_entry failed")
@@ -30684,7 +30741,7 @@ check_flush_deps_err(void)
if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
result = H5C_destroy_flush_dependency(&((entries[entry_type])[0]), &((entries[entry_type])[4]));
- if( result != FAIL ) CACHE_ERROR("Destroying dependency when parent has no children at child's height")
+ if( result != FAIL ) CACHE_ERROR("Destroying dependency when child is not a direct descendant has no children at child's height")
destroy_flush_dependency(entry_type, 0, entry_type, 1);
if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
@@ -30716,7 +30773,7 @@ check_flush_deps_err(void)
/* Verify that child entry is child of parent */
- case 8:
+ case 6:
protect_entry(file_ptr, entry_type, 0);
if ( !pass ) CACHE_ERROR("protect_entry failed")
@@ -30800,12 +30857,12 @@ check_flush_deps_order(void)
struct expected_entry_status expected[5] =
{
/* entry entry in at main flush dep flush dep child flush flush flush */
- /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type: par idx: dep ref.count: dep height: order: */
- { PICO_ENTRY_TYPE, 0, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { PICO_ENTRY_TYPE, 1, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { PICO_ENTRY_TYPE, 2, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { PICO_ENTRY_TYPE, 3, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { PICO_ENTRY_TYPE, 4, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 }
+      /* type:                index:    size:            cache: addr:  dirty: prot:  pinned: dsrlzd: srlzd: dest:  par type[]: par idx[]: dep npar: dep nchd: dep ndirty chd: order: corked */
+ { PICO_ENTRY_TYPE, 0, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { PICO_ENTRY_TYPE, 1, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { PICO_ENTRY_TYPE, 2, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { PICO_ENTRY_TYPE, 3, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { PICO_ENTRY_TYPE, 4, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE}
};
unsigned flush_order; /* Index for tracking flush order */
@@ -30855,12 +30912,13 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
+ expected[1].flush_dep_ndirty_chd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30901,6 +30959,7 @@ check_flush_deps_order(void)
expected[1].serialized = TRUE;
expected[1].flush_order = 1;
expected[1].is_protected = FALSE;
+ expected[1].flush_dep_ndirty_chd =0;
expected[2].is_dirty = FALSE;
expected[2].serialized = TRUE;
expected[3].is_dirty = FALSE;
@@ -30924,11 +30983,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroy flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -30953,10 +31010,10 @@ check_flush_deps_order(void)
*/
expected[0].is_protected = TRUE;
expected[0].is_pinned = TRUE;
- expected[0].child_flush_dep_height_rc[0] = 1;
- expected[0].flush_dep_height = 1;
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 0;
+ expected[0].flush_dep_nchd = 1;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 0;
+ expected[1].flush_dep_npar = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31026,10 +31083,8 @@ check_flush_deps_order(void)
* after destroy flush dependency
*/
expected[0].is_pinned = FALSE;
- expected[0].child_flush_dep_height_rc[0] = 0;
- expected[0].flush_dep_height = 0;
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[0].flush_dep_nchd = 0;
+ expected[1].flush_dep_npar = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31057,14 +31112,15 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 2;
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 2;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 2;
+ expected[0].flush_dep_npar = 1;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 2;
+ expected[1].flush_dep_npar = 1;
expected[2].is_protected = TRUE;
expected[2].is_pinned = TRUE;
- expected[2].child_flush_dep_height_rc[0] = 2;
- expected[2].flush_dep_height = 1;
+ expected[2].flush_dep_nchd = 2;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31144,13 +31200,10 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroy flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
+ expected[1].flush_dep_npar = 0;
expected[2].is_pinned = FALSE;
- expected[2].child_flush_dep_height_rc[0] = 0;
- expected[2].flush_dep_height = 0;
+ expected[2].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31180,12 +31233,13 @@ check_flush_deps_order(void)
*/
expected[0].is_protected = TRUE;
expected[0].is_pinned = TRUE;
- expected[0].child_flush_dep_height_rc[0] = 2;
- expected[0].flush_dep_height = 1;
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 0;
- expected[2].flush_dep_par_type = entry_type;
- expected[2].flush_dep_par_idx = 0;
+ expected[0].flush_dep_nchd = 2;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 0;
+ expected[1].flush_dep_npar = 1;
+ expected[2].flush_dep_par_type[0] = entry_type;
+ expected[2].flush_dep_par_idx[0] = 0;
+ expected[2].flush_dep_npar = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31266,12 +31320,9 @@ check_flush_deps_order(void)
* after destroy flush dependency
*/
expected[0].is_pinned = FALSE;
- expected[0].child_flush_dep_height_rc[0] = 0;
- expected[0].flush_dep_height = 0;
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
- expected[2].flush_dep_par_type = -1;
- expected[2].flush_dep_par_idx = -1;
+ expected[0].flush_dep_nchd = 0;
+ expected[1].flush_dep_npar = 0;
+ expected[2].flush_dep_npar = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31296,12 +31347,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31319,12 +31370,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 2;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 2;
+ expected[1].flush_dep_npar = 1;
expected[2].is_protected = TRUE;
expected[2].is_pinned = TRUE;
- expected[2].child_flush_dep_height_rc[1] = 1;
- expected[2].flush_dep_height = 2;
+ expected[2].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31342,12 +31393,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[2].flush_dep_par_type = entry_type;
- expected[2].flush_dep_par_idx = 3;
+ expected[2].flush_dep_par_type[0] = entry_type;
+ expected[2].flush_dep_par_idx[0] = 3;
+ expected[2].flush_dep_npar = 1;
expected[3].is_protected = TRUE;
expected[3].is_pinned = TRUE;
- expected[3].child_flush_dep_height_rc[2] = 1;
- expected[3].flush_dep_height = 3;
+ expected[3].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31445,11 +31496,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[2].flush_dep_par_type = -1;
- expected[2].flush_dep_par_idx = -1;
+ expected[2].flush_dep_npar = 0;
expected[3].is_pinned = FALSE;
- expected[3].child_flush_dep_height_rc[2] = 0;
- expected[3].flush_dep_height = 0;
+ expected[3].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31464,11 +31513,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[1].flush_dep_npar = 0;
expected[2].is_pinned = FALSE;
- expected[2].child_flush_dep_height_rc[1] = 0;
- expected[2].flush_dep_height = 0;
+ expected[2].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31483,11 +31530,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31514,10 +31559,10 @@ check_flush_deps_order(void)
*/
expected[0].is_protected = TRUE;
expected[0].is_pinned = TRUE;
- expected[0].child_flush_dep_height_rc[0] = 1;
- expected[0].flush_dep_height = 1;
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 0;
+ expected[0].flush_dep_nchd = 1;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 0;
+ expected[1].flush_dep_npar = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31535,15 +31580,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].child_flush_dep_height_rc[0] = 0;
- expected[0].child_flush_dep_height_rc[1] = 1;
- expected[0].flush_dep_height = 2;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
- expected[2].flush_dep_par_type = entry_type;
- expected[2].flush_dep_par_idx = 1;
+ expected[1].flush_dep_nchd = 1;
+ expected[2].flush_dep_par_type[0] = entry_type;
+ expected[2].flush_dep_par_idx[0] = 1;
+ expected[2].flush_dep_npar = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31561,18 +31603,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].child_flush_dep_height_rc[1] = 0;
- expected[0].child_flush_dep_height_rc[2] = 1;
- expected[0].flush_dep_height = 3;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].child_flush_dep_height_rc[1] = 1;
- expected[1].flush_dep_height = 2;
expected[2].is_protected = TRUE;
expected[2].is_pinned = TRUE;
- expected[2].child_flush_dep_height_rc[0] = 1;
- expected[2].flush_dep_height = 1;
- expected[3].flush_dep_par_type = entry_type;
- expected[3].flush_dep_par_idx = 2;
+ expected[2].flush_dep_nchd = 1;
+ expected[3].flush_dep_par_type[0] = entry_type;
+ expected[3].flush_dep_par_idx[0] = 2;
+ expected[3].flush_dep_npar = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31671,17 +31707,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].child_flush_dep_height_rc[2] = 0;
- expected[0].child_flush_dep_height_rc[1] = 1;
- expected[0].flush_dep_height = 2;
- expected[1].child_flush_dep_height_rc[1] = 0;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
expected[2].is_pinned = FALSE;
- expected[2].child_flush_dep_height_rc[0] = 0;
- expected[2].flush_dep_height = 0;
- expected[3].flush_dep_par_type = -1;
- expected[3].flush_dep_par_idx = -1;
+ expected[2].flush_dep_nchd = 0;
+ expected[3].flush_dep_npar = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31696,14 +31724,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].child_flush_dep_height_rc[1] = 0;
- expected[0].child_flush_dep_height_rc[0] = 1;
- expected[0].flush_dep_height = 1;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
- expected[2].flush_dep_par_type = -1;
- expected[2].flush_dep_par_idx = -1;
+ expected[1].flush_dep_nchd = 0;
+ expected[2].flush_dep_npar = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31719,10 +31742,8 @@ check_flush_deps_order(void)
* after destroying flush dependency
*/
expected[0].is_pinned = FALSE;
- expected[0].child_flush_dep_height_rc[0] = 0;
- expected[0].flush_dep_height = 0;
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[0].flush_dep_nchd = 0;
+ expected[1].flush_dep_npar = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31746,12 +31767,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[u].flush_dep_par_type = entry_type;
- expected[u].flush_dep_par_idx = 4;
+ expected[u].flush_dep_par_type[0] = entry_type;
+ expected[u].flush_dep_par_idx[0] = 4;
+ expected[u].flush_dep_npar = 1;
expected[4].is_protected = TRUE;
expected[4].is_pinned = TRUE;
- expected[4].child_flush_dep_height_rc[0] = u + 1;
- expected[4].flush_dep_height = 1;
+ expected[4].flush_dep_nchd = u + 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31846,9 +31867,8 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[u].flush_dep_par_type = -1;
- expected[u].flush_dep_par_idx = -1;
- expected[4].child_flush_dep_height_rc[0] = 3 - u;
+ expected[u].flush_dep_npar = 0;
+ expected[4].flush_dep_nchd = 3 - u;
/* Check for destroying flush dependency on last entry */
if(3 == u) {
@@ -31856,7 +31876,6 @@ check_flush_deps_order(void)
* after destroying flush dependency
*/
expected[4].is_pinned = FALSE;
- expected[4].flush_dep_height = 0;
} /* end if */
/* Verify the status */
@@ -31882,12 +31901,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[u].flush_dep_par_type = entry_type;
- expected[u].flush_dep_par_idx = 0;
+ expected[u].flush_dep_par_type[0] = entry_type;
+ expected[u].flush_dep_par_idx[0] = 0;
+ expected[u].flush_dep_npar = 1;
expected[0].is_protected = TRUE;
expected[0].is_pinned = TRUE;
- expected[0].child_flush_dep_height_rc[0] = u;
- expected[0].flush_dep_height = 1;
+ expected[0].flush_dep_nchd = u;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -31982,9 +32001,8 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[u].flush_dep_par_type = -1;
- expected[u].flush_dep_par_idx = -1;
- expected[0].child_flush_dep_height_rc[0] = 4 - u;
+ expected[u].flush_dep_npar = 0;
+ expected[0].flush_dep_nchd = 4 - u;
/* Check for destroying flush dependency on last entry */
if(4 == u) {
@@ -31992,7 +32010,6 @@ check_flush_deps_order(void)
* after destroying flush dependency
*/
expected[0].is_pinned = FALSE;
- expected[0].flush_dep_height = 0;
} /* end if */
/* Verify the status */
@@ -32021,12 +32038,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32044,12 +32061,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[3].flush_dep_par_type = entry_type;
- expected[3].flush_dep_par_idx = 4;
+ expected[3].flush_dep_par_type[0] = entry_type;
+ expected[3].flush_dep_par_idx[0] = 4;
+ expected[3].flush_dep_npar = 1;
expected[4].is_protected = TRUE;
expected[4].is_pinned = TRUE;
- expected[4].child_flush_dep_height_rc[0] = 1;
- expected[4].flush_dep_height = 1;
+ expected[4].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32067,15 +32084,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[4].flush_dep_par_type = entry_type;
- expected[4].flush_dep_par_idx = 0;
+ expected[4].flush_dep_par_type[0] = entry_type;
+ expected[4].flush_dep_par_idx[0] = 0;
+ expected[4].flush_dep_npar = 1;
expected[0].is_protected = TRUE;
expected[0].is_pinned = TRUE;
- expected[0].child_flush_dep_height_rc[1] = 1;
- expected[0].flush_dep_height = 2;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].child_flush_dep_height_rc[2] = 1;
- expected[1].flush_dep_height = 3;
+ expected[0].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32180,14 +32194,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[4].flush_dep_par_type = -1;
- expected[4].flush_dep_par_idx = -1;
+ expected[4].flush_dep_npar = 0;
expected[0].is_pinned = FALSE;
- expected[0].child_flush_dep_height_rc[1] = 0;
- expected[0].flush_dep_height = 0;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].child_flush_dep_height_rc[2] = 0;
- expected[1].flush_dep_height = 1;
+ expected[0].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32202,11 +32211,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[3].flush_dep_par_type = -1;
- expected[3].flush_dep_par_idx = -1;
+ expected[3].flush_dep_npar = 0;
expected[4].is_pinned = FALSE;
- expected[4].child_flush_dep_height_rc[0] = 0;
- expected[4].flush_dep_height = 0;
+ expected[4].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32221,11 +32228,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32252,12 +32257,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32275,12 +32280,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 2;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 2;
+ expected[1].flush_dep_npar = 1;
expected[2].is_protected = TRUE;
expected[2].is_pinned = TRUE;
- expected[2].child_flush_dep_height_rc[1] = 1;
- expected[2].flush_dep_height = 2;
+ expected[2].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32298,12 +32303,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[3].flush_dep_par_type = entry_type;
- expected[3].flush_dep_par_idx = 4;
+ expected[3].flush_dep_par_type[0] = entry_type;
+ expected[3].flush_dep_par_idx[0] = 4;
+ expected[3].flush_dep_npar = 1;
expected[4].is_protected = TRUE;
expected[4].is_pinned = TRUE;
- expected[4].child_flush_dep_height_rc[0] = 1;
- expected[4].flush_dep_height = 1;
+ expected[4].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32318,13 +32323,10 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[4].flush_dep_par_type = entry_type;
- expected[4].flush_dep_par_idx = 1;
- expected[1].child_flush_dep_height_rc[1] = 1;
- expected[1].flush_dep_height = 2;
- expected[2].child_flush_dep_height_rc[1] = 0;
- expected[2].child_flush_dep_height_rc[2] = 1;
- expected[2].flush_dep_height = 3;
+ expected[4].flush_dep_par_type[0] = entry_type;
+ expected[4].flush_dep_par_idx[0] = 1;
+ expected[4].flush_dep_npar = 1;
+ expected[1].flush_dep_nchd = 2;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32433,13 +32435,8 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[4].flush_dep_par_type = -1;
- expected[4].flush_dep_par_idx = -1;
- expected[1].child_flush_dep_height_rc[1] = 0;
- expected[1].flush_dep_height = 1;
- expected[2].child_flush_dep_height_rc[1] = 1;
- expected[2].child_flush_dep_height_rc[2] = 0;
- expected[2].flush_dep_height = 2;
+ expected[4].flush_dep_npar = 0;
+ expected[1].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32454,11 +32451,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[3].flush_dep_par_type = -1;
- expected[3].flush_dep_par_idx = -1;
+ expected[3].flush_dep_npar = 0;
expected[4].is_pinned = FALSE;
- expected[4].child_flush_dep_height_rc[0] = 0;
- expected[4].flush_dep_height = 0;
+ expected[4].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32473,11 +32468,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[1].flush_dep_npar = 0;
expected[2].is_pinned = FALSE;
- expected[2].child_flush_dep_height_rc[1] = 0;
- expected[2].flush_dep_height = 0;
+ expected[2].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32492,11 +32485,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32523,12 +32514,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[0].flush_dep_par_type = entry_type;
- expected[0].flush_dep_par_idx = 1;
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
expected[1].is_protected = TRUE;
expected[1].is_pinned = TRUE;
- expected[1].child_flush_dep_height_rc[0] = 1;
- expected[1].flush_dep_height = 1;
+ expected[1].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32546,12 +32537,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[1].flush_dep_par_type = entry_type;
- expected[1].flush_dep_par_idx = 2;
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 2;
+ expected[1].flush_dep_npar = 1;
expected[2].is_protected = TRUE;
expected[2].is_pinned = TRUE;
- expected[2].child_flush_dep_height_rc[1] = 1;
- expected[2].flush_dep_height = 2;
+ expected[2].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32569,12 +32560,12 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[3].flush_dep_par_type = entry_type;
- expected[3].flush_dep_par_idx = 4;
+ expected[3].flush_dep_par_type[0] = entry_type;
+ expected[3].flush_dep_par_idx[0] = 4;
+ expected[3].flush_dep_npar = 1;
expected[4].is_protected = TRUE;
expected[4].is_pinned = TRUE;
- expected[4].child_flush_dep_height_rc[0] = 1;
- expected[4].flush_dep_height = 1;
+ expected[4].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32589,9 +32580,10 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after creating flush dependency
*/
- expected[4].flush_dep_par_type = entry_type;
- expected[4].flush_dep_par_idx = 2;
- expected[2].child_flush_dep_height_rc[1] = 2;
+ expected[4].flush_dep_par_type[0] = entry_type;
+ expected[4].flush_dep_par_idx[0] = 2;
+ expected[4].flush_dep_npar = 1;
+ expected[2].flush_dep_nchd = 2;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32670,7 +32662,7 @@ check_flush_deps_order(void)
expected[0].is_protected = FALSE;
expected[1].is_dirty = FALSE;
expected[1].serialized = TRUE;
- expected[1].flush_order = 2;
+ expected[1].flush_order = 1;
expected[1].is_protected = FALSE;
expected[2].is_dirty = FALSE;
expected[2].serialized = TRUE;
@@ -32678,7 +32670,7 @@ check_flush_deps_order(void)
expected[2].is_protected = FALSE;
expected[3].is_dirty = FALSE;
expected[3].serialized = TRUE;
- expected[3].flush_order = 1;
+ expected[3].flush_order = 2;
expected[4].is_dirty = FALSE;
expected[4].serialized = TRUE;
expected[4].flush_order = 3;
@@ -32700,9 +32692,8 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[4].flush_dep_par_type = -1;
- expected[4].flush_dep_par_idx = -1;
- expected[2].child_flush_dep_height_rc[1] = 1;
+ expected[4].flush_dep_npar = 0;
+ expected[2].flush_dep_nchd = 1;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32717,11 +32708,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[3].flush_dep_par_type = -1;
- expected[3].flush_dep_par_idx = -1;
+ expected[3].flush_dep_npar = 0;
expected[4].is_pinned = FALSE;
- expected[4].child_flush_dep_height_rc[0] = 0;
- expected[4].flush_dep_height = 0;
+ expected[4].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32736,11 +32725,9 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[1].flush_dep_par_type = -1;
- expected[1].flush_dep_par_idx = -1;
+ expected[1].flush_dep_npar = 0;
expected[2].is_pinned = FALSE;
- expected[2].child_flush_dep_height_rc[1] = 0;
- expected[2].flush_dep_height = 0;
+ expected[2].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32755,11 +32742,641 @@ check_flush_deps_order(void)
/* Change expected values, and verify the status of the entries
* after destroying flush dependency
*/
- expected[0].flush_dep_par_type = -1;
- expected[0].flush_dep_par_idx = -1;
+ expected[0].flush_dep_npar = 0;
expected[1].is_pinned = FALSE;
- expected[1].child_flush_dep_height_rc[0] = 0;
- expected[1].flush_dep_height = 0;
+ expected[1].flush_dep_nchd = 0;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+ }
+
+/* Test Case #6a - Interlocked multiple parents, increasing addr order
+ */
+
+ /* Create flush dependencies between entries 0-3, with each entry a child
+ * of every entry with a higher number.
+ */
+ {
+ protect_entry(file_ptr, entry_type, 1);
+ if ( !pass ) CACHE_ERROR("protect_entry failed")
+
+ create_flush_dependency(entry_type, 1, entry_type, 0);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[0].flush_dep_par_type[0] = entry_type;
+ expected[0].flush_dep_par_idx[0] = 1;
+ expected[0].flush_dep_npar = 1;
+ expected[1].is_protected = TRUE;
+ expected[1].is_pinned = TRUE;
+ expected[1].flush_dep_nchd = 1;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ protect_entry(file_ptr, entry_type, 2);
+ if ( !pass ) CACHE_ERROR("protect_entry failed")
+
+ create_flush_dependency(entry_type, 2, entry_type, 0);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[0].flush_dep_par_type[1] = entry_type;
+ expected[0].flush_dep_par_idx[1] = 2;
+ expected[0].flush_dep_npar = 2;
+ expected[2].is_protected = TRUE;
+ expected[2].is_pinned = TRUE;
+ expected[2].flush_dep_nchd = 1;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ create_flush_dependency(entry_type, 2, entry_type, 1);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 2;
+ expected[1].flush_dep_npar = 1;
+ expected[2].flush_dep_nchd = 2;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ protect_entry(file_ptr, entry_type, 3);
+ if ( !pass ) CACHE_ERROR("protect_entry failed")
+
+ create_flush_dependency(entry_type, 3, entry_type, 0);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[0].flush_dep_par_type[2] = entry_type;
+ expected[0].flush_dep_par_idx[2] = 3;
+ expected[0].flush_dep_npar = 3;
+ expected[3].is_protected = TRUE;
+ expected[3].is_pinned = TRUE;
+ expected[3].flush_dep_nchd = 1;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ create_flush_dependency(entry_type, 3, entry_type, 1);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[1].flush_dep_par_type[1] = entry_type;
+ expected[1].flush_dep_par_idx[1] = 3;
+ expected[1].flush_dep_npar = 2;
+ expected[3].flush_dep_nchd = 2;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ create_flush_dependency(entry_type, 3, entry_type, 2);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[2].flush_dep_par_type[0] = entry_type;
+ expected[2].flush_dep_par_idx[0] = 3;
+ expected[2].flush_dep_npar = 1;
+ expected[3].flush_dep_nchd = 3;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ }
+
+ /* Flush the cache and verify that the entries were flushed in correct order */
+ {
+ herr_t result; /* Generic return value */
+
+ add_flush_op(entry_type, 0, FLUSH_OP__ORDER,
+ entry_type, 0, FALSE, (size_t)0, &flush_order);
+ add_flush_op(entry_type, 1, FLUSH_OP__ORDER,
+ entry_type, 0, FALSE, (size_t)0, &flush_order);
+ add_flush_op(entry_type, 2, FLUSH_OP__ORDER,
+ entry_type, 0, FALSE, (size_t)0, &flush_order);
+ add_flush_op(entry_type, 3, FLUSH_OP__ORDER,
+ entry_type, 0, FALSE, (size_t)0, &flush_order);
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 1, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ if ( !pass ) CACHE_ERROR("unprotect_entry failed")
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 2, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ if ( !pass ) CACHE_ERROR("unprotect_entry failed")
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 3, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ if ( !pass ) CACHE_ERROR("unprotect_entry failed")
+
+ /* Mark entries 0-3 dirty, so they are flushed */
+ dirty_entry(file_ptr, entry_type, 0, FALSE);
+ dirty_entry(file_ptr, entry_type, 1, TRUE);
+ dirty_entry(file_ptr, entry_type, 2, TRUE);
+ dirty_entry(file_ptr, entry_type, 3, FALSE);
+ if ( !pass ) CACHE_ERROR("dirty_entry failed")
+
+ /* Reset 'flushed' flag & 'flush_order' value in expected array */
+ expected[0].serialized = FALSE;
+ expected[0].flush_order = -1;
+ expected[1].serialized = FALSE;
+ expected[1].flush_order = -1;
+ expected[2].serialized = FALSE;
+ expected[2].flush_order = -1;
+ expected[3].serialized = FALSE;
+ expected[3].flush_order = -1;
+
+ /* Reset index for tracking flush order */
+ flush_order = 0;
+
+ result = H5C_flush_cache(file_ptr, H5P_DATASET_XFER_DEFAULT, H5C__NO_FLAGS_SET);
+ if( result < 0 ) CACHE_ERROR("flushing entries with flush dependendices")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroy flush dependency
+ */
+ expected[0].is_dirty = FALSE;
+ expected[0].serialized = TRUE;
+ expected[0].flush_order = 0;
+ expected[0].is_protected = FALSE;
+ expected[1].is_dirty = FALSE;
+ expected[1].serialized = TRUE;
+ expected[1].flush_order = 1;
+ expected[1].is_protected = FALSE;
+ expected[2].is_dirty = FALSE;
+ expected[2].serialized = TRUE;
+ expected[2].flush_order = 2;
+ expected[2].is_protected = FALSE;
+ expected[3].is_dirty = FALSE;
+ expected[3].serialized = TRUE;
+ expected[3].flush_order = 3;
+ expected[3].is_protected = FALSE;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+ }
+
+ /* Destroy flush dependency between entries, in reverse order */
+ {
+ destroy_flush_dependency(entry_type, 3, entry_type, 2);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[2].flush_dep_npar = 0;
+ expected[3].flush_dep_nchd = 2;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ destroy_flush_dependency(entry_type, 3, entry_type, 1);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[1].flush_dep_npar = 1;
+ expected[3].flush_dep_nchd = 1;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ destroy_flush_dependency(entry_type, 3, entry_type, 0);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[0].flush_dep_npar = 2;
+ expected[3].is_pinned = FALSE;
+ expected[3].flush_dep_nchd = 0;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ destroy_flush_dependency(entry_type, 2, entry_type, 1);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[1].flush_dep_npar = 0;
+ expected[2].flush_dep_nchd = 1;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ destroy_flush_dependency(entry_type, 2, entry_type, 0);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[0].flush_dep_npar = 1;
+ expected[2].is_pinned = FALSE;
+ expected[2].flush_dep_nchd = 0;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ destroy_flush_dependency(entry_type, 1, entry_type, 0);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[0].flush_dep_npar = 0;
+ expected[1].is_pinned = FALSE;
+ expected[1].flush_dep_nchd = 0;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+ }
+
+/* Test Case #6b - Interlocked multiple parents, decreasing addr order
+ */
+
+ /* Create flush dependencies between entries 0-3, with each entry a parent
+ * of every entry with a higher number.
+ */
+ {
+ protect_entry(file_ptr, entry_type, 2);
+ if ( !pass ) CACHE_ERROR("protect_entry failed")
+
+ create_flush_dependency(entry_type, 2, entry_type, 3);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[3].flush_dep_par_type[0] = entry_type;
+ expected[3].flush_dep_par_idx[0] = 2;
+ expected[3].flush_dep_npar = 1;
+ expected[2].is_protected = TRUE;
+ expected[2].is_pinned = TRUE;
+ expected[2].flush_dep_nchd = 1;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ protect_entry(file_ptr, entry_type, 1);
+ if ( !pass ) CACHE_ERROR("protect_entry failed")
+
+ create_flush_dependency(entry_type, 1, entry_type, 3);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[3].flush_dep_par_type[1] = entry_type;
+ expected[3].flush_dep_par_idx[1] = 1;
+ expected[3].flush_dep_npar = 2;
+ expected[1].is_protected = TRUE;
+ expected[1].is_pinned = TRUE;
+ expected[1].flush_dep_nchd = 1;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ create_flush_dependency(entry_type, 1, entry_type, 2);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[2].flush_dep_par_type[0] = entry_type;
+ expected[2].flush_dep_par_idx[0] = 1;
+ expected[2].flush_dep_npar = 1;
+ expected[1].flush_dep_nchd = 2;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ protect_entry(file_ptr, entry_type, 0);
+ if ( !pass ) CACHE_ERROR("protect_entry failed")
+
+ create_flush_dependency(entry_type, 0, entry_type, 3);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[3].flush_dep_par_type[2] = entry_type;
+ expected[3].flush_dep_par_idx[2] = 0;
+ expected[3].flush_dep_npar = 3;
+ expected[0].is_protected = TRUE;
+ expected[0].is_pinned = TRUE;
+ expected[0].flush_dep_nchd = 1;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ create_flush_dependency(entry_type, 0, entry_type, 2);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[2].flush_dep_par_type[1] = entry_type;
+ expected[2].flush_dep_par_idx[1] = 0;
+ expected[2].flush_dep_npar = 2;
+ expected[0].flush_dep_nchd = 2;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ create_flush_dependency(entry_type, 0, entry_type, 1);
+ if ( !pass ) CACHE_ERROR("create_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after creating flush dependency
+ */
+ expected[1].flush_dep_par_type[0] = entry_type;
+ expected[1].flush_dep_par_idx[0] = 0;
+ expected[1].flush_dep_npar = 1;
+ expected[0].flush_dep_nchd = 3;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ }
+
+ /* Flush the cache and verify that the entries were flushed in correct order */
+ {
+ herr_t result; /* Generic return value */
+
+ add_flush_op(entry_type, 0, FLUSH_OP__ORDER,
+ entry_type, 0, FALSE, (size_t)0, &flush_order);
+ add_flush_op(entry_type, 1, FLUSH_OP__ORDER,
+ entry_type, 0, FALSE, (size_t)0, &flush_order);
+ add_flush_op(entry_type, 2, FLUSH_OP__ORDER,
+ entry_type, 0, FALSE, (size_t)0, &flush_order);
+ add_flush_op(entry_type, 3, FLUSH_OP__ORDER,
+ entry_type, 0, FALSE, (size_t)0, &flush_order);
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 0, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ if ( !pass ) CACHE_ERROR("unprotect_entry failed")
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 1, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ if ( !pass ) CACHE_ERROR("unprotect_entry failed")
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 2, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ if ( !pass ) CACHE_ERROR("unprotect_entry failed")
+
+ /* Mark entries 0-3 dirty, so they are flushed */
+ dirty_entry(file_ptr, entry_type, 0, FALSE);
+ dirty_entry(file_ptr, entry_type, 1, TRUE);
+ dirty_entry(file_ptr, entry_type, 2, TRUE);
+ dirty_entry(file_ptr, entry_type, 3, FALSE);
+ if ( !pass ) CACHE_ERROR("dirty_entry failed")
+
+ /* Reset 'flushed' flag & 'flush_order' value in expected array */
+ expected[0].serialized = FALSE;
+ expected[0].flush_order = -1;
+ expected[1].serialized = FALSE;
+ expected[1].flush_order = -1;
+ expected[2].serialized = FALSE;
+ expected[2].flush_order = -1;
+ expected[3].serialized = FALSE;
+ expected[3].flush_order = -1;
+
+ /* Reset index for tracking flush order */
+ flush_order = 0;
+
+ result = H5C_flush_cache(file_ptr, H5P_DATASET_XFER_DEFAULT, H5C__NO_FLAGS_SET);
+ if( result < 0 ) CACHE_ERROR("flushing entries with flush dependendices")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroy flush dependency
+ */
+ expected[0].is_dirty = FALSE;
+ expected[0].serialized = TRUE;
+ expected[0].flush_order = 3;
+ expected[0].is_protected = FALSE;
+ expected[1].is_dirty = FALSE;
+ expected[1].serialized = TRUE;
+ expected[1].flush_order = 2;
+ expected[1].is_protected = FALSE;
+ expected[2].is_dirty = FALSE;
+ expected[2].serialized = TRUE;
+ expected[2].flush_order = 1;
+ expected[2].is_protected = FALSE;
+ expected[3].is_dirty = FALSE;
+ expected[3].serialized = TRUE;
+ expected[3].flush_order = 0;
+ expected[3].is_protected = FALSE;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+ }
+
+ /* Destroy flush dependency between entries, in reverse order */
+ {
+ destroy_flush_dependency(entry_type, 0, entry_type, 1);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[1].flush_dep_npar = 0;
+ expected[0].flush_dep_nchd = 2;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ destroy_flush_dependency(entry_type, 0, entry_type, 2);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[2].flush_dep_npar = 1;
+ expected[0].flush_dep_nchd = 1;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ destroy_flush_dependency(entry_type, 0, entry_type, 3);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[3].flush_dep_npar = 2;
+ expected[0].is_pinned = FALSE;
+ expected[0].flush_dep_nchd = 0;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ destroy_flush_dependency(entry_type, 1, entry_type, 2);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[2].flush_dep_npar = 0;
+ expected[1].flush_dep_nchd = 1;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ destroy_flush_dependency(entry_type, 1, entry_type, 3);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[3].flush_dep_npar = 1;
+ expected[1].is_pinned = FALSE;
+ expected[1].flush_dep_nchd = 0;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ (int)0, /* int tag */
+ (int)5, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ if ( !pass ) CACHE_ERROR("verify_entry_status failed")
+
+ destroy_flush_dependency(entry_type, 2, entry_type, 3);
+ if ( !pass ) CACHE_ERROR("destroy_flush_dependency failed")
+
+ /* Change expected values, and verify the status of the entries
+ * after destroying flush dependency
+ */
+ expected[3].flush_dep_npar = 0;
+ expected[2].is_pinned = FALSE;
+ expected[2].flush_dep_nchd = 0;
/* Verify the status */
verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
@@ -32809,13 +33426,13 @@ check_notify_cb(void)
unsigned u; /* Local index variable */
struct expected_entry_status expected[5] =
{
- /* entry entry in at main flush dep flush dep child flush flush flush */
- /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type: par idx: dep ref.count: dep height: order: */
- { NOTIFY_ENTRY_TYPE, 0, NOTIFY_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { NOTIFY_ENTRY_TYPE, 1, NOTIFY_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { NOTIFY_ENTRY_TYPE, 2, NOTIFY_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { NOTIFY_ENTRY_TYPE, 3, NOTIFY_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { NOTIFY_ENTRY_TYPE, 4, NOTIFY_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 }
+ /* entry entry in at main flush dep flush dep child flush flush flush */
+ /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type[]: par idx[]: dep npart: dep nchd: dep ndirty chd: order: corked: */
+ { NOTIFY_ENTRY_TYPE, 0, NOTIFY_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { NOTIFY_ENTRY_TYPE, 1, NOTIFY_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { NOTIFY_ENTRY_TYPE, 2, NOTIFY_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { NOTIFY_ENTRY_TYPE, 3, NOTIFY_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { NOTIFY_ENTRY_TYPE, 4, NOTIFY_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE}
};
TESTING("'notify' callback");
@@ -32986,6 +33603,740 @@ done:
/*-------------------------------------------------------------------------
+ * Function: check_metadata_cork
+ *
+ * Purpose: To verify that dirty corked entries are not evicted from the cache
+ * but clean corked entries can be evicted from the cache.
+ * The min_clean_size does not have effect.
+ * NOTE: This is a modification of check_metadata_blizzard_absence().
+ *
+ * Return:	unsigned (0 on success, 1 on failure)
+ *
+ * Programmer: Vailin Choi
+ *
+ *-------------------------------------------------------------------------
+ */
+static unsigned
+check_metadata_cork(hbool_t fill_via_insertion)
+{
+ const char * fcn_name = "check_metadata_cork";
+ int entry_type = HUGE_ENTRY_TYPE;
+ size_t entry_size = HUGE_ENTRY_SIZE; /* 16 KB */
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ hbool_t show_progress = FALSE;
+ int32_t checkpoint = 0;
+ int32_t entry_idx = 0;
+ int32_t i;
+
+ /* Expected deserialized status of entries depends on how they get into
+ * the cache. Insertions = not deserialized, protect/unprotect = deserialized.
+ */
+ hbool_t deserialized = (hbool_t)!(fill_via_insertion);
+
+ /* Set up the expected array. This is used to maintain a table of the
+ * expected status of every entry used in this test.
+ */
+ struct expected_entry_status expected[150] =
+ {
+ /* entry entry in at main flush dep flush dep child flush flush flush */
+ /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type[]: par idx[]: dep npart: dep nchd: dep ndirty chd: order: corked: */
+ { HUGE_ENTRY_TYPE, 0, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 1, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 2, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 3, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 4, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 5, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 6, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 7, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 8, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 9, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 10, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 11, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 12, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 13, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 14, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 15, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 16, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 17, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 18, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 19, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 20, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 21, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 22, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 23, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 24, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 25, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 26, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 27, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 28, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 29, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 30, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 31, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 32, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 33, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 34, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 35, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 36, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 37, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 38, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 39, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 40, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 41, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 42, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 43, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 44, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 45, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 46, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 47, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 48, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 49, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 50, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 51, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 52, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 53, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 54, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 55, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 56, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 57, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 58, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 59, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 60, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 61, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 62, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 63, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 64, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 65, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 66, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 67, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 68, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 69, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 70, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 71, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 72, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 73, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 74, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 75, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 76, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 77, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 78, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 79, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 80, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 81, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 82, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 83, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 84, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 85, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 86, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 87, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 88, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 89, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 90, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 91, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 92, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 93, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 94, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 95, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 96, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 97, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 98, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 99, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 100, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 101, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 102, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 103, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 104, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 105, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 106, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 107, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 108, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 109, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 110, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 111, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 112, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 113, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 114, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 115, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 116, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 117, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 118, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 119, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 120, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 121, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 122, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 123, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 124, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 125, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 126, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 127, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 128, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 129, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 130, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 131, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 132, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 133, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 134, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 135, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 136, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 137, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 138, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 139, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 140, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 141, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 142, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 143, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 144, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 145, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 146, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 147, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 148, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 149, HUGE_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE}
+ } ;
+
+ pass = TRUE;
+
+ reset_entries();
+
+ if (fill_via_insertion) {
+
+ TESTING("to ensure cork/uncork metadata when inserting");
+
+ } else {
+
+ TESTING("to ensure cork/uncork metadata on protect/unprotect");
+ }
+
+ if ( show_progress) /* 0 */
+ HDfprintf(stdout, "\n%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) {
+
+ /* Set up the cache.
+ *
+ * The max_cache_size should have room for 50 entries.
+ * The min_clean_size is half of that, or 25 entries.
+ */
+ file_ptr = setup_cache((size_t)(50 * entry_size), /* max_cache_size */
+ (size_t)(25 * entry_size)); /* min_clean_size */
+
+ if ( file_ptr == NULL) {
+
+ pass = FALSE;
+ failure_mssg = "bad return from cache intialization.\n";
+
+ }
+ else
+ cache_ptr = file_ptr->shared->cache;
+ }
+
+ /* Cork the cache entry type */
+ cork_entry_type(file_ptr, entry_type);
+
+ if ( show_progress) /* 1 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ /* ========================================================================
+ * ========================================================================
+ * Phase 1:
+ *
+ * Inserting dirty corked entries into an empty cache, until the cache
+ * violates the min_clean_size requirement.
+ * Since entries are all dirty and corked, no entry will get flushed or
+ * evicted.
+ *
+ * ========================================================================
+ * ========================================================================
+ */
+
+ if ( pass ) {
+
+ /* Insert 26 entries (indexes 0 through 25) into the cache. */
+ for (entry_idx = 0; entry_idx < 26; entry_idx++) {
+
+ if (fill_via_insertion) {
+ insert_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+
+ } else {
+ protect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx); /* int32-t idx */
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ H5C__DIRTIED_FLAG); /* unsigned int flags */
+ }
+
+ /* Change expected values, and verify the status of the entries
+ * after each insertion
+ */
+ expected[entry_idx].in_cache = TRUE;
+ expected[entry_idx].is_dirty = TRUE;
+ expected[entry_idx].deserialized = (unsigned char)deserialized;
+ expected[entry_idx].is_corked = TRUE;
+
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ entry_idx, /* int tag */
+ 150, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ }
+ }
+
+ if ( show_progress) /* 2 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ /* ========================================================================
+ * ========================================================================
+ * Phase 2:
+ *
+ * Inserting entries into a cache that violates the min_clean_size,
+ * until the cache is full.
+ * Since entries are all dirty and corked, no entry during this phase
+ * will get flushed or evicted.
+ *
+ * ========================================================================
+ * ========================================================================
+ */
+
+ if ( pass ) {
+
+ /* Insert the 27th entry (index = 26) into the cache. */
+ if (fill_via_insertion) {
+ insert_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx++, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ } else {
+ protect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx); /* int32-t idx */
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx++, /* int32_t idx */
+ H5C__DIRTIED_FLAG); /* unsigned int flags */
+ }
+
+ /* Verify the status of the entries.
+ *
+ * Expected status is that there are 27 entries in the cache, and
+ * all entries remain the same as before since they are all corked
+ * and dirty
+ */
+
+ /* entry w/ index 26 is now in the cache and dirty. */
+ expected[26].in_cache = TRUE;
+ expected[26].is_dirty = TRUE;
+ expected[26].deserialized = (unsigned char)deserialized;
+ expected[26].is_corked = TRUE;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ 26, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+
+
+ if ( show_progress) /* 3 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) {
+
+ /* Insert the 28th entry (index = 27) into the cache. */
+ if (fill_via_insertion) {
+ insert_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx++, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ } else {
+ protect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx); /* int32-t idx */
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx++, /* int32_t idx */
+ H5C__DIRTIED_FLAG); /* unsigned int flags */
+ }
+
+ /* Verify the status of the entries.
+ *
+ * Expected status is that there are 28 entries in the cache, and
+ * all entries are dirty corked entries.
+ *
+ */
+ expected[27].in_cache = TRUE;
+ expected[27].is_dirty = TRUE;
+ expected[27].deserialized = (unsigned char)deserialized;
+ expected[27].is_corked = TRUE;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ 27, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+
+
+ if ( show_progress) /* 4 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) {
+
+ /* Fill out the rest of the cache with entries */
+ /* Verify expected status of entries after each insertion */
+ for (entry_idx = entry_idx; entry_idx < 50; entry_idx++) {
+
+ if (fill_via_insertion) {
+ insert_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ } else {
+ protect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx); /* int32-t idx */
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ H5C__DIRTIED_FLAG); /* unsigned int flags */
+ }
+
+ /*
+ * Expected status: all entries are dirty corked entries.
+ */
+ expected[entry_idx].in_cache = TRUE;
+ expected[entry_idx].is_dirty = TRUE;
+ expected[entry_idx].deserialized = (unsigned char)deserialized;
+ expected[entry_idx].is_corked = TRUE;
+
+ /* Verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ entry_idx, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+
+ /* Verify that the cache is now full */
+ if ( cache_ptr->cache_full != TRUE ) {
+
+ pass = FALSE;
+ failure_mssg = "cache not completely filled.\n";
+ }
+ }
+
+ if ( show_progress) /* 5 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ /* ========================================================================
+ * ========================================================================
+ * Phase 3:
+ * Inserting entries into a cache that is completely full.
+ * No entry is flushed or evicted because all entries are dirty & corked.
+ *
+ * ========================================================================
+ * ========================================================================
+ */
+
+ if ( show_progress) /* 6 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) {
+
+ /* Insert 50 more entries (indices 50-99) into the cache. */
+ for (entry_idx = entry_idx; entry_idx < 100; entry_idx++) {
+
+ if (fill_via_insertion) {
+ insert_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ } else {
+ protect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx); /* int32-t idx */
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ H5C__DIRTIED_FLAG); /* unsigned int flags */
+ }
+
+ /* This past inserted entry is now in the cache: dirty and corked */
+ expected[entry_idx].in_cache = TRUE;
+ expected[entry_idx].is_dirty = TRUE;
+ expected[entry_idx].deserialized = (unsigned char)deserialized;
+ expected[entry_idx].is_corked = TRUE;
+
+ /* Verify this expected status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ entry_idx, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+ }
+
+ if ( show_progress) /* 7 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+
+ /* ========================================================================
+ * ========================================================================
+ * Phase 4:
+ *
+ * Flushing the entire cache, and then inserting entries into a cache
+ * that is completely full, but all clean.
+ *
+ * ========================================================================
+ * ========================================================================
+ */
+
+ if ( pass ) {
+
+ /* Flush the cache.
+ *
+ * We're doing this so we can repeat the above insertions, but
+ * starting from a cache filled with clean entries as opposed
+ * to an empty cache.
+ */
+
+ flush_cache(file_ptr, /* H5F_t * file_ptr */
+ FALSE, /* hbool_t destory_entries */
+ FALSE, /* hbool_t dump_stats */
+ FALSE); /* hbool_t dump_detailed_stats */
+
+ /* Verify that the cache is clean */
+ verify_clean();
+
+ /* Verify the status of the entries. */
+ /* All entries are flushed, clean but still corked */
+ for (i = 0; i < 100; i++) {
+ expected[i].serialized = TRUE;
+ expected[i].is_dirty = FALSE;
+ expected[i].is_corked = TRUE;
+ }
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ 0, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+
+ if ( show_progress) /* 8 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+
+ if ( pass ) {
+
+ /* Will evict 50 clean "corked" entries all at once when inserting the 100th entry */
+ for(i = 0; i < 51; i++) {
+ expected[i].in_cache = FALSE;
+ expected[i].destroyed = TRUE;
+ expected[i].is_corked = TRUE;
+ }
+
+ /* Insert the 100th entry */
+ if (fill_via_insertion) {
+ insert_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 100, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+
+ } else {
+ protect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 100); /* int32-t idx */
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 100, /* int32_t idx */
+ H5C__DIRTIED_FLAG); /* unsigned int flags */
+ }
+
+ /* The 100th inserted entry is now in the cache and dirty */
+ expected[100].in_cache = TRUE;
+ expected[100].is_dirty = TRUE;
+ expected[100].deserialized = (unsigned char)deserialized;
+ expected[100].is_corked = TRUE;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ 100, /* int tag */
+ 150, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+
+ }
+
+ if ( show_progress) /* 9 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) {
+
+ /* Insert 25 more corked entries (indexes 101 through 125) into the cache. */
+ /* Clean entry will be evicted one a time */
+ for (entry_idx = 101; entry_idx < 126; entry_idx++) {
+
+ if (fill_via_insertion) {
+ insert_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+
+ } else {
+ protect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx); /* int32-t idx */
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ H5C__DIRTIED_FLAG); /* unsigned int flags */
+ }
+
+ /* The inserted entry is now in the cache and dirty */
+ expected[entry_idx].in_cache = TRUE;
+ expected[entry_idx].is_dirty = TRUE;
+ expected[entry_idx].deserialized = (unsigned char)deserialized;
+ expected[entry_idx].is_corked = TRUE;
+
+ expected[entry_idx - 50].in_cache = FALSE;
+ expected[entry_idx - 50].destroyed = TRUE;
+ expected[entry_idx - 50].is_corked = TRUE;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ entry_idx, /* int tag */
+ 150, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ } /* end for */
+
+ }
+
+
+ if ( show_progress) /* 10 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+
+ if ( pass ) {
+
+ /* Insert the 127th entry (index = 126) into the cache. */
+ if (fill_via_insertion) {
+ insert_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 126, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+ } else {
+ protect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 126); /* int32-t idx */
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ 126, /* int32_t idx */
+ H5C__DIRTIED_FLAG); /* unsigned int flags */
+ }
+
+ /* Verify the status of the entries. */
+ expected[126].in_cache = TRUE;
+ expected[126].is_dirty = TRUE;
+ expected[126].deserialized = (unsigned char)deserialized;
+ expected[126].is_corked = TRUE;
+
+ expected[126 - 50].in_cache = FALSE;
+ expected[126 - 50].destroyed = TRUE;
+ expected[126 - 50].is_corked = TRUE;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ 126, /* int tag */
+ 150, /* int num_entries */
+ expected); /* expected */
+ }
+
+
+ if ( show_progress) /* 11 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+
+ if ( pass ) {
+
+ /* Insert entries w/ indices 127 through 149 into the cache */
+ for (entry_idx = 127; entry_idx < 150; entry_idx++) {
+
+ if (fill_via_insertion) {
+ insert_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ H5C__NO_FLAGS_SET); /* unsigned int flags */
+
+ } else {
+ protect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx); /* int32-t idx */
+
+ unprotect_entry(file_ptr, /* H5F_t * file_ptr */
+ entry_type, /* int32_t type */
+ entry_idx, /* int32_t idx */
+ H5C__DIRTIED_FLAG); /* unsigned int flags */
+ }
+
+ /* This past inserted entry is now in the cache, dirty and corked */
+ expected[entry_idx].in_cache = TRUE;
+ expected[entry_idx].is_dirty = TRUE;
+ expected[entry_idx].deserialized = (unsigned char)deserialized;
+ expected[entry_idx].is_corked = TRUE;
+
+ /* Entry that is 50 entries away will be evicted since it is clean even though corked */
+ expected[entry_idx - 50].in_cache = FALSE;
+ expected[entry_idx - 50].destroyed = TRUE;
+ expected[entry_idx - 50].is_corked = TRUE;
+
+ /* verify the status */
+ verify_entry_status(cache_ptr, /* H5C_t * cache_ptr */
+ entry_idx, /* int tag */
+ 150, /* int num_entries */
+ expected); /* struct expected_entry_staus[] */
+ }
+
+ }
+
+ if ( show_progress) /* 12 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+
+ /* We're done with testing. We can take down the cache. */
+ takedown_cache(file_ptr, FALSE, FALSE);
+ reset_entries();
+
+ if ( show_progress) /* 13 */
+ HDfprintf(stdout, "%s: check point %d -- pass %d\n",
+ fcn_name, checkpoint++, pass);
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ return (unsigned)!pass;
+
+} /* check_metadata_cork() */
+
+
+/*-------------------------------------------------------------------------
* Function: check_entry_deletions_during_scans()
*
* Purpose: With the addition of the H5C__TAKE_OWNERSHIP_FLAG, it is
@@ -33015,7 +34366,6 @@ done:
*
*-------------------------------------------------------------------------
*/
-
static unsigned
check_entry_deletions_during_scans(void)
{
@@ -33065,10 +34415,8 @@ check_entry_deletions_during_scans(void)
}
- if ( pass ) {
- takedown_cache(file_ptr, FALSE, FALSE);
- }
+ takedown_cache(file_ptr, FALSE, FALSE);
if ( pass ) { PASSED(); } else { H5_FAILED(); }
@@ -33125,12 +34473,12 @@ cedds__expunge_dirty_entry_in_flush_test(H5F_t * file_ptr)
* array only processes as much of it as it is told to, we don't have to
* worry about maintaining the status of entries that we haven't used yet.
*/
- /* entry entry in at main flush dep flush dep child flush flush flush */
- /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par idx: dep ref.count: dep height: order: */
- { HUGE_ENTRY_TYPE, 0, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 1, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 2, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 3, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 }
+ /* entry entry in at main flush dep flush dep child flush flush flush */
+ /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type[]: par idx[]: dep npart: dep nchd: dep ndirty chd: order: corked: */
+ { HUGE_ENTRY_TYPE, 0, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 1, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 2, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 3, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE}
};
if ( pass ) {
@@ -33363,44 +34711,44 @@ cedds__H5C_make_space_in_cache(H5F_t * file_ptr)
* array only processes as much of it as it is told to, we don't have to
* worry about maintaining the status of entries that we haven't used yet.
*/
- /* entry entry in at main flush dep flush dep child flush flush flush */
- /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par idx: dep ref.count: dep height: order: */
- { HUGE_ENTRY_TYPE, 0, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 1, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 2, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { HUGE_ENTRY_TYPE, 3, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 0, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 1, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 2, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 3, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 4, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 5, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 6, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 7, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 8, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 9, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 10, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 11, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 12, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 13, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 14, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 15, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 16, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 17, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 18, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 19, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 20, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 21, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 22, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 23, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 24, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 25, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 26, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 27, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 28, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 29, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 30, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 31, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 }
+ /* entry entry in at main flush dep flush dep child flush flush flush */
+ /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type[]: par idx[]: dep npart: dep nchd: dep ndirty chd: order: corked: */
+ { HUGE_ENTRY_TYPE, 0, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 1, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 2, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { HUGE_ENTRY_TYPE, 3, HUGE_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 0, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 1, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 2, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 3, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 4, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 5, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 6, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 7, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 8, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 9, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 10, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 11, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 12, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 13, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 14, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 15, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 16, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 17, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 18, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 19, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 20, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 21, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 22, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 23, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 24, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 25, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 26, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 27, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 28, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 29, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 30, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 31, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
};
if ( pass ) {
@@ -33749,40 +35097,40 @@ cedds__H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * file_ptr)
* array only processes as much of it as it is told to, we don't have to
* worry about maintaining the status of entries that we haven't used yet.
*/
- /* entry entry in at main flush dep flush dep child flush flush flush */
- /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par idx: dep ref.count: dep height: order: */
- { MONSTER_ENTRY_TYPE, 0, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 1, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 2, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 3, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 4, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 5, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 6, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 7, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 8, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 9, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 10, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 11, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 12, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 13, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 14, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 15, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 16, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 17, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 18, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 19, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 20, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 21, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 22, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 23, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 24, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 25, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 26, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 27, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 28, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 29, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 30, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 31, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 }
+ /* entry entry in at main flush dep flush dep child flush flush flush */
+ /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type[]: par idx[]: dep npart: dep nchd: dep ndirty chd: order: corked: */
+ { MONSTER_ENTRY_TYPE, 0, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 1, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 2, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 3, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 4, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 5, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 6, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 7, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 8, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 9, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 10, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 11, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 12, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 13, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 14, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 15, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 16, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 17, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 18, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 19, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 20, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 21, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 22, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 23, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 24, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 25, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 26, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 27, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 28, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 29, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 30, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 31, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,-1,-1,-1,-1,-1,-1,-1}, {-1,-1,-1,-1,-1,-1,-1,-1}, 0, 0, 0, -1, FALSE},
};
H5C_auto_size_ctl_t saved_auto_size_ctl;
H5C_auto_size_ctl_t test_auto_size_ctl =
@@ -34213,13 +35561,13 @@ cedds__H5C_flush_invalidate_cache__bucket_scan(H5F_t * file_ptr)
* array only processes as much of it as it is told to, we don't have to
* worry about maintaining the status of entries that we haven't used yet.
*/
- /* entry entry in at main flush dep flush dep child flush flush flush */
- /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type: par idx: dep ref.count: dep height: order: */
- { MONSTER_ENTRY_TYPE, 0, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, FALSE, -1, -1, {1,0,0,0,0,0}, 1, -1 },
- { MONSTER_ENTRY_TYPE, 8, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 16, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 24, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, -1, -1, {0,0,0,0,0,0}, 0, -1 },
- { MONSTER_ENTRY_TYPE, 31, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, MONSTER_ENTRY_TYPE, 0, {0,0,0,0,0,0}, 0, -1 }
+ /* entry entry in at main flush dep flush dep child flush flush flush */
+ /* type: index: size: cache: addr: dirty: prot: pinned: dsrlzd: srlzd: dest: par type[]: par idx[]: dep npart: dep nchd: dep ndirty chd: order: corked */
+ { MONSTER_ENTRY_TYPE, 0, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, FALSE, {-1,0,0,0,0,0,0,0}, {-1,0,0,0,0,0,0,0}, 0, 1, 1, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 8, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,0,0,0,0,0,0,0}, {-1,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 16, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,0,0,0,0,0,0,0}, {-1,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 24, MONSTER_ENTRY_SIZE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, {-1,0,0,0,0,0,0,0}, {-1,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE},
+ { MONSTER_ENTRY_TYPE, 31, MONSTER_ENTRY_SIZE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, {MONSTER_ENTRY_TYPE,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 1, 0, 0, -1, FALSE},
};
if ( pass ) {
@@ -35114,8 +36462,10 @@ main(void)
nerrs += check_resize_entry_errs();
nerrs += check_unprotect_ro_dirty_err();
nerrs += check_protect_ro_rw_err();
+ nerrs += check_protect_retries();
nerrs += check_check_evictions_enabled_err();
- nerrs += check_auto_cache_resize();
+ nerrs += check_auto_cache_resize(FALSE);
+ nerrs += check_auto_cache_resize(TRUE);
nerrs += check_auto_cache_resize_disable();
nerrs += check_auto_cache_resize_epoch_markers();
nerrs += check_auto_cache_resize_input_errs();
@@ -35126,7 +36476,9 @@ main(void)
nerrs += check_flush_deps_err();
nerrs += check_flush_deps_order();
nerrs += check_notify_cb();
- nerrs += check_entry_deletions_during_scans();
+ nerrs += check_metadata_cork(TRUE);
+ nerrs += check_metadata_cork(FALSE);
+ nerrs += check_entry_deletions_during_scans(); /* OK NOW */
nerrs += check_stats();
return(nerrs > 0);
diff --git a/test/cache_common.c b/test/cache_common.c
index a3287cf..0433f1c 100644
--- a/test/cache_common.c
+++ b/test/cache_common.c
@@ -21,6 +21,7 @@
*/
#include "h5test.h"
#include "H5Cprivate.h"
+#include "H5ACprivate.h"
#include "H5Iprivate.h"
#include "H5MFprivate.h"
#include "H5MMprivate.h"
@@ -76,17 +77,30 @@ static test_entry_t notify_entries[NUM_NOTIFY_ENTRIES], orig_notify_entries[NUM_
hbool_t orig_entry_arrays_init = FALSE;
-static herr_t pico_get_load_size(const void *udata_ptr, size_t *image_len_ptr);
-static herr_t nano_get_load_size(const void *udata_ptr, size_t *image_len_ptr);
-static herr_t micro_get_load_size(const void *udata_ptr, size_t *image_len_ptr);
-static herr_t tiny_get_load_size(const void *udata_ptr, size_t *image_len_ptr);
-static herr_t small_get_load_size(const void *udata_ptr, size_t *image_len_ptr);
-static herr_t medium_get_load_size(const void *udata_ptr, size_t *image_len_ptr);
-static herr_t large_get_load_size(const void *udata_ptr, size_t *image_len_ptr);
-static herr_t huge_get_load_size(const void *udata_ptr, size_t *image_len_ptr);
-static herr_t monster_get_load_size(const void *udata_ptr, size_t *image_len_ptr);
-static herr_t variable_get_load_size(const void *udata_ptr, size_t *image_len_ptr);
-static herr_t notify_get_load_size(const void *udata_ptr, size_t *image_len_ptr);
+static herr_t pico_get_load_size(const void *image_ptr, void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len);
+static herr_t nano_get_load_size(const void *image_ptr, void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len);
+static herr_t micro_get_load_size(const void *image_ptr, void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len);
+static herr_t tiny_get_load_size(const void *image_ptr, void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len);
+static herr_t small_get_load_size(const void *image_ptr, void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len);
+static herr_t medium_get_load_size(const void *image_ptr, void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len);
+static herr_t large_get_load_size(const void *image_ptr, void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len);
+static herr_t huge_get_load_size(const void *image_ptr, void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len);
+static herr_t monster_get_load_size(const void *image_ptr, void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len);
+static herr_t variable_get_load_size(const void *image_ptr, void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len);
+static herr_t notify_get_load_size(const void *image_ptr, void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len);
+
+static htri_t variable_verify_chksum(const void *image_ptr, size_t len, void *udata_ptr);
static void *pico_deserialize(const void *image_ptr, size_t len, void *udata_ptr,
hbool_t *dirty_ptr);
@@ -206,9 +220,12 @@ static herr_t notify_free_icr(void *thing);
static herr_t notify_notify(H5C_notify_action_t action, void *thing);
+static void mark_flush_dep_dirty(test_entry_t * entry_ptr);
+static void mark_flush_dep_clean(test_entry_t * entry_ptr);
+
/* Generic callback routines */
-static herr_t get_load_size(const void *udata_ptr, size_t *image_len_ptr,
- int32_t entry_type);
+static herr_t get_load_size(const void *image_ptr, void *udata_ptr, size_t *image_len_ptr,
+ size_t *actual_len_ptr, int32_t entry_type);
static void *deserialize(const void *image_ptr, size_t len, void *udata_ptr,
hbool_t *dirty_ptr, int32_t entry_type);
static herr_t image_len(void *thing, size_t *image_len_ptr, int32_t entry_type);
@@ -342,6 +359,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
H5FD_MEM_DEFAULT,
H5C__CLASS_NO_FLAGS_SET,
(H5C_get_load_size_func_t)pico_get_load_size,
+ (H5C_verify_chksum_func_t)NULL,
(H5C_deserialize_func_t)pico_deserialize,
(H5C_image_len_func_t)pico_image_len,
(H5AC_pre_serialize_func_t)pico_pre_serialize,
@@ -357,6 +375,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
H5FD_MEM_DEFAULT,
H5C__CLASS_NO_FLAGS_SET,
(H5C_get_load_size_func_t)nano_get_load_size,
+ (H5C_verify_chksum_func_t)NULL,
(H5C_deserialize_func_t)nano_deserialize,
(H5C_image_len_func_t)nano_image_len,
(H5AC_pre_serialize_func_t)nano_pre_serialize,
@@ -372,6 +391,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
H5FD_MEM_DEFAULT,
H5C__CLASS_NO_FLAGS_SET,
(H5C_get_load_size_func_t)micro_get_load_size,
+ (H5C_verify_chksum_func_t)NULL,
(H5C_deserialize_func_t)micro_deserialize,
(H5C_image_len_func_t)micro_image_len,
(H5AC_pre_serialize_func_t)micro_pre_serialize,
@@ -387,6 +407,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
H5FD_MEM_DEFAULT,
H5C__CLASS_NO_FLAGS_SET,
(H5C_get_load_size_func_t)tiny_get_load_size,
+ (H5C_verify_chksum_func_t)NULL,
(H5C_deserialize_func_t)tiny_deserialize,
(H5C_image_len_func_t)tiny_image_len,
(H5AC_pre_serialize_func_t)tiny_pre_serialize,
@@ -402,6 +423,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
H5FD_MEM_DEFAULT,
H5C__CLASS_NO_FLAGS_SET,
(H5C_get_load_size_func_t)small_get_load_size,
+ (H5C_verify_chksum_func_t)NULL,
(H5C_deserialize_func_t)small_deserialize,
(H5C_image_len_func_t)small_image_len,
(H5AC_pre_serialize_func_t)small_pre_serialize,
@@ -417,6 +439,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
H5FD_MEM_DEFAULT,
H5C__CLASS_NO_FLAGS_SET,
(H5C_get_load_size_func_t)medium_get_load_size,
+ (H5C_verify_chksum_func_t)NULL,
(H5C_deserialize_func_t)medium_deserialize,
(H5C_image_len_func_t)medium_image_len,
(H5AC_pre_serialize_func_t)medium_pre_serialize,
@@ -432,6 +455,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
H5FD_MEM_DEFAULT,
H5C__CLASS_NO_FLAGS_SET,
(H5C_get_load_size_func_t)large_get_load_size,
+ (H5C_verify_chksum_func_t)NULL,
(H5C_deserialize_func_t)large_deserialize,
(H5C_image_len_func_t)large_image_len,
(H5AC_pre_serialize_func_t)large_pre_serialize,
@@ -447,6 +471,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
H5FD_MEM_DEFAULT,
H5C__CLASS_NO_FLAGS_SET,
(H5C_get_load_size_func_t)huge_get_load_size,
+ (H5C_verify_chksum_func_t)NULL,
(H5C_deserialize_func_t)huge_deserialize,
(H5C_image_len_func_t)huge_image_len,
(H5AC_pre_serialize_func_t)huge_pre_serialize,
@@ -462,6 +487,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
H5FD_MEM_DEFAULT,
H5C__CLASS_NO_FLAGS_SET,
(H5C_get_load_size_func_t)monster_get_load_size,
+ (H5C_verify_chksum_func_t)NULL,
(H5C_deserialize_func_t)monster_deserialize,
(H5C_image_len_func_t)monster_image_len,
(H5AC_pre_serialize_func_t)monster_pre_serialize,
@@ -477,6 +503,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
H5FD_MEM_DEFAULT,
H5C__CLASS_SPECULATIVE_LOAD_FLAG,
(H5C_get_load_size_func_t)variable_get_load_size,
+ (H5C_verify_chksum_func_t)variable_verify_chksum,
(H5C_deserialize_func_t)variable_deserialize,
(H5C_image_len_func_t)variable_image_len,
(H5AC_pre_serialize_func_t)variable_pre_serialize,
@@ -492,6 +519,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
H5FD_MEM_DEFAULT,
H5C__CLASS_NO_FLAGS_SET,
(H5C_get_load_size_func_t)notify_get_load_size,
+ (H5C_verify_chksum_func_t)NULL,
(H5C_deserialize_func_t)notify_deserialize,
(H5C_image_len_func_t)notify_image_len,
(H5AC_pre_serialize_func_t)notify_pre_serialize,
@@ -629,7 +657,7 @@ check_write_permitted(const H5F_t H5_ATTR_UNUSED *f, hbool_t *write_permitted_pt
*-------------------------------------------------------------------------
*/
static herr_t
-get_load_size(const void *udata, size_t *image_length, int32_t entry_type)
+get_load_size(const void *image, void *udata, size_t *image_length, size_t *actual_len, int32_t entry_type)
{
test_entry_t *entry;
test_entry_t *base_addr;
@@ -652,75 +680,139 @@ get_load_size(const void *udata, size_t *image_length, int32_t entry_type)
HDassert(entry == entry->self);
HDassert(entry->addr == addr);
- *image_length = entry->size;
+ if(image == NULL)
+ *image_length = entry->size;
+ else {
+ /* Simulate SPECULATIVE read with a specified actual_len */
+ if(type == VARIABLE_ENTRY_TYPE && entry->actual_len) {
+ *actual_len = entry->actual_len;
+ entry->size = entry->actual_len;
+ } else
+ *actual_len = entry->size;
+ }
return(SUCCEED);
} /* get_load_size() */
static herr_t
-pico_get_load_size(const void *udata, size_t *image_length)
+pico_get_load_size(const void *image, void *udata, size_t *image_length, size_t *actual_len)
{
- return get_load_size(udata, image_length, PICO_ENTRY_TYPE);
+ return get_load_size(image, udata, image_length, actual_len, PICO_ENTRY_TYPE);
}
static herr_t
-nano_get_load_size(const void *udata, size_t *image_length)
+nano_get_load_size(const void *image, void *udata, size_t *image_length, size_t *actual_len)
{
- return get_load_size(udata, image_length, NANO_ENTRY_TYPE);
+ return get_load_size(image, udata, image_length, actual_len, NANO_ENTRY_TYPE);
}
static herr_t
-micro_get_load_size(const void *udata, size_t *image_length)
+micro_get_load_size(const void *image, void *udata, size_t *image_length, size_t *actual_len)
{
- return get_load_size(udata, image_length, MICRO_ENTRY_TYPE);
+ return get_load_size(image, udata, image_length, actual_len, MICRO_ENTRY_TYPE);
}
static herr_t
-tiny_get_load_size(const void *udata, size_t *image_length)
+tiny_get_load_size(const void *image, void *udata, size_t *image_length, size_t *actual_len)
{
- return get_load_size(udata, image_length, TINY_ENTRY_TYPE);
+ return get_load_size(image, udata, image_length, actual_len, TINY_ENTRY_TYPE);
}
static herr_t
-small_get_load_size(const void *udata, size_t *image_length)
+small_get_load_size(const void *image, void *udata, size_t *image_length, size_t *actual_len)
{
- return get_load_size(udata, image_length, SMALL_ENTRY_TYPE);
+ return get_load_size(image, udata, image_length, actual_len, SMALL_ENTRY_TYPE);
}
static herr_t
-medium_get_load_size(const void *udata, size_t *image_length)
+medium_get_load_size(const void *image, void *udata, size_t *image_length, size_t *actual_len)
{
- return get_load_size(udata, image_length, MEDIUM_ENTRY_TYPE);
+ return get_load_size(image, udata, image_length, actual_len, MEDIUM_ENTRY_TYPE);
}
static herr_t
-large_get_load_size(const void *udata, size_t *image_length)
+large_get_load_size(const void *image, void *udata, size_t *image_length, size_t *actual_len)
{
- return get_load_size(udata, image_length, LARGE_ENTRY_TYPE);
+ return get_load_size(image, udata, image_length, actual_len, LARGE_ENTRY_TYPE);
}
static herr_t
-huge_get_load_size(const void *udata, size_t *image_length)
+huge_get_load_size(const void *image, void *udata, size_t *image_length, size_t *actual_len)
{
- return get_load_size(udata, image_length, HUGE_ENTRY_TYPE);
+ return get_load_size(image, udata, image_length, actual_len, HUGE_ENTRY_TYPE);
}
static herr_t
-monster_get_load_size(const void *udata, size_t *image_length)
+monster_get_load_size(const void *image, void *udata, size_t *image_length, size_t *actual_len)
{
- return get_load_size(udata, image_length, MONSTER_ENTRY_TYPE);
+ return get_load_size(image, udata, image_length, actual_len, MONSTER_ENTRY_TYPE);
}
static herr_t
-variable_get_load_size(const void *udata, size_t *image_length)
+variable_get_load_size(const void *image, void *udata, size_t *image_length, size_t *actual_len)
{
- return get_load_size(udata, image_length, VARIABLE_ENTRY_TYPE);
+ return get_load_size(image, udata, image_length, actual_len, VARIABLE_ENTRY_TYPE);
}
static herr_t
-notify_get_load_size(const void *udata, size_t *image_length)
+notify_get_load_size(const void *image, void *udata, size_t *image_length, size_t *actual_len)
+{
+ return get_load_size(image, udata, image_length, actual_len, NOTIFY_ENTRY_TYPE);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_chksum & friends
+ * (only done for VARIABLE_ENTRY_TYPE which has a speculative read)
+ *
+ * Purpose: Simulate checksum verification:
+ * --check is ok only after 'max_verify_ct' is reached
+ * --otherwise check is not ok
+ *
+ * Return: TRUE: checksum is ok
+ * FALSE: checksum is not ok
+ *
+ * Programmer:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static htri_t
+verify_chksum(const void H5_ATTR_UNUSED *image, size_t H5_ATTR_UNUSED len, void *udata, int32_t entry_type)
+{
+ test_entry_t *entry;
+ test_entry_t *base_addr;
+ haddr_t addr = *(const haddr_t *)udata;
+ int32_t type;
+ int32_t idx;
+
+ addr_to_type_and_index(addr, &type, &idx);
+
+ base_addr = entries[type];
+ entry = &(base_addr[idx]);
+
+ HDassert(entry->type >= 0);
+ HDassert(entry->type == type);
+ HDassert(entry->type == entry_type);
+ HDassert(entry->type < NUMBER_OF_ENTRY_TYPES);
+ HDassert(type == VARIABLE_ENTRY_TYPE);
+ HDassert(entry->index == idx);
+ HDassert(entry->index >= 0);
+ HDassert(entry->index <= max_indices[type]);
+ HDassert(entry == entry->self);
+ HDassert(entry->addr == addr);
+
+ if(++entry->verify_ct >= entry->max_verify_ct)
+ return(TRUE);
+ else
+ return(FALSE);
+
+} /* verify_chksum() */
+
+static htri_t
+variable_verify_chksum(const void *image, size_t len, void *udata)
{
- return get_load_size(udata, image_length, NOTIFY_ENTRY_TYPE);
+ return verify_chksum(image, len, udata, VARIABLE_ENTRY_TYPE);
}
@@ -765,6 +857,19 @@ deserialize(const void *image, size_t len, void *udata, hbool_t *dirty,
HDassert(entry->size == len);
HDassert((entry->type == VARIABLE_ENTRY_TYPE) || (entry->size == entry_sizes[type]));
HDassert(dirty != NULL);
+ HDassert( entry->flush_dep_npar == 0 );
+ HDassert( entry->flush_dep_nchd == 0 );
+
+#if 1 /* JRM */
+ if ( ! ( ( entry->type == VARIABLE_ENTRY_TYPE ) ||
+ ( entry->size == entry_sizes[type] ) ) ) {
+
+ HDfprintf(stdout, "entry type/index/size = %d/%d/%ld\n",
+ (int)(entry->type),
+ (int)(entry->index),
+ (long)(entry->size));
+ }
+#endif /* JRM */
/* for now *dirty will always be FALSE */
*dirty = FALSE;
@@ -1361,6 +1466,11 @@ serialize(const H5F_t H5_ATTR_UNUSED *f, void *image_ptr, size_t len, void *thin
*/
entry->is_dirty = FALSE;
+ if(entry->flush_dep_npar > 0) {
+ HDassert(entry->flush_dep_ndirty_chd == 0);
+ mark_flush_dep_clean(entry);
+ } /* end if */
+
/* since the entry is about to be written to disk, we can mark it
* as initialized.
*/
@@ -2177,6 +2287,8 @@ reset_entries(void)
base_addr[j].is_read_only = FALSE;
base_addr[j].ro_ref_count = FALSE;
+ base_addr[j].is_corked = FALSE;
+
base_addr[j].is_pinned = FALSE;
base_addr[j].pinning_ref_count = 0;
base_addr[j].num_pins = 0;
@@ -2202,11 +2314,9 @@ reset_entries(void)
base_addr[j].destroyed = FALSE;
base_addr[j].expunged = FALSE;
- base_addr[j].flush_dep_par_type = -1;
- base_addr[j].flush_dep_par_idx = -1;
- for ( k = 0; k < H5C__NUM_FLUSH_DEP_HEIGHTS; k++ )
- base_addr[j].child_flush_dep_height_rc[k] = 0;
- base_addr[j].flush_dep_height = 0;
+ base_addr[j].flush_dep_npar = 0;
+ base_addr[j].flush_dep_nchd = 0;
+ base_addr[j].flush_dep_ndirty_chd = 0;
base_addr[j].pinned_from_client = FALSE;
base_addr[j].pinned_from_cache = FALSE;
@@ -2215,6 +2325,10 @@ reset_entries(void)
base_addr[j].notify_after_insert_count = 0;
base_addr[j].notify_before_evict_count = 0;
+ base_addr[j].actual_len = 0;
+ base_addr[j].max_verify_ct = 0;
+ base_addr[j].verify_ct = 0;
+
addr += (haddr_t)entry_size;
alt_addr += (haddr_t)entry_size;
} /* end for */
@@ -2306,11 +2420,16 @@ resize_entry(H5F_t * file_ptr,
failure_mssg = "entry to be resized is not pinned or protected.";
} else {
+ hbool_t was_dirty = entry_ptr->is_dirty;
entry_ptr->size = new_size;
result = H5C_resize_entry((void *)entry_ptr, new_size);
entry_ptr->is_dirty = TRUE;
+ if(entry_ptr->flush_dep_npar > 0
+ && entry_ptr->flush_dep_ndirty_chd == 0
+ && !was_dirty)
+ mark_flush_dep_dirty(entry_ptr);
if ( result != SUCCEED ) {
@@ -2594,6 +2713,22 @@ verify_entry_status(H5C_t * cache_ptr,
}
}
+ if ( pass ) {
+
+ if ( entry_ptr->is_corked != expected[i].is_corked) {
+
+ pass = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) is_corked actual/expected = %d/%d.\n",
+ tag,
+ (int)expected[i].entry_type,
+ (int)expected[i].entry_index,
+ (int)(entry_ptr->is_corked),
+ (int)expected[i].is_corked);
+ failure_mssg = msg;
+ }
+ }
+
if ( ( pass ) && ( in_cache ) ) {
if ( entry_ptr->header.is_pinned != expected[i].is_pinned ) {
@@ -2634,105 +2769,119 @@ verify_entry_status(H5C_t * cache_ptr,
/* Check flush dependency fields */
- /* Flush dependency parent type & index */
- if ( pass ) {
- if ( entry_ptr->flush_dep_par_type != expected[i].flush_dep_par_type ) {
- pass = FALSE;
- sprintf(msg,
- "%d entry (%d, %d) flush_dep_par_type actual/expected = %d/%d.\n",
- tag,
- expected[i].entry_type,
- expected[i].entry_index,
- entry_ptr->flush_dep_par_type,
- expected[i].flush_dep_par_type);
- failure_mssg = msg;
- } /* end if */
- } /* end if */
- if ( pass ) {
- if ( entry_ptr->flush_dep_par_idx != expected[i].flush_dep_par_idx ) {
- pass = FALSE;
- sprintf(msg,
- "%d entry (%d, %d) flush_dep_par_idx actual/expected = %d/%d.\n",
- tag,
- expected[i].entry_type,
- expected[i].entry_index,
- entry_ptr->flush_dep_par_idx,
- expected[i].flush_dep_par_idx);
- failure_mssg = msg;
- } /* end if */
- } /* end if */
- if ( ( pass ) && ( in_cache ) && expected[i].flush_dep_par_idx >= 0 ) {
- test_entry_t * par_base_addr = entries[expected[i].flush_dep_par_type];
-
- if ( entry_ptr->header.flush_dep_parent != (H5C_cache_entry_t *)&(par_base_addr[expected[i].flush_dep_par_idx]) ) {
- pass = FALSE;
- sprintf(msg,
- "%d entry (%d, %d) header flush_dep_parent actual/expected = %p/%p.\n",
- tag,
- expected[i].entry_type,
- expected[i].entry_index,
- (void *)entry_ptr->header.flush_dep_parent,
- (void *)&(par_base_addr[expected[i].flush_dep_par_idx]));
- failure_mssg = msg;
- } /* end if */
- } /* end if */
+ /* # of flush dependency parents */
+ if ( pass ) {
+ if ( entry_ptr->flush_dep_npar != expected[i].flush_dep_npar ) {
+ pass = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) flush_dep_npar actual/expected = %u/%u.\n",
+ tag,
+ expected[i].entry_type,
+ expected[i].entry_index,
+ entry_ptr->flush_dep_npar,
+ expected[i].flush_dep_npar);
+ failure_mssg = msg;
+ } /* end if */
+ } /* end if */
+ if ( ( pass ) && ( in_cache ) ) {
+ if ( entry_ptr->header.flush_dep_nparents != expected[i].flush_dep_npar ) {
+ pass = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) header flush_dep_nparents actual/expected = %u/%u.\n",
+ tag,
+ expected[i].entry_type,
+ expected[i].entry_index,
+ entry_ptr->header.flush_dep_nparents,
+ expected[i].flush_dep_npar);
+ failure_mssg = msg;
+ } /* end if */
+ } /* end if */
- /* Flush dependency child ref. counts */
- for(u = 0; u < H5C__NUM_FLUSH_DEP_HEIGHTS; u++) {
- if ( pass ) {
- if ( entry_ptr->child_flush_dep_height_rc[u] != expected[i].child_flush_dep_height_rc[u] ) {
+ /* Flush dependency parent type & index. Note this algorithm assumes
+ * that the parents in both arrays are in the same order. */
+ if ( pass ) {
+ for ( u = 0; u < entry_ptr->flush_dep_npar; u++ ) {
+ if ( entry_ptr->flush_dep_par_type[u] != expected[i].flush_dep_par_type[u] ) {
pass = FALSE;
sprintf(msg,
- "%d entry (%d, %d) child_flush_dep_height_rc[%u] actual/expected = %llu/%llu.\n",
+ "%d entry (%d, %d) flush_dep_par_type[%u] actual/expected = %d/%d.\n",
tag,
expected[i].entry_type,
expected[i].entry_index,
u,
- (unsigned long long)(entry_ptr->child_flush_dep_height_rc[u]),
- (unsigned long long)expected[i].child_flush_dep_height_rc[u]);
+ entry_ptr->flush_dep_par_type[u],
+ expected[i].flush_dep_par_type[u]);
failure_mssg = msg;
} /* end if */
- } /* end if */
- if ( ( pass ) && ( in_cache ) ) {
- if ( entry_ptr->header.child_flush_dep_height_rc[u] != expected[i].child_flush_dep_height_rc[u] ) {
+ } /* end for */
+ } /* end if */
+ if ( pass ) {
+ for ( u = 0; u < entry_ptr->flush_dep_npar; u++ ) {
+ if ( entry_ptr->flush_dep_par_idx[u] != expected[i].flush_dep_par_idx[u] ) {
pass = FALSE;
sprintf(msg,
- "%d entry (%d, %d) header child_flush_dep_height_rc[%u] actual/expected = %llu/%llu.\n",
+ "%d entry (%d, %d) flush_dep_par_idx[%u] actual/expected = %d/%d.\n",
+ tag,
+ expected[i].entry_type,
+ expected[i].entry_index,
+ u,
+ entry_ptr->flush_dep_par_idx[u],
+ expected[i].flush_dep_par_idx[u]);
+ failure_mssg = msg;
+ } /* end if */
+ } /* end for */
+ } /* end if */
+
+ /* # of flush dependency children and dirty children */
+ if ( pass ) {
+ if ( entry_ptr->flush_dep_nchd != expected[i].flush_dep_nchd ) {
+ pass = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) flush_dep_nchd actual/expected = %u/%u.\n",
tag,
expected[i].entry_type,
expected[i].entry_index,
- u,
- (unsigned long long)entry_ptr->header.child_flush_dep_height_rc[u],
- (unsigned long long)expected[i].child_flush_dep_height_rc[u]);
- failure_mssg = msg;
- } /* end if */
+ entry_ptr->flush_dep_nchd,
+ expected[i].flush_dep_nchd);
+ failure_mssg = msg;
} /* end if */
- } /* end for */
-
- /* Flush dependency height */
+ } /* end if */
+ if ( ( pass ) && ( in_cache ) ) {
+ if ( entry_ptr->header.flush_dep_nchildren != expected[i].flush_dep_nchd ) {
+ pass = FALSE;
+ sprintf(msg,
+ "%d entry (%d, %d) header flush_dep_nchildren actual/expected = %u/%u.\n",
+ tag,
+ expected[i].entry_type,
+ expected[i].entry_index,
+ entry_ptr->header.flush_dep_nchildren,
+ expected[i].flush_dep_nchd);
+ failure_mssg = msg;
+ } /* end if */
+ } /* end if */
if ( pass ) {
- if ( entry_ptr->flush_dep_height != expected[i].flush_dep_height ) {
+ if ( entry_ptr->flush_dep_ndirty_chd != expected[i].flush_dep_ndirty_chd ) {
pass = FALSE;
sprintf(msg,
- "%d entry (%d, %d) flush_dep_height actual/expected = %u/%u.\n",
+ "%d entry (%d, %d) flush_dep_ndirty_chd actual/expected = %u/%u.\n",
tag,
expected[i].entry_type,
expected[i].entry_index,
- entry_ptr->flush_dep_height,
- expected[i].flush_dep_height);
+ entry_ptr->flush_dep_ndirty_chd,
+ expected[i].flush_dep_ndirty_chd);
failure_mssg = msg;
} /* end if */
} /* end if */
if ( ( pass ) && ( in_cache ) ) {
- if ( entry_ptr->header.flush_dep_height != expected[i].flush_dep_height ) {
+ if ( entry_ptr->header.flush_dep_ndirty_children != expected[i].flush_dep_ndirty_chd ) {
pass = FALSE;
sprintf(msg,
- "%d entry (%d, %d) header flush_dep_height actual/expected = %u/%u.\n",
- tag,
- expected[i].entry_type,
- expected[i].entry_index,
- entry_ptr->header.flush_dep_height,
- expected[i].flush_dep_height);
+ "%d entry (%d, %d) header flush_dep_ndirty_children actual/expected = %u/%u.\n",
+ tag,
+ expected[i].entry_type,
+ expected[i].entry_index,
+ entry_ptr->header.flush_dep_ndirty_children,
+ expected[i].flush_dep_ndirty_chd);
failure_mssg = msg;
} /* end if */
} /* end if */
@@ -2752,7 +2901,7 @@ verify_entry_status(H5C_t * cache_ptr,
} /* end if */
} /* end if */
- i++;
+ i++;
} /* while */
return;
@@ -3351,6 +3500,84 @@ flush_cache(H5F_t * file_ptr,
/*-------------------------------------------------------------------------
+ * Function: cork_entry_type()
+ *
+ * Purpose: To "cork" an object:
+ * --insert the base address of an entry type into
+ * the cache's list of corked object addresses
+ *
+ * Return: void
+ *
+ * Programmer: Vailin Choi; Jan 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cork_entry_type(H5F_t * file_ptr, int32_t type)
+{
+ H5C_t * cache_ptr;
+ haddr_t baddrs;
+ herr_t result;
+
+ if(pass) {
+ cache_ptr = file_ptr->shared->cache;
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+
+ baddrs = base_addrs[type];
+
+ result = H5C_cork(cache_ptr, baddrs, H5C__SET_CORK, NULL);
+ if(result < 0) {
+
+ pass = FALSE;
+ failure_mssg = "error in H5C_cork().";
+ }
+ }
+ return;
+} /* cork_entry_type() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: uncork_entry_type()
+ *
+ * Purpose: To "uncork" an object:
+ * --insert the base address of an entry type into
+ * the cache's list of corked object addresses
+ *
+ * Return: void
+ *
+ * Programmer: Vailin Choi; Jan 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+uncork_entry_type(H5F_t * file_ptr, int32_t type)
+{
+ H5C_t * cache_ptr;
+ haddr_t baddrs;
+ herr_t result;
+
+ if(pass) {
+ cache_ptr = file_ptr->shared->cache;
+
+ HDassert( cache_ptr );
+ HDassert( ( 0 <= type ) && ( type < NUMBER_OF_ENTRY_TYPES ) );
+
+ baddrs = base_addrs[type];
+
+ result = H5C_cork(cache_ptr, baddrs, H5C__UNCORK, NULL);
+ if(result < 0) {
+
+ pass = FALSE;
+ failure_mssg = "error in H5C_cork().";
+ }
+ }
+ return;
+} /* uncork_entry_type() */
+
+
+/*-------------------------------------------------------------------------
* Function: insert_entry()
*
* Purpose: Insert the entry indicated by the type and index.
@@ -3373,9 +3600,11 @@ insert_entry(H5F_t * file_ptr,
{
H5C_t * cache_ptr;
herr_t result;
+ hid_t xfer = H5AC_ind_dxpl_id;
hbool_t insert_pinned;
test_entry_t * base_addr;
test_entry_t * entry_ptr;
+ haddr_t baddrs;
if ( pass ) {
@@ -3387,17 +3616,27 @@ insert_entry(H5F_t * file_ptr,
base_addr = entries[type];
entry_ptr = &(base_addr[idx]);
+ baddrs = base_addrs[type];
HDassert( entry_ptr->index == idx );
HDassert( entry_ptr->type == type );
HDassert( entry_ptr == entry_ptr->self );
HDassert( !(entry_ptr->is_protected) );
+ HDassert( entry_ptr->flush_dep_npar == 0 );
+ HDassert( entry_ptr->flush_dep_nchd == 0 );
insert_pinned = (hbool_t)((flags & H5C__PIN_ENTRY_FLAG) != 0 );
entry_ptr->is_dirty = TRUE;
- result = H5C_insert_entry(file_ptr, H5P_DATASET_XFER_DEFAULT,
+ /* Set the base address of the entry type into the property list as tag */
+ /* Use to cork entries for the object */
+ if(H5AC_tag(xfer, baddrs, NULL) < 0) {
+ pass = FALSE;
+ failure_mssg = "error in H5P_set().";
+ }
+
+ result = H5C_insert_entry(file_ptr, xfer,
&(types[type]), entry_ptr->addr, (void *)entry_ptr, flags);
if ( ( result < 0 ) ||
@@ -3437,8 +3676,11 @@ insert_entry(H5F_t * file_ptr,
entry_ptr->is_pinned = insert_pinned;
entry_ptr->pinned_from_client = insert_pinned;
- HDassert(entry_ptr->header.is_dirty);
- HDassert(((entry_ptr->header).type)->id == type);
+ if(entry_ptr->header.is_corked)
+ entry_ptr->is_corked = TRUE;
+
+ HDassert( entry_ptr->header.is_dirty );
+ HDassert( ((entry_ptr->header).type)->id == type );
}
return;
@@ -3468,6 +3710,7 @@ mark_entry_dirty(int32_t type,
herr_t result;
test_entry_t * base_addr;
test_entry_t * entry_ptr;
+ hbool_t was_dirty;
if ( pass ) {
@@ -3483,7 +3726,12 @@ mark_entry_dirty(int32_t type,
HDassert( entry_ptr->header.is_protected ||
entry_ptr->header.is_pinned );
+ was_dirty = entry_ptr->is_dirty;
entry_ptr->is_dirty = TRUE;
+ if(entry_ptr->flush_dep_npar > 0
+ && entry_ptr->flush_dep_ndirty_chd == 0
+ && !was_dirty)
+ mark_flush_dep_dirty(entry_ptr);
result = H5C_mark_entry_dirty((void *)entry_ptr);
@@ -3576,8 +3824,13 @@ move_entry(H5C_t * cache_ptr,
}
if ( ! done ) {
+ hbool_t was_dirty = entry_ptr->is_dirty;
entry_ptr->is_dirty = TRUE;
+ if(entry_ptr->flush_dep_npar > 0
+ && entry_ptr->flush_dep_ndirty_chd == 0
+ && !was_dirty)
+ mark_flush_dep_dirty(entry_ptr);
result = H5C_move_entry(cache_ptr, &(types[type]),
old_addr, new_addr);
@@ -3633,6 +3886,8 @@ protect_entry(H5F_t * file_ptr,
H5C_t * cache_ptr;
test_entry_t * base_addr;
test_entry_t * entry_ptr;
+ haddr_t baddrs;
+ hid_t xfer = H5AC_ind_dxpl_id;
H5C_cache_entry_t * cache_entry_ptr;
if ( pass ) {
@@ -3645,14 +3900,23 @@ protect_entry(H5F_t * file_ptr,
base_addr = entries[type];
entry_ptr = &(base_addr[idx]);
+ baddrs = base_addrs[type];
HDassert( entry_ptr->index == idx );
HDassert( entry_ptr->type == type );
HDassert( entry_ptr == entry_ptr->self );
HDassert( !(entry_ptr->is_protected) );
- cache_entry_ptr = (H5C_cache_entry_t *)H5C_protect(file_ptr, H5P_DATASET_XFER_DEFAULT,
- &(types[type]), entry_ptr->addr, &entry_ptr->addr, H5C__NO_FLAGS_SET);
+ /* Set the base address of the entry type into the property list as tag */
+ /* Use to cork entries for the object */
+ if(H5AC_tag(xfer, baddrs, NULL) < 0) {
+ pass = FALSE;
+ failure_mssg = "error in H5P_set().";
+ }
+
+ cache_entry_ptr = (H5C_cache_entry_t *)H5C_protect(file_ptr, xfer,
+ &(types[type]), entry_ptr->addr, &entry_ptr->addr,
+ H5C__NO_FLAGS_SET);
if ( ( cache_entry_ptr != (void *)entry_ptr ) ||
( !(entry_ptr->header.is_protected) ) ||
@@ -3681,6 +3945,10 @@ protect_entry(H5F_t * file_ptr,
HDfprintf(stdout,
"entry_ptr->addr = %d, entry_ptr->header.addr = %d\n",
(int)(entry_ptr->addr), (int)(entry_ptr->header.addr));
+ HDfprintf(stdout,
+ "entry_ptr->verify_ct = %d, entry_ptr->max_verify_ct = %d\n",
+ entry_ptr->verify_ct, entry_ptr->max_verify_ct);
+ H5Eprint2(H5E_DEFAULT, stdout);
#endif
pass = FALSE;
failure_mssg = "error in H5C_protect().";
@@ -3696,6 +3964,9 @@ protect_entry(H5F_t * file_ptr,
}
+ if(entry_ptr->header.is_corked)
+ entry_ptr->is_corked = TRUE;
+
HDassert( ((entry_ptr->header).type)->id == type );
}
@@ -3955,8 +4226,15 @@ unprotect_entry(H5F_t * file_ptr,
HDassert ( ( ! pin_flag_set ) || ( ! (entry_ptr->is_pinned) ) );
HDassert ( ( ! unpin_flag_set ) || ( entry_ptr->is_pinned ) );
- if(flags & H5C__DIRTIED_FLAG)
+ if(flags & H5C__DIRTIED_FLAG) {
+ hbool_t was_dirty = entry_ptr->is_dirty;
+
entry_ptr->is_dirty = TRUE;
+ if(entry_ptr->flush_dep_npar > 0
+ && entry_ptr->flush_dep_ndirty_chd == 0
+ && !was_dirty)
+ mark_flush_dep_dirty(entry_ptr);
+ } /* end if */
result = H5C_unprotect(file_ptr, H5P_DATASET_XFER_DEFAULT,
entry_ptr->addr, (void *)entry_ptr, flags);
@@ -5434,39 +5712,28 @@ create_flush_dependency(int32_t par_type,
if ( ( result < 0 ) ||
( !par_entry_ptr->header.is_pinned ) ||
- ( !(par_entry_ptr->header.flush_dep_height > 0) ) ) {
+ ( !(par_entry_ptr->header.flush_dep_nchildren > 0) ) ) {
pass = FALSE;
failure_mssg = "error in H5C_create_flush_dependency().";
} /* end if */
/* Update information about entries */
- chd_entry_ptr->flush_dep_par_type = par_type;
- chd_entry_ptr->flush_dep_par_idx = par_idx;
- par_entry_ptr->child_flush_dep_height_rc[chd_entry_ptr->flush_dep_height]++;
+ HDassert( chd_entry_ptr->flush_dep_npar < MAX_FLUSH_DEP_PARS );
+ chd_entry_ptr->flush_dep_par_type[chd_entry_ptr->flush_dep_npar] = par_type;
+ chd_entry_ptr->flush_dep_par_idx[chd_entry_ptr->flush_dep_npar] = par_idx;
+ chd_entry_ptr->flush_dep_npar++;
+ par_entry_ptr->flush_dep_nchd++;
+ if(chd_entry_ptr->is_dirty || chd_entry_ptr->flush_dep_ndirty_chd > 0) {
+ HDassert(par_entry_ptr->flush_dep_ndirty_chd < par_entry_ptr->flush_dep_nchd);
+ par_entry_ptr->flush_dep_ndirty_chd++;
+ if(!par_entry_ptr->is_dirty
+ && par_entry_ptr->flush_dep_ndirty_chd == 1)
+ mark_flush_dep_dirty(par_entry_ptr);
+ } /* end if */
par_entry_ptr->pinned_from_cache = TRUE;
if( !par_is_pinned )
par_entry_ptr->is_pinned = TRUE;
-
- /* Check flush dependency heights */
- while(chd_entry_ptr->flush_dep_height >= par_entry_ptr->flush_dep_height) {
- unsigned prev_par_flush_dep_height = par_entry_ptr->flush_dep_height; /* Save the previous height */
-
- par_entry_ptr->flush_dep_height = chd_entry_ptr->flush_dep_height + 1;
-
- /* Check for parent entry being in flush dependency relationship */
- if(par_entry_ptr->flush_dep_par_idx >= 0) {
- /* Move parent & child entries up the flushd dependency 'chain' */
- chd_entry_ptr = par_entry_ptr;
- par_base_addr = entries[chd_entry_ptr->flush_dep_par_type];
- par_entry_ptr = &(par_base_addr[chd_entry_ptr->flush_dep_par_idx]);
-
- /* Adjust the ref. counts in new parent */
- HDassert(par_entry_ptr->child_flush_dep_height_rc[prev_par_flush_dep_height] > 0);
- par_entry_ptr->child_flush_dep_height_rc[prev_par_flush_dep_height]--;
- par_entry_ptr->child_flush_dep_height_rc[chd_entry_ptr->flush_dep_height]++;
- } /* end if */
- } /* end if */
} /* end if */
return;
@@ -5505,18 +5772,16 @@ destroy_flush_dependency(int32_t par_type,
test_entry_t * par_entry_ptr; /* Parent entry */
test_entry_t * chd_base_addr; /* Base entry of child's entry array */
test_entry_t * chd_entry_ptr; /* Child entry */
- unsigned chd_flush_dep_height; /* Child flush dep. height */
+ unsigned i; /* Local index variable */
/* Get parent entry */
par_base_addr = entries[par_type];
par_entry_ptr = &(par_base_addr[par_idx]);
/* Sanity check parent entry */
- HDassert( par_entry_ptr->index == par_idx );
- HDassert( par_entry_ptr->type == par_type );
HDassert( par_entry_ptr->is_pinned );
HDassert( par_entry_ptr->pinned_from_cache );
- HDassert( par_entry_ptr->flush_dep_height > 0 );
+ HDassert( par_entry_ptr->flush_dep_nchd > 0 );
HDassert( par_entry_ptr == par_entry_ptr->self );
/* Get parent entry */
@@ -5526,7 +5791,7 @@ destroy_flush_dependency(int32_t par_type,
/* Sanity check child entry */
HDassert( chd_entry_ptr->index == chd_idx );
HDassert( chd_entry_ptr->type == chd_type );
- HDassert( chd_entry_ptr->flush_dep_height < par_entry_ptr->flush_dep_height );
+ HDassert( chd_entry_ptr->flush_dep_npar > 0 );
HDassert( chd_entry_ptr == chd_entry_ptr->self );
if ( H5C_destroy_flush_dependency(par_entry_ptr, chd_entry_ptr) < 0 ) {
@@ -5535,49 +5800,34 @@ destroy_flush_dependency(int32_t par_type,
} /* end if */
/* Update information about entries */
- chd_entry_ptr->flush_dep_par_type = -1;
- chd_entry_ptr->flush_dep_par_idx = -1;
- par_entry_ptr->child_flush_dep_height_rc[chd_entry_ptr->flush_dep_height]--;
-
- /* Check flush dependency heights */
- chd_flush_dep_height = chd_entry_ptr->flush_dep_height;
- while( 0 == par_entry_ptr->child_flush_dep_height_rc[chd_flush_dep_height] ) {
- unsigned prev_par_flush_dep_height = par_entry_ptr->flush_dep_height; /* Save the previous height */
- int i; /* Local index variable */
-
- /* Check for new flush dependency height of parent */
- for(i = (H5C__NUM_FLUSH_DEP_HEIGHTS - 1); i >= 0; i--)
- if(par_entry_ptr->child_flush_dep_height_rc[i] > 0)
- break;
-
- HDassert((i + 1) <= (int)prev_par_flush_dep_height);
-
- if((unsigned)(i + 1) < prev_par_flush_dep_height) {
- par_entry_ptr->flush_dep_height = (unsigned)(i + 1);
- if(i < 0) {
- par_entry_ptr->pinned_from_cache = FALSE;
- par_entry_ptr->is_pinned = par_entry_ptr->pinned_from_client;
- } /* end if */
-
- /* Check for parent entry being in flush dependency relationship */
- if(par_entry_ptr->flush_dep_par_idx >= 0) {
- /* Move parent & child entries up the flushd dependency 'chain' */
- chd_entry_ptr = par_entry_ptr;
- par_base_addr = entries[chd_entry_ptr->flush_dep_par_type];
- par_entry_ptr = &(par_base_addr[chd_entry_ptr->flush_dep_par_idx]);
-
- /* Adjust the ref. counts in new parent */
- HDassert(par_entry_ptr->child_flush_dep_height_rc[prev_par_flush_dep_height] > 0);
- par_entry_ptr->child_flush_dep_height_rc[prev_par_flush_dep_height]--;
- par_entry_ptr->child_flush_dep_height_rc[chd_entry_ptr->flush_dep_height]++;
- chd_flush_dep_height = prev_par_flush_dep_height;
- } /* end if */
- else
- break;
- } /* end if */
- else
+ for(i=0; i<chd_entry_ptr->flush_dep_npar; i++)
+ if(chd_entry_ptr->flush_dep_par_type[i] == par_type
+ && chd_entry_ptr->flush_dep_par_idx[i] == par_idx)
break;
- } /* end while */
+ HDassert(i < chd_entry_ptr->flush_dep_npar);
+ if(i < chd_entry_ptr->flush_dep_npar - 1)
+ HDmemmove(&chd_entry_ptr->flush_dep_par_type[i],
+ &chd_entry_ptr->flush_dep_par_type[i+1],
+ (chd_entry_ptr->flush_dep_npar - i - 1)
+ * sizeof(chd_entry_ptr->flush_dep_par_type[0]));
+ if(i < chd_entry_ptr->flush_dep_npar - 1)
+ HDmemmove(&chd_entry_ptr->flush_dep_par_idx[i],
+ &chd_entry_ptr->flush_dep_par_idx[i+1],
+ (chd_entry_ptr->flush_dep_npar - i - 1)
+ * sizeof(chd_entry_ptr->flush_dep_par_idx[0]));
+ chd_entry_ptr->flush_dep_npar--;
+ par_entry_ptr->flush_dep_nchd--;
+ if(par_entry_ptr->flush_dep_nchd == 0) {
+ par_entry_ptr->pinned_from_cache = FALSE;
+ par_entry_ptr->is_pinned = par_entry_ptr->pinned_from_client;
+ } /* end if */
+ if(chd_entry_ptr->is_dirty || chd_entry_ptr->flush_dep_ndirty_chd > 0) {
+ HDassert(par_entry_ptr->flush_dep_ndirty_chd > 0);
+ par_entry_ptr->flush_dep_ndirty_chd--;
+ if(!par_entry_ptr->is_dirty
+ && par_entry_ptr->flush_dep_ndirty_chd == 0)
+ mark_flush_dep_clean(par_entry_ptr);
+ } /* end if */
} /* end if */
return;
@@ -5585,6 +5835,108 @@ destroy_flush_dependency(int32_t par_type,
} /* destroy_flush_dependency() */
+/*-------------------------------------------------------------------------
+ * Function: mark_flush_dep_dirty()
+ *
+ * Purpose: Recursively propagate the flush_dep_ndirty_children flag
+ * up the dependency chain in response to entry either
+ * becoming dirty or having its flush_dep_ndirty_children
+ * increased from 0.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 12/4/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+mark_flush_dep_dirty(test_entry_t * entry_ptr)
+{
+ test_entry_t * par_base_addr; /* Base entry of parent's entry array */
+ test_entry_t * par_entry_ptr; /* Parent entry */
+ unsigned i; /* Local index variable */
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ HDassert((entry_ptr->is_dirty && entry_ptr->flush_dep_ndirty_chd == 0)
+ || (!entry_ptr->is_dirty && entry_ptr->flush_dep_ndirty_chd == 1));
+
+ /* Iterate over the parent entries */
+ if(entry_ptr->flush_dep_npar) {
+ for(i=0; i<entry_ptr->flush_dep_npar; i++) {
+ /* Get parent entry */
+ par_base_addr = entries[entry_ptr->flush_dep_par_type[i]];
+ par_entry_ptr = &(par_base_addr[entry_ptr->flush_dep_par_idx[i]]);
+
+ /* Sanity check */
+ HDassert(par_entry_ptr->flush_dep_ndirty_chd
+ < par_entry_ptr->flush_dep_nchd);
+
+ /* Adjust the parent's number of dirty children */
+ par_entry_ptr->flush_dep_ndirty_chd++;
+
+ /* Propagate the flush dep dirty flag up the chain if necessary */
+ if(!par_entry_ptr->is_dirty
+ && par_entry_ptr->flush_dep_ndirty_chd == 1)
+ mark_flush_dep_dirty(par_entry_ptr);
+ } /* end for */
+ } /* end if */
+
+ return;
+} /* mark_flush_dep_dirty() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: mark_flush_dep_clean()
+ *
+ * Purpose: Recursively propagate the flush_dep_ndirty_children flag
+ * up the dependency chain in response to entry either
+ * becoming clean or having its flush_dep_ndirty_children
+ * reduced to 0.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * 12/4/12
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+mark_flush_dep_clean(test_entry_t * entry_ptr)
+{
+ test_entry_t * par_base_addr; /* Base entry of parent's entry array */
+ test_entry_t * par_entry_ptr; /* Parent entry */
+ unsigned i; /* Local index variable */
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ HDassert(!entry_ptr->is_dirty && entry_ptr->flush_dep_ndirty_chd == 0);
+
+ /* Iterate over the parent entries */
+ if(entry_ptr->flush_dep_npar) {
+ for(i=0; i<entry_ptr->flush_dep_npar; i++) {
+ /* Get parent entry */
+ par_base_addr = entries[entry_ptr->flush_dep_par_type[i]];
+ par_entry_ptr = &(par_base_addr[entry_ptr->flush_dep_par_idx[i]]);
+
+ /* Sanity check */
+ HDassert(par_entry_ptr->flush_dep_ndirty_chd > 0);
+
+ /* Adjust the parent's number of dirty children */
+ par_entry_ptr->flush_dep_ndirty_chd--;
+
+ /* Propagate the flush dep dirty flag up the chain if necessary */
+ if(!par_entry_ptr->is_dirty
+ && par_entry_ptr->flush_dep_ndirty_chd == 0)
+ mark_flush_dep_clean(par_entry_ptr);
+ } /* end for */
+ } /* end if */
+
+ return;
+} /* mark_flush_dep_clean() */
+
+
/*** H5AC level utility functions ***/
diff --git a/test/cache_common.h b/test/cache_common.h
index c07ab25..3f08081 100644
--- a/test/cache_common.h
+++ b/test/cache_common.h
@@ -152,6 +152,9 @@
* cache entry.
*/
+#define MAX_FLUSH_DEP_PARS 8 /* Maximum number of flush dependency
+ * parents in the test */
+
typedef struct flush_op
{
int op_code; /* integer op code indicating the
@@ -284,6 +287,8 @@ typedef struct test_entry_t
hbool_t is_pinned; /* entry is currently pinned in
* the cache.
*/
+ haddr_t tag; /* the base_addr as tag for corking entries */
+ hbool_t is_corked; /* entry is currently corked or not */
int pinning_ref_count; /* Number of entries that
* pin this entry in the cache.
* When this count drops to zero,
@@ -338,19 +343,20 @@ typedef struct test_entry_t
hbool_t expunged; /* entry has been expunged since the
* last time it was reset.
*/
- int flush_dep_par_type; /* Entry type of flush dependency parent */
- int flush_dep_par_idx; /* Index of flush dependency parent */
- uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS];
- /* flush dependency heights of flush
- * dependency children
- */
- unsigned flush_dep_height; /* flush dependency height of entry */
- hbool_t pinned_from_client; /* entry was pinned by client call */
- hbool_t pinned_from_cache; /* entry was pinned by cache internally */
- unsigned flush_order; /* Order that entry was flushed in */
+ int flush_dep_par_type[MAX_FLUSH_DEP_PARS]; /* Entry types of flush dependency parents */
+ int flush_dep_par_idx[MAX_FLUSH_DEP_PARS]; /* Indices of flush dependency parents */
+ unsigned flush_dep_npar; /* Number of flush dependency parents */
+ unsigned flush_dep_nchd; /* Number of flush dependency children */
+ unsigned flush_dep_ndirty_chd; /* Number of dirty flush dependency children (including granchildren, etc.) */
+ hbool_t pinned_from_client; /* entry was pinned by client call */
+ hbool_t pinned_from_cache; /* entry was pinned by cache internally */
+ unsigned flush_order; /* Order that entry was flushed in */
unsigned notify_after_insert_count; /* Count of times that entry was inserted in cache */
unsigned notify_before_evict_count; /* Count of times that entry was removed in cache */
+ unsigned actual_len; /* Simulate the entry's actual size for a speculative load */
+ unsigned max_verify_ct; /* Maximum # of times to verify an entry's checksum */
+ unsigned verify_ct; /* Count the # of checksum verification for an entry */
} test_entry_t;
/* The following are cut down test versions of the hash table manipulation
@@ -518,14 +524,13 @@ struct expected_entry_status
hbool_t deserialized;
hbool_t serialized;
hbool_t destroyed;
- int flush_dep_par_type; /* Entry type of flush dependency parent */
- int flush_dep_par_idx; /* Index of flush dependency parent */
- uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS];
- /* flush dependency heights of flush
- * dependency children
- */
- unsigned flush_dep_height; /* flush dependency height of entry */
+ int flush_dep_par_type[MAX_FLUSH_DEP_PARS]; /* Entry types of flush dependency parents */
+ int flush_dep_par_idx[MAX_FLUSH_DEP_PARS]; /* Indices of flush dependency parents */
+ unsigned flush_dep_npar; /* Number of flush dependency parents */
+ unsigned flush_dep_nchd; /* Number of flush dependency children */
+ unsigned flush_dep_ndirty_chd; /* Number of dirty flush dependency children */
int flush_order; /* flush order of entry */
+ unsigned char is_corked; /* cork status of entry */
};
@@ -616,6 +621,11 @@ void create_pinned_entry_dependency(H5F_t * file_ptr,
void reset_entries(void);
+void cork_entry_type(H5F_t * file_ptr, int32_t type);
+void uncork_entry_type(H5F_t * file_ptr, int32_t type);
+
+
+
void resize_entry(H5F_t * file_ptr,
int32_t type,
int32_t idx,
diff --git a/test/cache_logging.c b/test/cache_logging.c
new file mode 100644
index 0000000..a5e399c
--- /dev/null
+++ b/test/cache_logging.c
@@ -0,0 +1,176 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Purpose: Tests the metadata cache logging framework */
+
+#include "h5test.h"
+
+#define LOG_LOCATION "cache_logging.out"
+#define FILE_NAME "cache_logging"
+
+#define N_GROUPS 100
+
+/*-------------------------------------------------------------------------
+ * Function: test_logging_api
+ *
+ * Purpose: Tests the API calls that affect mdc logging
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_logging_api(void)
+{
+ hid_t fapl = -1;
+ hbool_t is_enabled;
+ hbool_t is_enabled_out;
+ hbool_t start_on_access;
+ hbool_t start_on_access_out;
+ char *location = NULL;
+ size_t size;
+
+ hid_t fid;
+ hid_t gid;
+ hbool_t is_currently_logging;
+ char group_name[8];
+ char filename[1024];
+ int i;
+
+ TESTING("metadata cache log api calls");
+
+ fapl = h5_fileaccess();
+ h5_fixname(FILE_NAME, fapl, filename, sizeof filename);
+
+ /* Set up metadata cache logging */
+ is_enabled = TRUE;
+ start_on_access = FALSE;
+ if(H5Pset_mdc_log_options(fapl, is_enabled, LOG_LOCATION, start_on_access) < 0)
+ TEST_ERROR;
+
+ /* Check to make sure that the property list getter returns the correct
+ * location string buffer size;
+ */
+ is_enabled_out = FALSE;
+ start_on_access_out = TRUE;
+ location = NULL;
+ size = 999;
+ if(H5Pget_mdc_log_options(fapl, &is_enabled_out, location, &size,
+ &start_on_access_out) < 0)
+ TEST_ERROR;
+ if(size != strlen(LOG_LOCATION) + 1)
+ TEST_ERROR;
+
+ /* Check to make sure that the property list getter works */
+ if(NULL == (location = (char *)HDcalloc(size, sizeof(char))))
+ TEST_ERROR;
+ if(H5Pget_mdc_log_options(fapl, &is_enabled_out, location, &size,
+ &start_on_access_out) < 0)
+ TEST_ERROR;
+ if((is_enabled != is_enabled_out)
+ || (start_on_access != start_on_access_out)
+ || HDstrcmp(LOG_LOCATION, location))
+ TEST_ERROR;
+
+ /* Create a file */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ TEST_ERROR;
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ TEST_ERROR;
+ if(H5Pclose(fapl) < 0)
+ TEST_ERROR;
+
+
+ /* Check to see if the logging flags were set correctly */
+ is_enabled = FALSE;
+ is_currently_logging = TRUE;
+ if((H5Fget_mdc_logging_status(fid, &is_enabled, &is_currently_logging) < 0)
+ || (is_enabled != TRUE)
+ || (is_currently_logging != FALSE))
+ TEST_ERROR;
+
+ /* Turn on logging and check flags */
+ if(H5Fstart_mdc_logging(fid) < 0)
+ TEST_ERROR;
+ is_enabled = FALSE;
+ is_currently_logging = FALSE;
+ if((H5Fget_mdc_logging_status(fid, &is_enabled, &is_currently_logging) < 0)
+ || (is_enabled != TRUE)
+ || (is_currently_logging != TRUE))
+ TEST_ERROR;
+
+ /* Perform some manipulations */
+ for(i = 0; i < N_GROUPS; i++) {
+ HDmemset(group_name, 0, 8);
+ HDsnprintf(group_name, 8, "%d", i);
+ if((gid = H5Gcreate2(fid, group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if(H5Gclose(gid) < 0)
+ TEST_ERROR;
+ }
+
+ /* Turn off logging and check flags */
+ if(H5Fstop_mdc_logging(fid) < 0)
+ TEST_ERROR;
+ is_enabled = FALSE;
+ is_currently_logging = TRUE;
+ if((H5Fget_mdc_logging_status(fid, &is_enabled, &is_currently_logging) < 0)
+ || (is_enabled != TRUE)
+ || (is_currently_logging != FALSE))
+ TEST_ERROR;
+
+ /* Clean up */
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR;
+
+ PASSED();
+ return 0;
+
+error:
+ return 1;
+ } /* test_logging_api() */
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Test basic cache logging operations
+ *
+ * Return: Success: zero
+ * Failure: non-zero
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ int nerrors = 0;
+
+ /* Reset library */
+ h5_reset();
+
+ printf("Testing basic metadata cache logging functionality.\n");
+
+ nerrors += test_logging_api();
+
+ if(nerrors) {
+ printf("***** %d Metadata cache logging TEST%s FAILED! *****\n",
+ nerrors, nerrors > 1 ? "S" : "");
+ return 1;
+ }
+
+ printf("All Metadata Cache Logging tests passed.\n");
+ return 0;
+}
+
diff --git a/test/cork.c b/test/cork.c
new file mode 100644
index 0000000..6478723
--- /dev/null
+++ b/test/cork.c
@@ -0,0 +1,2191 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: Vailin Choi
+ * Feb 20, 2014
+ *
+ * This file contains tests for:
+ * H5Odisable_mdc_flushes()
+ * H5Oenable_mdc_flushes()
+ * H5Oare_mdc_flushes_disabled()
+ */
+#include "hdf5.h"
+#include "testhdf5.h"
+#include "h5test.h"
+#include "H5Iprivate.h"
+#include "H5ACprivate.h"
+#include "H5ACpublic.h"
+#include "cache_common.h"
+#include "H5HLprivate.h"
+
+/* ============ */
+/* Test Defines */
+/* ============ */
+
+#define FILENAME "test_cork.h5"
+#define ATTR "ATTR"
+#define DSET "DSET"
+#define DSET_BT1 "DSET_BT1"
+#define DSET_COMPACT "DSET_COMPACT"
+#define DSET_CONTIG "DSET_CONTIG"
+#define DSET_EA "DSET_EA"
+#define DSET_BT2 "DSET_BT2"
+#define DSET_FA "DSET_FA"
+#define DSET_NONE "DSET_NONE"
+#define GRP "GRP"
+#define GRP2 "GRP2"
+#define GRP3 "GRP3"
+#define DT "DT"
+#define DT2 "DT2"
+#define DT3 "DT3"
+#define GRP_ATTR "GRP_ATTR"
+#define DSET_ATTR "DSET_ATTR"
+#define DT_ATTR "DT_ATTR"
+
+#define RANK 2
+#define DIM0 5
+#define DIM1 10
+#define DIMS0 50
+#define DIMS1 100
+
+
+/* ===================== */
+/* Function Declarations */
+/* ===================== */
+
+/* Helper Functions */
+static void print_entry_type_to_screen(int id);
+static int print_index(hid_t fid);
+static int verify_cork_tag(hid_t fid, haddr_t tag, hbool_t status);
+
+/* Tests */
+static int test_objs_cork(hbool_t new_format);
+static int test_dset_cork(hbool_t new_format);
+static int verify_old_dset_cork(void);
+static int verify_obj_dset_cork(hbool_t swmr);
+static int verify_dset_cork(hbool_t swmr, hbool_t new_format);
+static int verify_group_cork(hbool_t swmr);
+static int verify_named_cork(hbool_t swmr);
+static int verify_multiple_cork(hbool_t swmr);
+
+/* ================ */
+/* Helper Functions */
+/* ================ */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: print_entry_type_to_screen
+ * (copied from cache_tagging.c)
+ *
+ * Purpose: DEBUG CODE (for when verbose is set).
+ *
+ * Prints type of entry to stdout.
+ *
+ * Return: void
+ *
+ * Programmer: Mike McGreevy
+ * September 3, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+print_entry_type_to_screen(int id)
+{
+ printf("Type = ");
+
+ switch (id) {
+
+ case 0:
+ printf("B-tree Node");
+ break;
+ case 1:
+ printf("Symbol Table Node");
+ break;
+ case 2:
+ printf("Local Heap Prefix");
+ break;
+ case 3:
+ printf("Local Heap Data Block");
+ break;
+ case 4:
+ printf("Global Heap");
+ break;
+ case 5:
+ printf("Object Header");
+ break;
+ case 6:
+ printf("Object Header Chunk");
+ break;
+ case 7:
+ printf("v2 B-tree Header");
+ break;
+ case 8:
+ printf("v2 B-tree Internal Node");
+ break;
+ case 9:
+ printf("v2 B-tree Leaf Node");
+ break;
+ case 10:
+ printf("Fractal Heap Header");
+ break;
+ case 11:
+ printf("Fractal Heap Direct Block");
+ break;
+ case 12:
+ printf("Fractal Heap Indirect Block");
+ break;
+ case 13:
+ printf("Free Space Header");
+ break;
+ case 14:
+ printf("Free Space Section");
+ break;
+ case 15:
+ printf("Shared Object Header Message Master Table");
+ break;
+ case 16:
+ printf("Shared Message Index Stored As A List");
+ break;
+ case 17:
+ printf("Extensible Array Header");
+ break;
+ case 18:
+ printf("Extensible Array Index Block");
+ break;
+ case 19:
+ printf("Extensible Array Super Block");
+ break;
+ case 20:
+ printf("Extensible Array Data Block");
+ break;
+ case 21:
+ printf("Extensible Array Data Block Page");
+ break;
+ case 22:
+ printf("Chunk Proxy");
+ break;
+ case 23:
+ printf("Fixed Array Header");
+ break;
+ case 24:
+ printf("Fixed Array Data Block");
+ break;
+ case 25:
+ printf("Fixed Array Data Block Page");
+ break;
+ case 26:
+ printf("File Superblock");
+ break;
+ case 27:
+ printf("Test Entry");
+ break;
+ case 28:
+ printf("Number of Types");
+ break;
+ default:
+ printf("*Unknown*");
+ break;
+
+ } /* end switch */
+
+} /* print_entry_type_to_screen */
+
+
+/*-------------------------------------------------------------------------
+ * Function: print_index()
+ *
+ * Purpose: DEBUG CODE (for when verbose is set).
+ * (copied from cache_tagging.c)
+ *
+ * Prints cache index to screen, including address of entries,
+ * tag values of entries, and entry types.
+ *
+ * Return: void
+ *
+ * Programmer: Mike McGreevy
+ * January 25, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static int print_index(hid_t fid) {
+
+ H5F_t * f = NULL; /* File Pointer */
+ H5C_t * cache_ptr = NULL; /* Cache Pointer */
+ int i = 0; /* Iterator */
+ H5C_cache_entry_t *next_entry_ptr = NULL; /* entry pointer */
+
+ /* Get Internal File / Cache Pointers */
+ if ( NULL == (f = (H5F_t *)H5I_object(fid)) ) TEST_ERROR;
+ cache_ptr = f->shared->cache;
+
+ /* Initial (debugging) loop */
+ printf("CACHE SNAPSHOT:\n");
+ for (i = 0; i < H5C__HASH_TABLE_LEN; i++) {
+ next_entry_ptr = cache_ptr->index[i];
+
+ while (next_entry_ptr != NULL) {
+ printf("Addr = %u, ", (unsigned int)next_entry_ptr->addr);
+ printf("Tag = %u, ", (unsigned int)next_entry_ptr->tag);
+ printf("Dirty = %d, ", (int)next_entry_ptr->is_dirty);
+ printf("Protected = %d, ", (int)next_entry_ptr->is_protected);
+ printf("Pinned = %d, ", (int)next_entry_ptr->is_pinned);
+ printf("Corked = %d, ", (int)next_entry_ptr->is_corked);
+ print_entry_type_to_screen(next_entry_ptr->type->id);
+ printf("\n");
+ next_entry_ptr = next_entry_ptr->ht_next;
+ } /* end if */
+
+ } /* end for */
+ printf("\n");
+
+ return 0;
+
+error:
+
+ return -1;
+
+} /* print_index */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_cork_tag()
+ *
+ * Purpose: This routine verifies that all cache entries associated with
+ * the object tag are marked with the desired "cork" status.
+ *
+ * Return: 0 on Success, -1 on Failure
+ *
+ * Programmer: Vailin Choi; Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+verify_cork_tag(hid_t fid, haddr_t tag, hbool_t status)
+{
+ int i = 0; /* Iterator */
+ H5F_t * f = NULL; /* File Pointer */
+ H5C_t * cache_ptr = NULL; /* Cache Pointer */
+ H5C_cache_entry_t *next_entry_ptr = NULL; /* entry pointer */
+
+ /* Get Internal File / Cache Pointers */
+ if ( NULL == (f = (H5F_t *)H5I_object(fid)) ) TEST_ERROR;
+ cache_ptr = f->shared->cache;
+
+ for (i = 0; i < H5C__HASH_TABLE_LEN; i++) {
+
+ next_entry_ptr = cache_ptr->index[i];
+
+ while (next_entry_ptr != NULL) {
+
+ if (next_entry_ptr->tag == tag && next_entry_ptr->is_corked != status)
+ TEST_ERROR;
+
+ next_entry_ptr = next_entry_ptr->ht_next;
+
+ } /* end if */
+
+ } /* for */
+
+ return 0;
+
+error:
+ return -1;
+} /* verify_cork_tag */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_old_dset_cork
+ *
+ * Purpose: This function verifies corking operation for datasets
+ * created with old library format. Cache entries associated with the
+ * object tag are checked for the correct cork status.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Vailin Choi; Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+verify_old_dset_cork(void)
+{
+ /* Variable Declarations */
+ hid_t fid = -1; /* File ID */
+ hid_t did = -1, did2 = -1, did3 = -1; /* Dataset IDs */
+ hid_t dcpl = -1, dcpl2 = -1, dcpl3 = -1; /* Dataset creation property lists */
+ hid_t sid = -1, sid2 = -1, sid3 = -1; /* Dataspace IDs */
+ hsize_t dims[2] = {100, 20}; /* Dataset dimension sizes */
+ hsize_t max_dims[2] = {100, H5S_UNLIMITED}; /* Dataset maximum dimension sizes */
+ hsize_t chunk_dims[2] = {2, 5}; /* Dataset chunked dimension sizes */
+ int buf[100][20]; /* Data buffer */
+ int i = 0, j = 0; /* Local index variable */
+ H5O_info_t oinfo, oinfo2, oinfo3; /* Object metadata information */
+ hsize_t dims2[2] = {8, 16}; /* Dataset dimension sizes */
+
+ /* Testing Macro */
+ TESTING("cork status for datasets with old format");
+
+ /* Create the file */
+ if((fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0 )
+ TEST_ERROR;
+
+ /* Create dcpl */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use chunked dataset */
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0) FAIL_STACK_ERROR
+
+ /* Create chunked dataset with v1-btree indexing: DSET_BT1 */
+ if((sid = H5Screate_simple(2, dims, max_dims)) < 0)
+ TEST_ERROR;
+ if((did = H5Dcreate2(fid, DSET_BT1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0 )
+ TEST_ERROR;
+
+ /* Get dataset object header address: DSET_BT1 */
+ if(H5Oget_info(did, &oinfo) < 0 ) TEST_ERROR;
+
+ /* Cork the dataset: DSET_BT1 */
+ if(H5Odisable_mdc_flushes(did) < 0 ) TEST_ERROR;
+
+ /* Verify cork status */
+ if(verify_cork_tag(fid, oinfo.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Initialize data buffer */
+ for(i = 0; i < (int)dims[0]; i++) {
+ for(j = 0; j < (int)dims[1]; j++) {
+ buf[i][j] = (i + 1) * (j + 1);
+ }
+ }
+
+ /* Write to the dataset: DSET_BT1 */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ TEST_ERROR;
+
+ /* Verify the cork status for DSET_BT1 */
+ if(verify_cork_tag(fid, oinfo.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Create compact dataset: DSET_COMPACT */
+ if((sid2 = H5Screate_simple(2, dims2, NULL)) < 0)
+ FAIL_STACK_ERROR
+ if((dcpl2 = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_layout(dcpl2, H5D_COMPACT) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_alloc_time(dcpl2, H5D_ALLOC_TIME_EARLY) < 0)
+ FAIL_STACK_ERROR
+ if((did2 = H5Dcreate2(fid, DSET_COMPACT, H5T_NATIVE_INT, sid2, H5P_DEFAULT, dcpl2, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get dataset object address */
+ if(H5Oget_info(did2, &oinfo2) < 0 ) TEST_ERROR;
+
+ /* Cork the dataset: DSET_COMPACT */
+ if(H5Odisable_mdc_flushes(did2) < 0 ) TEST_ERROR;
+
+ /* Verify cork status */
+ if(verify_cork_tag(fid, oinfo2.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Dclose(did) < 0 ) TEST_ERROR;
+ if(H5Sclose(sid) < 0 ) TEST_ERROR;
+ if(H5Pclose(dcpl) < 0 ) TEST_ERROR;
+
+ if(H5Dclose(did2) < 0 ) TEST_ERROR;
+ if(H5Sclose(sid2) < 0 ) TEST_ERROR;
+ if(H5Pclose(dcpl2) < 0 ) TEST_ERROR;
+
+ if(H5Fclose(fid) < 0 ) TEST_ERROR;
+
+ /* Reopen the file */
+ if((fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT)) < 0 ) TEST_ERROR;
+
+ /* Initialize data buffer */
+ for(i = 0; i < (int)dims[0]; i++) {
+ for(j = 0; j < (int)dims[1]; j++) {
+ buf[i][j] = (i + 1) * (j + 1);
+ }
+ }
+
+ /* Open and write to the dataset: DSET_BT1 */
+ if((did = H5Dopen2(fid, DSET_BT1, H5P_DEFAULT)) < 0 ) TEST_ERROR;
+
+ /* Create contiguous dataset: DSET_CONTIG */
+ if((sid3 = H5Screate_simple(2, dims2, NULL)) < 0)
+ FAIL_STACK_ERROR
+ if((dcpl3 = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_layout(dcpl3, H5D_CONTIGUOUS) < 0)
+ FAIL_STACK_ERROR
+ if((did3 = H5Dcreate2(fid, DSET_CONTIG, H5T_NATIVE_INT, sid3, H5P_DEFAULT, dcpl3, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get dataset object address: DSET_CONTIG */
+ if(H5Oget_info(did3, &oinfo3) < 0 ) TEST_ERROR;
+
+ /* Cork the dataset: DSET_CONTIG */
+ if(H5Odisable_mdc_flushes(did3) < 0 ) TEST_ERROR;
+
+ /* Verify the cork status for DSET_CONTIG */
+ if(verify_cork_tag(fid, oinfo3.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Verify the cork status for DSET_BT1 */
+ if(verify_cork_tag(fid, oinfo.addr, FALSE) < 0 )
+ TEST_ERROR;
+
+ /* Un-cork the dataset: DSET_CONTIG */
+ if(H5Oenable_mdc_flushes(did3) < 0 ) TEST_ERROR;
+
+ /* Verify the cork status for DSET_CONTIG */
+ if(verify_cork_tag(fid, oinfo3.addr, FALSE) < 0 )
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Dclose(did3) < 0 ) TEST_ERROR;
+ if(H5Pclose(dcpl3) < 0 ) TEST_ERROR;
+ if(H5Dclose(did) < 0 ) TEST_ERROR;
+ if(H5Fclose(fid) < 0 ) TEST_ERROR;
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Sclose(sid);
+ H5Sclose(sid2);
+ H5Sclose(sid3);
+ H5Dclose(did);
+ H5Dclose(did2);
+ H5Dclose(did3);
+ H5Pclose(dcpl);
+ H5Pclose(dcpl2);
+ H5Pclose(dcpl3);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return 1;
+} /* verify_old_dset_cork */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_obj_dset_cork
+ *
+ * Purpose: This function verifies corking operations for dataset objects.
+ * Cache entries associated with the object tag are checked
+ * for the correct cork status.
+ *
+ * Exercises: a scalar dataset with one attribute, and a chunked
+ * dataset (implicit index, early allocation) with 8 attributes;
+ * then reopens the file (optionally with SWMR write access) and
+ * re-checks cork status, which does not persist across file close.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Vailin Choi; Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+verify_obj_dset_cork(hbool_t swmr)
+{
+ /* Variable Declarations */
+ hid_t fid = -1; /* File ID */
+ hid_t fapl = -1; /* File access property list */
+ hid_t aid = -1; /* Attribute ID */
+ hid_t sid = -1, sid2 = -1; /* Dataspace IDs */
+ hid_t did = -1, did2 = -1; /* Dataset IDs */
+ hid_t oid = -1; /* Object ID */
+ hid_t dcpl2; /* Dataset creation property list */
+ int i = 0; /* Local index variable */
+ hsize_t dim[1] = {100}; /* Dataset dimension size */
+ hsize_t chunk_dim[1] = {7}; /* Dataset chunk dimension size */
+ H5O_info_t oinfo, oinfo2; /* Object metadata information */
+ char attrname[500]; /* Name of attribute */
+ unsigned flags; /* File access flags */
+
+ if(swmr) {
+ TESTING("cork status for dataset objects with attributes (SWMR)");
+ } else {
+ TESTING("cork status for dataset objects with attributes");
+ }
+
+ /* Create fapl */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0 )
+ TEST_ERROR;
+ /* Set to use latest format (required for SWMR access) */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0 )
+ TEST_ERROR;
+
+ /* Create the file with/without SWMR access */
+ flags = H5F_ACC_TRUNC;
+ if(swmr)
+ flags |= H5F_ACC_SWMR_WRITE;
+ if((fid = H5Fcreate(FILENAME, flags, H5P_DEFAULT, fapl)) < 0 )
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if((sid = H5Screate(H5S_SCALAR)) < 0 )
+ TEST_ERROR;
+
+ /* Create dataset: DSET */
+ if((did = H5Dcreate2(fid, DSET, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Get dataset object header address */
+ if(H5Oget_info(did, &oinfo) < 0 )
+ TEST_ERROR;
+
+ /* Verify cork status of the dataset: DSET (not corked yet) */
+ if(verify_cork_tag(fid, oinfo.addr, FALSE) < 0 )
+ TEST_ERROR;
+
+ /* Cork the dataset: DSET */
+ if(H5Odisable_mdc_flushes(did) < 0 )
+ TEST_ERROR;
+
+ /* Attach and write to an attribute to the dataset: DSET */
+ if((aid = H5Acreate2(did, ATTR, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Verify cork status of the dataset: DSET */
+ if(verify_cork_tag(fid, oinfo.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Close the attribute */
+ if(H5Aclose(aid) < 0)
+ TEST_ERROR;
+
+ /* Verify cork status of the dataset: DSET (closing the attribute must not uncork) */
+ if(verify_cork_tag(fid, oinfo.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Create dcpl */
+ if((dcpl2 = H5Pcreate(H5P_DATASET_CREATE)) < 0 )
+ TEST_ERROR;
+ /* Set to early allocation for dataset space */
+ if(H5Pset_alloc_time(dcpl2, H5D_ALLOC_TIME_EARLY) < 0)
+ TEST_ERROR
+
+ /* Create chunked dataset with implicit indexing: DSET_NONE */
+ if(H5Pset_chunk(dcpl2, 1, chunk_dim) < 0)
+ FAIL_STACK_ERROR
+ if((sid2 = H5Screate_simple(1, dim, NULL)) < 0)
+ TEST_ERROR;
+ if((did2 = H5Dcreate2(fid, DSET_NONE, H5T_NATIVE_INT, sid2, H5P_DEFAULT, dcpl2, H5P_DEFAULT)) < 0 )
+ TEST_ERROR;
+
+ /* Get dataset object header address */
+ if(H5Oget_info(did2, &oinfo2) < 0 )
+ TEST_ERROR;
+
+ /* Cork the dataset: DSET_NONE */
+ if(H5Odisable_mdc_flushes(did2) < 0 )
+ TEST_ERROR;
+
+ /* Attach 8 attributes to the dataset */
+ for(i = 0;i < 8; i++) {
+ sprintf(attrname, "attr %d", i);
+ if((aid = H5Acreate2(did2, attrname, H5T_NATIVE_UINT, sid2, H5P_DEFAULT, H5P_DEFAULT)) < 0 )
+ TEST_ERROR;
+ if(H5Awrite(aid, H5T_NATIVE_UINT, &i) < 0 )
+ TEST_ERROR;
+ if(H5Aclose(aid) < 0 )
+ TEST_ERROR;
+ } /* end for */
+
+ /* Verify cork status of the dataset: DSET_NONE (still corked after attribute churn) */
+ if(verify_cork_tag(fid, oinfo2.addr, TRUE) < 0)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Oclose(did) < 0 ) TEST_ERROR;
+ if(H5Oclose(did2) < 0 ) TEST_ERROR;
+ if(H5Sclose(sid) < 0 ) TEST_ERROR;
+ if(H5Sclose(sid2) < 0 ) TEST_ERROR;
+ if(H5Pclose(dcpl2) < 0 ) TEST_ERROR;
+ if(H5Fclose(fid) < 0 ) TEST_ERROR;
+
+ /* Re-open the file */
+ flags = H5F_ACC_RDWR;
+ if(swmr)
+ flags |= H5F_ACC_SWMR_WRITE;
+ if((fid = H5Fopen(FILENAME, flags, fapl)) < 0)
+ TEST_ERROR;
+
+ /* Open the dataset object: DSET_NONE */
+ if((oid = H5Oopen(fid, DSET_NONE, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Verify cork status of the dataset: DSET_NONE (cork state does not survive file close) */
+ if(verify_cork_tag(fid, oinfo2.addr, FALSE) < 0 )
+ TEST_ERROR;
+
+ /* Open the attribute attached to the dataset object: DSET_NONE */
+ if((aid = H5Aopen_by_idx(oid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)4, H5P_DEFAULT, H5P_DEFAULT)) < 0 )
+ TEST_ERROR;
+
+ /* Cork the dataset: DSET_NONE */
+ if(H5Odisable_mdc_flushes(oid) < 0) TEST_ERROR
+
+ /* Verify cork status of the dataset: DSET_NONE */
+ if(verify_cork_tag(fid, oinfo2.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Close the attribute */
+ if(H5Aclose(aid) < 0 ) TEST_ERROR;
+
+ /* Verify cork status -- NOTE(review): this checks oinfo.addr (DSET from the
+ * first session, never re-opened or re-corked after the file reopen) for
+ * TRUE; presumably oinfo2.addr (DSET_NONE, corked via oid above) was
+ * intended -- confirm which object this check should cover. */
+ if(verify_cork_tag(fid, oinfo.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Oclose(oid) < 0 ) TEST_ERROR;
+ if(H5Pclose(fapl) < 0 ) TEST_ERROR;
+ if(H5Fclose(fid) < 0 ) TEST_ERROR;
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Aclose(aid);
+ H5Sclose(sid);
+ H5Sclose(sid2);
+ H5Dclose(did);
+ H5Dclose(did2);
+ H5Oclose(oid);
+ H5Pclose(dcpl2);
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return 1;
+} /* verify_obj_dset_cork */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_dset_cork
+ *
+ * Purpose: This function verifies corking operations for chunked datasets
+ * with different indexing types (extensible array, fixed array,
+ * and version-2 B-tree).
+ * Cache entries associated with the object tag are checked
+ * for the correct cork status.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Vailin Choi; Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+verify_dset_cork(hbool_t swmr, hbool_t new_format)
+{
+ /* Variable Declarations */
+ hid_t fid = -1; /* File ID */
+ hid_t fapl = -1; /* File access property list */
+ hid_t did = -1, did2 = -1, did3 = -1; /* Dataset IDs */
+ hid_t dcpl = -1; /* Dataset creation property list */
+ hid_t sid = -1, sid2 = -1, sid3 = -1; /* Dataspace IDs */
+ hsize_t dims[2] = {100, 20}; /* Dataset dimension sizes */
+ hsize_t max_dims[2] = {100, H5S_UNLIMITED}; /* Dataset maximum dimension sizes */
+ hsize_t chunk_dims[2] = {2, 5}; /* Dataset chunked dimension sizes */
+ int buf[100][20]; int i = 0, j = 0; /* Data buffer */
+ H5O_info_t oinfo, oinfo2, oinfo3; /* Object metadata information */
+ unsigned flags; /* File access flags */
+
+ /* Testing Macro */
+ if(swmr) {
+ if(new_format) {
+ TESTING("cork status for chunked datasets with different indexing types (SWMR & latest)");
+ } else {
+ TESTING("cork status for chunked datasets with different indexing types (SWMR & non-latest)");
+ } /* end if */
+ } else {
+ TESTING("cork status for chunked datasets with different indexing types (non-SWMR)");
+ } /* end if */
+
+ /* Create fapl */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0 )
+ TEST_ERROR;
+ if(new_format) {
+ /* Set to use latest format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0 )
+ TEST_ERROR;
+ } /* end if */
+
+ /* Create the file */
+ flags = H5F_ACC_TRUNC;
+ if(swmr)
+ flags |= H5F_ACC_SWMR_WRITE;
+ if((fid = H5Fcreate(FILENAME, flags, H5P_DEFAULT, fapl)) < 0 )
+ TEST_ERROR;
+
+ /* Create dcpl */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use chunked dataset */
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0) FAIL_STACK_ERROR
+
+ /* Create chunked dataset with extensible array indexing: DSET_EA
+ * (one unlimited dimension selects the extensible array index) */
+ if((sid = H5Screate_simple(2, dims, max_dims)) < 0)
+ TEST_ERROR;
+ if((did = H5Dcreate2(fid, DSET_EA, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0 )
+ TEST_ERROR;
+
+ /* Get dataset object header address: DSET_EA */
+ if(H5Oget_info(did, &oinfo) < 0 ) TEST_ERROR;
+
+ /* Cork the dataset: DSET_EA */
+ if(H5Odisable_mdc_flushes(did) < 0 ) TEST_ERROR;
+
+ /* Verify cork status */
+ if(verify_cork_tag(fid, oinfo.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Create chunked dataset with fixed array indexing: DSET_FA
+ * (fixed dimensions + chunked layout select the fixed array index;
+ * the chunked dcpl must be used here or the dataset is contiguous) */
+ if((sid2 = H5Screate_simple(2, dims, NULL)) < 0)
+ TEST_ERROR;
+ if((did2 = H5Dcreate2(fid, DSET_FA, H5T_NATIVE_INT, sid2, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0 )
+ TEST_ERROR;
+
+ /* Get dataset object header address: DSET_FA */
+ if(H5Oget_info(did2, &oinfo2) < 0 )
+ TEST_ERROR;
+
+ /* Cork the dataset: DSET_FA */
+ if(H5Odisable_mdc_flushes(did2) < 0 ) TEST_ERROR;
+
+ /* Uncork the dataset: DSET_EA */
+ if(H5Oenable_mdc_flushes(did) < 0 ) TEST_ERROR;
+
+ /* Verify the cork status for DSET_FA */
+ if(verify_cork_tag(fid, oinfo2.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Verify the cork status for DSET_EA */
+ if(verify_cork_tag(fid, oinfo.addr, FALSE) < 0 )
+ TEST_ERROR;
+
+ /* Create chunked dataset with v2-Btree indexing
+ * (more than one unlimited dimension selects the v2 B-tree index) */
+ max_dims[0] = H5S_UNLIMITED;
+ if((sid3 = H5Screate_simple(2, dims, max_dims)) < 0)
+ TEST_ERROR;
+ if((did3 = H5Dcreate2(fid, DSET_BT2, H5T_NATIVE_INT, sid3, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0 )
+ TEST_ERROR;
+
+ /* Get dataset object header address: DSET_BT2 */
+ if(H5Oget_info(did3, &oinfo3) < 0 )
+ TEST_ERROR;
+
+ /* Cork the dataset: DSET_BT2 */
+ if(H5Odisable_mdc_flushes(did3) < 0 ) TEST_ERROR;
+
+ /* Verify the cork status for DSET_BT2 */
+ if(verify_cork_tag(fid, oinfo3.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Dclose(did3) < 0 ) TEST_ERROR;
+ if(H5Sclose(sid3) < 0 ) TEST_ERROR;
+
+ if(H5Dclose(did2) < 0 ) TEST_ERROR;
+ if(H5Sclose(sid2) < 0 ) TEST_ERROR;
+
+ if(H5Dclose(did) < 0 ) TEST_ERROR;
+ if(H5Sclose(sid) < 0 ) TEST_ERROR;
+
+ if(H5Pclose(dcpl) < 0 ) TEST_ERROR;
+ if(H5Fclose(fid) < 0 ) TEST_ERROR;
+
+ /* Reopen the file */
+ flags = H5F_ACC_RDWR;
+ if(swmr)
+ flags |= H5F_ACC_SWMR_WRITE;
+ if((fid = H5Fopen(FILENAME, flags, fapl)) < 0 ) TEST_ERROR;
+
+ /* Initialize data buffer */
+ for(i = 0; i < (int)dims[0]; i++) {
+ for(j = 0; j < (int)dims[1]; j++) {
+ buf[i][j] = (i + 1) * (j + 1);
+ } /* end for */
+ } /* end for */
+
+ /* Open and write to the dataset: DSET_EA */
+ if((did = H5Dopen2(fid, DSET_EA, H5P_DEFAULT)) < 0 ) TEST_ERROR;
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ TEST_ERROR;
+
+ /* Verify the cork status for DSET_EA (cork state does not survive file close) */
+ if(verify_cork_tag(fid, oinfo.addr, FALSE) < 0 )
+ TEST_ERROR;
+
+ /* Open and write to the dataset: DSET_FA */
+ if((did2 = H5Dopen2(fid, DSET_FA, H5P_DEFAULT)) < 0 ) TEST_ERROR;
+ if(H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ TEST_ERROR;
+
+ /* Cork the dataset: DSET_FA */
+ if(H5Odisable_mdc_flushes(did2) < 0 ) TEST_ERROR;
+
+ /* Verify the cork status for DSET_FA */
+ if(verify_cork_tag(fid, oinfo2.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Open and write to the dataset: DSET_BT2 */
+ if((did3 = H5Dopen2(fid, DSET_BT2, H5P_DEFAULT)) < 0 ) TEST_ERROR;
+ if(H5Dwrite(did3, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ TEST_ERROR;
+
+ /* Verify the cork status for DSET_BT2 */
+ if(verify_cork_tag(fid, oinfo3.addr, FALSE) < 0 )
+ TEST_ERROR;
+
+ /* Cork the dataset: DSET_BT2 */
+ if(H5Odisable_mdc_flushes(did3) < 0 ) TEST_ERROR;
+
+ /* Verify the cork status for DSET_BT2 */
+ if(verify_cork_tag(fid, oinfo3.addr, TRUE) < 0 )
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Dclose(did) < 0 ) TEST_ERROR;
+ if(H5Dclose(did2) < 0 ) TEST_ERROR;
+ if(H5Dclose(did3) < 0 ) TEST_ERROR;
+ if(H5Pclose(fapl) < 0 ) TEST_ERROR;
+ if(H5Fclose(fid) < 0 ) TEST_ERROR;
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Sclose(sid);
+ H5Sclose(sid2);
+ H5Sclose(sid3);
+ H5Dclose(did);
+ H5Dclose(did2);
+ H5Dclose(did3);
+ H5Pclose(dcpl);
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return 1;
+} /* verify_dset_cork */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_group_cork
+ *
+ * Purpose: This function verifies corking operations for groups.
+ * Cache entries associated with the object tag are checked
+ * for the correct cork status.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Vailin Choi; Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+verify_group_cork(hbool_t swmr)
+{
+ /* Variable Declarations */
+ hid_t fid = -1; /* File ID */
+ hid_t fapl = -1; /* File access property list */
+ hid_t gid = -1, gid2 = -1, gid3 = -1; /* Group IDs */
+ H5O_info_t oinfo, oinfo2, oinfo3; /* Object metadata information */
+ hid_t aid = -1; /* Attribute ID (initialized for error cleanup) */
+ hid_t sid = -1; /* Dataspace ID (initialized for error cleanup) */
+ char attrname[500]; /* Name of attribute */
+ unsigned flags; /* File access flags */
+ int i = 0; /* Local index variable */
+
+ /* Testing Macro */
+ if(swmr) {
+ TESTING("cork status for groups (SWMR)");
+ } else {
+ TESTING("cork status for groups");
+ }
+
+ /* Create fapl */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0 )
+ TEST_ERROR;
+ /* Set to use latest format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0 )
+ TEST_ERROR;
+
+ /* Create the file */
+ flags = H5F_ACC_TRUNC;
+ if(swmr)
+ flags |= H5F_ACC_SWMR_WRITE;
+ if((fid = H5Fcreate(FILENAME, flags, H5P_DEFAULT, fapl)) < 0 )
+ TEST_ERROR;
+
+ /* Create 3 nested groups: /GRP, /GRP/GRP2, /GRP/GRP2/GRP3 */
+ if((gid = H5Gcreate2(fid, GRP, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if((gid2 = H5Gcreate2(gid, GRP2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if((gid3 = H5Gcreate2(gid2, GRP3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Cork the second group: GRP2 */
+ if(H5Odisable_mdc_flushes(gid2) < 0) TEST_ERROR
+
+ /* Get group object header addresses */
+ if(H5Oget_info(gid, &oinfo) < 0) TEST_ERROR;
+ if(H5Oget_info(gid2, &oinfo2) < 0) TEST_ERROR;
+ if(H5Oget_info(gid3, &oinfo3) < 0) TEST_ERROR;
+
+ /* Verify cork status of the groups: only GRP2 is corked */
+ if(verify_cork_tag(fid, oinfo.addr, FALSE) < 0)
+ TEST_ERROR;
+ if(verify_cork_tag(fid, oinfo2.addr, TRUE) < 0)
+ TEST_ERROR;
+ if(verify_cork_tag(fid, oinfo3.addr, FALSE) < 0)
+ TEST_ERROR;
+
+ /* Close the second group: GRP2 */
+ if(H5Gclose(gid2) < 0 ) TEST_ERROR;
+
+ /* Re-open the second group: GRP2 */
+ if((gid2 = H5Gopen2(gid, GRP2, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify cork status of the second group: GRP2
+ * (cork state is dropped when the last handle to the object closes) */
+ if(verify_cork_tag(fid, oinfo2.addr, FALSE) < 0)
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Gclose(gid) < 0 ) TEST_ERROR;
+ if(H5Gclose(gid2) < 0 ) TEST_ERROR;
+ if(H5Gclose(gid3) < 0 ) TEST_ERROR;
+ if(H5Fclose(fid) < 0 ) TEST_ERROR;
+
+ /* Re-open the file and the three groups */
+ flags = H5F_ACC_RDWR;
+ if(swmr)
+ flags |= H5F_ACC_SWMR_WRITE;
+ if((fid = H5Fopen(FILENAME, flags, fapl)) < 0 )
+ FAIL_STACK_ERROR
+ if((gid = H5Gopen2(fid, GRP, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ if((gid2 = H5Gopen2(gid, GRP2, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ if((gid3 = H5Gopen2(gid2, GRP3, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create dataspace */
+ if((sid = H5Screate(H5S_SCALAR)) < 0 ) TEST_ERROR;
+
+ /* Attach 8 attributes to the third group: GRP3 */
+ for(i = 0;i < 8; i++) {
+ /* snprintf bounds the write to the attrname buffer */
+ snprintf(attrname, sizeof(attrname), "attr %d", i);
+ if((aid = H5Acreate2(gid3, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0 )
+ TEST_ERROR;
+ if(H5Awrite(aid, H5T_NATIVE_UINT, &i) < 0)
+ TEST_ERROR;
+ /* Cork the third group while attaching attributes */
+ if(i == 3) {
+ if(H5Odisable_mdc_flushes(gid3) < 0) TEST_ERROR
+ if(verify_cork_tag(fid, oinfo3.addr, TRUE) < 0)
+ TEST_ERROR;
+ }
+ if(H5Aclose(aid) < 0 ) TEST_ERROR;
+ } /* end for */
+
+ /* Verify cork status of the third group: GRP3 */
+ if(verify_cork_tag(fid, oinfo3.addr, TRUE) < 0)
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Gclose(gid) < 0) TEST_ERROR;
+ if(H5Gclose(gid2) < 0) TEST_ERROR;
+ if(H5Gclose(gid3) < 0) TEST_ERROR;
+ if(H5Sclose(sid) < 0) TEST_ERROR;
+ if(H5Pclose(fapl) < 0) TEST_ERROR;
+ if(H5Fclose(fid) < 0) TEST_ERROR;
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Aclose(aid);
+ H5Gclose(gid);
+ H5Gclose(gid2);
+ H5Gclose(gid3);
+ H5Sclose(sid);
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return 1;
+} /* verify_group_cork */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_named_cork
+ *
+ * Purpose: This function verifies corking operations for named datatypes.
+ * Cache entries associated with the object tag are checked
+ * for the correct cork status.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Vailin Choi; Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+verify_named_cork(hbool_t swmr)
+{
+ /* Variable Declarations */
+ hid_t fid = -1; /* File ID */
+ hid_t fapl = -1; /* File access property list */
+ hid_t tid = -1, tid2 = -1, tid3 = -1; /* Datatype IDs */
+ hid_t gid = -1, gid2 = -1; /* Group IDs */
+ H5O_info_t oinfo, oinfo2, oinfo3, oinfo4; /* Object metadata information */
+ hid_t aid = -1; /* Attribute ID */
+ hid_t sid = -1; /* Dataspace ID (initialized for error cleanup) */
+ hid_t did = -1; /* Dataset ID (initialized for error cleanup) */
+ char attrname[500]; /* Name of attribute */
+ unsigned flags; /* File access flags */
+ int i = 0; /* Local index variable */
+
+ /* Testing Macro */
+ if(swmr) {
+ TESTING("cork status for named datatypes (SWMR)");
+ } else {
+ TESTING("cork status for named datatypes");
+ }
+
+ /* Create fapl */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0 )
+ TEST_ERROR;
+ /* Set to use latest format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0 )
+ TEST_ERROR;
+
+ /* Create the file */
+ flags = H5F_ACC_TRUNC;
+ if(swmr)
+ flags |= H5F_ACC_SWMR_WRITE;
+ if((fid = H5Fcreate(FILENAME, flags, H5P_DEFAULT, fapl)) < 0 )
+ TEST_ERROR;
+
+ /* Create 3 copies of datatypes */
+ if((tid = H5Tcopy(H5T_NATIVE_INT)) < 0) TEST_ERROR;
+ if((tid2 = H5Tcopy(H5T_NATIVE_LONG)) < 0) TEST_ERROR;
+ if((tid3 = H5Tcopy(H5T_NATIVE_CHAR)) < 0) TEST_ERROR;
+
+ /* Commit datatype /DT */
+ if(H5Tcommit2(fid, DT, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+
+ /* Create /GRP */
+ if((gid = H5Gcreate2(fid, GRP, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ /* Commit datatype /GRP/DT2 */
+ if(H5Tcommit2(gid, DT2, tid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+
+ /* Create /GRP/GRP2 */
+ if((gid2 = H5Gcreate2(gid, GRP2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ /* Commit datatype /GRP/GRP2/DT3 */
+ if(H5Tcommit2(gid2, DT3, tid3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+
+ /* Cork 2 named datatypes: /DT and /GRP/GRP2/DT3 */
+ if(H5Odisable_mdc_flushes(tid) < 0) TEST_ERROR
+ if(H5Odisable_mdc_flushes(tid3) < 0) TEST_ERROR
+
+ /* Get named datatype object header addresses */
+ if(H5Oget_info(tid, &oinfo) < 0) TEST_ERROR;
+ if(H5Oget_info(tid2, &oinfo2) < 0) TEST_ERROR;
+ if(H5Oget_info(tid3, &oinfo3) < 0) TEST_ERROR;
+
+ /* Verify cork status of the named datatypes */
+ if(verify_cork_tag(fid, oinfo.addr, TRUE) < 0)
+ TEST_ERROR;
+ if(verify_cork_tag(fid, oinfo2.addr, FALSE) < 0)
+ TEST_ERROR;
+ if(verify_cork_tag(fid, oinfo3.addr, TRUE) < 0)
+ TEST_ERROR;
+
+ /* Close the datatypes */
+ if(H5Tclose(tid) < 0 ) TEST_ERROR;
+ if(H5Tclose(tid2) < 0 ) TEST_ERROR;
+ if(H5Tclose(tid3) < 0 ) TEST_ERROR;
+
+ /* Re-open the named datatypes */
+ if((tid = H5Topen2(fid, DT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ if((tid2 = H5Topen2(gid, DT2, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ if((tid3 = H5Topen2(gid2, DT3, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify cork status of the named datatypes
+ * (cork state is dropped when the last handle to the object closes) */
+ if(verify_cork_tag(fid, oinfo.addr, FALSE) < 0)
+ TEST_ERROR;
+ if(verify_cork_tag(fid, oinfo2.addr, FALSE) < 0)
+ TEST_ERROR;
+ if(verify_cork_tag(fid, oinfo3.addr, FALSE) < 0)
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Tclose(tid) < 0 ) TEST_ERROR;
+ if(H5Tclose(tid2) < 0 ) TEST_ERROR;
+ if(H5Tclose(tid3) < 0 ) TEST_ERROR;
+ if(H5Gclose(gid) < 0 ) TEST_ERROR;
+ if(H5Gclose(gid2) < 0 ) TEST_ERROR;
+ if(H5Fclose(fid) < 0 ) TEST_ERROR;
+
+
+ /* Re-open the file and the two groups */
+ flags = H5F_ACC_RDWR;
+ if(swmr)
+ flags |= H5F_ACC_SWMR_WRITE;
+ if((fid = H5Fopen(FILENAME, flags, fapl)) < 0 )
+ FAIL_STACK_ERROR
+ if((gid = H5Gopen2(fid, GRP, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ if((gid2 = H5Gopen2(gid, GRP2, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open the named datatypes */
+ if((tid = H5Topen2(fid, DT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ if((tid2 = H5Topen2(gid, DT2, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ if((tid3 = H5Topen2(gid2, DT3, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Cork the datatype: DT2 */
+ if(H5Odisable_mdc_flushes(tid2) < 0) TEST_ERROR
+
+ /* Create dataspace */
+ if((sid = H5Screate(H5S_SCALAR)) < 0 ) TEST_ERROR;
+
+ /* Attach 8 attributes to datatype: DT3 */
+ for(i = 0;i < 8; i++) {
+ /* snprintf bounds the write to the attrname buffer */
+ snprintf(attrname, sizeof(attrname), "attr %d", i);
+ if((aid = H5Acreate2(tid3, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0 )
+ TEST_ERROR;
+ if(H5Awrite(aid, H5T_NATIVE_UINT, &i) < 0)
+ TEST_ERROR;
+ /* Cork the datatype while attaching attributes */
+ if(i == 3) {
+ if(H5Odisable_mdc_flushes(tid3) < 0) TEST_ERROR
+ if(verify_cork_tag(fid, oinfo3.addr, TRUE) < 0)
+ TEST_ERROR;
+ }
+ if(H5Aclose(aid) < 0 ) TEST_ERROR;
+ } /* end for */
+
+ /* Create a dataset with named datatype: DT */
+ if((did = H5Dcreate2(fid, DSET, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get dataset object header address */
+ if(H5Oget_info(did, &oinfo4) < 0) TEST_ERROR;
+
+ /* Cork the dataset: DSET */
+ if(H5Odisable_mdc_flushes(did) < 0) TEST_ERROR
+
+ /* Verify cork status of the datatype: DT */
+ if(verify_cork_tag(fid, oinfo.addr, FALSE) < 0)
+ TEST_ERROR;
+ /* Verify cork status of the datatype: DT2 */
+ if(verify_cork_tag(fid, oinfo2.addr, TRUE) < 0)
+ TEST_ERROR;
+ /* Verify cork status of the datatype: DT3 */
+ if(verify_cork_tag(fid, oinfo3.addr, TRUE) < 0)
+ TEST_ERROR;
+
+ /* Un-cork the datatype: DT3 */
+ if(H5Oenable_mdc_flushes(tid3) < 0) TEST_ERROR
+ /* Verify cork status of the datatype: DT3 */
+ if(verify_cork_tag(fid, oinfo3.addr, FALSE) < 0)
+ TEST_ERROR;
+
+ /* Cork the datatype: DT */
+ if(H5Odisable_mdc_flushes(tid) < 0) TEST_ERROR
+
+ /* Verify cork status of the datatype: DT */
+ if(verify_cork_tag(fid, oinfo.addr, TRUE) < 0)
+ TEST_ERROR;
+ /* Verify cork status of the datatype: DT2 */
+ if(verify_cork_tag(fid, oinfo2.addr, TRUE) < 0)
+ TEST_ERROR;
+
+ /* Verify cork status of the dataset: DSET */
+ if(verify_cork_tag(fid, oinfo4.addr, TRUE) < 0)
+ TEST_ERROR;
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* Verify cork status of the datatype: DT
+ * (the shared named datatype stays corked while tid is open) */
+ if(verify_cork_tag(fid, oinfo.addr, TRUE) < 0)
+ TEST_ERROR;
+
+ /* Verify cork status of the dataset: DSET (uncorked on close) */
+ if(verify_cork_tag(fid, oinfo4.addr, FALSE) < 0)
+ TEST_ERROR;
+
+ /* Closing */
+ if(H5Tclose(tid) < 0 ) TEST_ERROR;
+ if(H5Tclose(tid2) < 0 ) TEST_ERROR;
+ if(H5Tclose(tid3) < 0 ) TEST_ERROR;
+ if(H5Gclose(gid) < 0 ) TEST_ERROR;
+ if(H5Gclose(gid2) < 0 ) TEST_ERROR;
+ if(H5Sclose(sid) < 0 ) TEST_ERROR;
+ if(H5Fclose(fid) < 0 ) TEST_ERROR;
+ if(H5Pclose(fapl) < 0) TEST_ERROR;
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Aclose(aid);
+ H5Tclose(tid);
+ H5Tclose(tid2);
+ H5Tclose(tid3);
+ H5Gclose(gid);
+ H5Gclose(gid2);
+ H5Dclose(did);
+ H5Sclose(sid);
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return 1;
+} /* verify_named_cork */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_multiple_cork
+ *
+ * Purpose: This function verifies corking operations when there are
+ * multiple opens of files, objects, attributes.
+ * (based on test_attr_bug5() in tattr.c)
+ * Cache entries associated with the object tag are checked
+ * for the correct cork status.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Vailin Choi; Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+verify_multiple_cork(hbool_t swmr)
+{
+ /* Variable Declarations */
+ hid_t fid1 = -1, fid2 = -1; /* File ID */
+ hid_t fapl = -1; /* File access property list */
+ hid_t tid1 = -1, tid2 = -1; /* Datatype IDs */
+ hid_t gid1 = -1, gid2 = -1; /* Group IDs */
+ hid_t did1 = -1, did2 = -1; /* Dataset ID */
+ hid_t aidg1 = -1, aidg2 = -1; /* Attribute ID */
+ hid_t aidd1 = -1, aidd2 = -1; /* Attribute ID */
+ hid_t aidt1 = -1, aidt2 = -1; /* Attribute ID */
+ hid_t sid = -1; /* Dataspace ID */
+ H5O_info_t oinfo1, oinfo2, oinfo3; /* Object metadata information */
+ hsize_t dim[1] = {5}; /* Dimension sizes */
+ unsigned flags; /* File access flags */
+ hbool_t corked; /* Cork status */
+ herr_t ret; /* Return value */
+
+ /* Testing Macro */
+ if(swmr) {
+ TESTING("cork status for multiple opens (SWMR)");
+ } else {
+ TESTING("cork status for multiple opens");
+ }
+
+ /* Create fapl */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0 )
+ TEST_ERROR
+ /* Set to use latest format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0 )
+ TEST_ERROR
+
+ /* Create the file */
+ flags = H5F_ACC_TRUNC;
+ if(swmr)
+ flags |= H5F_ACC_SWMR_WRITE;
+ if((fid1 = H5Fcreate(FILENAME, flags, H5P_DEFAULT, fapl)) < 0 )
+ TEST_ERROR
+
+ /* Open root group */
+ if((gid1 = H5Gopen2(fid1, "/", H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Create and commit datatype */
+ if((tid1 = H5Tcopy(H5T_STD_I32LE)) < 0)
+ TEST_ERROR
+ if(H5Tcommit2(fid1, DT, tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR
+
+ /* Create dataset */
+ if((sid = H5Screate_simple(1, dim, NULL)) < 0)
+ TEST_ERROR
+ if((did1 = H5Dcreate2(fid1, DSET, tid1, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Create attribute on root group */
+ if((aidg1 = H5Acreate2(gid1, GRP_ATTR, tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Create attribute on dataset */
+ if((aidd1 = H5Acreate2(did1, DSET_ATTR, tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Create attribute on datatype */
+ if((aidt1 = H5Acreate2(tid1, DT_ATTR, tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Aclose(aidt1) < 0) TEST_ERROR
+ if(H5Aclose(aidd1) < 0) TEST_ERROR
+ if(H5Aclose(aidg1) < 0) TEST_ERROR
+ if(H5Dclose(did1) < 0) TEST_ERROR
+ if(H5Tclose(tid1) < 0) TEST_ERROR
+ if(H5Gclose(gid1) < 0) TEST_ERROR
+ if(H5Fclose(fid1) < 0) TEST_ERROR
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* Open the file twice: fid1, fid2 */
+ flags = H5F_ACC_RDWR;
+ if(swmr)
+ flags |= H5F_ACC_SWMR_WRITE;
+ if((fid1 = H5Fopen(FILENAME, flags, fapl)) < 0)
+ TEST_ERROR
+ if((fid2 = H5Fopen(FILENAME, flags, fapl)) < 0)
+ TEST_ERROR
+
+ /* Open the root group twice: gid1, gid2 */
+ if((gid1 = H5Gopen2(fid1, "/", H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if((gid2 = H5Gopen2(fid2, "/", H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Open the root group attribute twice: aidg1, aidg2 */
+ if((aidg1 = H5Aopen(gid1, GRP_ATTR, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if((aidg2 = H5Aopen(gid2, GRP_ATTR, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Cork the group: gid2 */
+ if(H5Odisable_mdc_flushes(gid2) < 0)
+ TEST_ERROR
+
+ /* Verify cork status of the group: gid2 */
+ if(H5Oget_info(gid2, &oinfo1) < 0) TEST_ERROR;
+ if(verify_cork_tag(fid2, oinfo1.addr, TRUE) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the group: gid1
+ * (both handles refer to the same object, so gid1 reports corked too) */
+ if(H5Oare_mdc_flushes_disabled(gid1, &corked) < 0)
+ TEST_ERROR;
+ if(!corked) TEST_ERROR
+
+ /* Open the dataset twice: did1, did2 */
+ if((did1 = H5Dopen2(fid1, DSET, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if((did2 = H5Dopen2(fid2, DSET, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Open the dataset attribute twice: aidd1, aidd2 */
+ if((aidd1 = H5Aopen(did1, DSET_ATTR, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if((aidd2 = H5Aopen(did2, DSET_ATTR, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Cork the dataset: did1 */
+ if(H5Odisable_mdc_flushes(did1) < 0)
+ TEST_ERROR
+
+ /* Verify cork status of the dataset: did1 */
+ if(H5Oget_info(did1, &oinfo2) < 0) TEST_ERROR;
+ if(verify_cork_tag(fid1, oinfo2.addr, TRUE) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the dataset: did2 */
+ if(H5Oare_mdc_flushes_disabled(did2, &corked) < 0)
+ TEST_ERROR;
+ if(!corked) TEST_ERROR
+
+ /* Open the datatype twice: tid1, tid2 */
+ if((tid1 = H5Topen2(fid1, DT, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if((tid2 = H5Topen2(fid2, DT, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Open the datatype attribute twice: aidt1, aidt2 */
+ if((aidt1 = H5Aopen(tid1, DT_ATTR, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if((aidt2 = H5Aopen(tid2, DT_ATTR, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Cork the datatype: tid2 */
+ if(H5Odisable_mdc_flushes(tid2) < 0)
+ TEST_ERROR
+
+ /* Verify cork status of the datatype: tid2 */
+ if(H5Oget_info(tid2, &oinfo3) < 0) TEST_ERROR;
+ if(verify_cork_tag(fid2, oinfo3.addr, TRUE) < 0)
+ TEST_ERROR;
+
+ /* Check cork status of the datatype: tid1 */
+ if(H5Oare_mdc_flushes_disabled(tid1, &corked) < 0)
+ TEST_ERROR;
+ if(!corked) TEST_ERROR
+
+ /* Uncork the group: gid1 */
+ if(H5Oenable_mdc_flushes(gid1) < 0)
+ TEST_ERROR
+
+ /* Verify cork status of the group: gid1 */
+ if(H5Oget_info(gid1, &oinfo1) < 0) TEST_ERROR;
+ if(verify_cork_tag(fid1, oinfo1.addr, FALSE) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the group: gid2 */
+ if(H5Oare_mdc_flushes_disabled(gid2, &corked) < 0)
+ TEST_ERROR;
+ if(corked) TEST_ERROR
+
+ /* Close the group: gid2 */
+ if(H5Gclose(gid2) < 0) TEST_ERROR
+
+ /* Check cork status of the group: gid1 */
+ if(H5Oare_mdc_flushes_disabled(gid1, &corked) < 0)
+ TEST_ERROR;
+ if(corked) TEST_ERROR
+
+ /* Verify cork status of the group: gid1 */
+ if(verify_cork_tag(fid1, oinfo1.addr, FALSE) < 0)
+ TEST_ERROR
+
+ /* Close the group: gid1 */
+ if(H5Gclose(gid1) < 0) TEST_ERROR
+
+ /* Uncork the dataset: did2 */
+ if(H5Oenable_mdc_flushes(did2) < 0)
+ TEST_ERROR
+
+ /* Verify cork status of the dataset: did2 */
+ if(H5Oget_info(did2, &oinfo2) < 0) TEST_ERROR;
+ if(verify_cork_tag(fid2, oinfo2.addr, FALSE) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the dataset: did1 */
+ if(H5Oare_mdc_flushes_disabled(did1, &corked) < 0)
+ TEST_ERROR;
+ if(corked) TEST_ERROR
+
+ /* Close the dataset: did2 */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* Check cork status of the dataset: did1 */
+ if(H5Oare_mdc_flushes_disabled(did1, &corked) < 0)
+ TEST_ERROR;
+ if(corked) TEST_ERROR
+
+ /* Verify cork status of the dataset: did1 */
+ if(verify_cork_tag(fid1, oinfo2.addr, FALSE) < 0)
+ TEST_ERROR
+
+ /* Close the dataset: did1 */
+ if(H5Dclose(did1) < 0) TEST_ERROR
+
+ /* Check cork status of the datatype: tid1 */
+ if(H5Oare_mdc_flushes_disabled(tid1, &corked) < 0)
+ TEST_ERROR;
+ if(!corked) TEST_ERROR
+
+ /* Close datatype: tid1 */
+ if(H5Tclose(tid1) < 0) TEST_ERROR
+
+ /* Check cork status of the datatype: tid2
+ * (still corked -- a handle to the datatype remains open) */
+ if(H5Oare_mdc_flushes_disabled(tid2, &corked) < 0)
+ TEST_ERROR;
+ if(!corked) TEST_ERROR
+
+ /* Close datatype: tid2 */
+ if(H5Tclose(tid2) < 0) TEST_ERROR
+
+ /* Should fail to cork the attribute: aidg2; not an object */
+ H5E_BEGIN_TRY {
+ ret = H5Odisable_mdc_flushes(aidg2);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Should fail to cork the attribute: aidd1; not an object */
+ H5E_BEGIN_TRY {
+ ret = H5Odisable_mdc_flushes(aidd1);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Should fail to check cork status of the attribute: aidt2; not an object */
+ H5E_BEGIN_TRY {
+ ret = H5Oare_mdc_flushes_disabled(aidt2, &corked);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Close all attributes */
+ if(H5Aclose(aidg1) < 0) TEST_ERROR
+ if(H5Aclose(aidg2) < 0) TEST_ERROR
+ if(H5Aclose(aidd1) < 0) TEST_ERROR
+ if(H5Aclose(aidd2) < 0) TEST_ERROR
+ if(H5Aclose(aidt1) < 0) TEST_ERROR
+ if(H5Aclose(aidt2) < 0) TEST_ERROR
+
+ /* Should fail to cork the file: fid1; not an object
+ * (only the second call's return value is checked) */
+ H5E_BEGIN_TRY {
+ ret = H5Oare_mdc_flushes_disabled(fid1, &corked);
+ ret = H5Odisable_mdc_flushes(fid1);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Should fail to uncork the file: fid2; not an object */
+ H5E_BEGIN_TRY {
+ ret = H5Oenable_mdc_flushes(fid2);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Pclose(fapl) < 0) TEST_ERROR
+ if(H5Fclose(fid1) < 0) TEST_ERROR
+ if(H5Fclose(fid2) < 0) TEST_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Tclose(tid1);
+ H5Tclose(tid2);
+ H5Gclose(gid1);
+ H5Gclose(gid2);
+ H5Dclose(did1);
+ H5Dclose(did2);
+ H5Aclose(aidg1);
+ H5Aclose(aidg2);
+ H5Aclose(aidd1);
+ H5Aclose(aidt1);
+ H5Aclose(aidt2);
+ H5Aclose(aidd2);
+ H5Sclose(sid);
+ H5Pclose(fapl);
+ H5Fclose(fid1);
+ H5Fclose(fid2);
+ } H5E_END_TRY;
+ return 1;
+} /* verify_multiple_cork */
+
+/*-------------------------------------------------------------------------
+ * Function: test_objs_cork
+ *
+ * Purpose: This function verifies H5Odisable_mdc_flushes/H5Oenable_mdc_flushes/H5Oare_mdc_flushes_disabled public
+ * routines are working as specified.
+ * Exercises groups, named datatypes and datasets; verifies that
+ * the calls fail for IDs that are not objects (transient
+ * datatype, dataspace, attribute), and that cork status is not
+ * retained across close/reopen of an object.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Vailin Choi; Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_objs_cork(hbool_t new_format)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t fapl; /* File access property list */
+ hid_t gid, did, tid; /* Object IDs */
+ hid_t sid; /* Dataspace ID */
+ hid_t aid; /* Attribute ID */
+ hsize_t dims[RANK]; /* Dataset dimension sizes */
+ hbool_t corked; /* Cork status of an object */
+ herr_t ret; /* Return value */
+
+ /* Testing Macro */
+ if(new_format) {
+ TESTING("H5Odisable_mdc_flushes/H5Oenable_mdc_flushes/H5Oare_mdc_flushes_disabled (new library format)");
+ } else {
+ TESTING("H5Odisable_mdc_flushes/H5Oenable_mdc_flushes/H5Oare_mdc_flushes_disabled (old library format)");
+ } /* end if */
+
+ /* Create fapl */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0 )
+ TEST_ERROR;
+
+ /* Set to use latest format */
+ if(new_format) {
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0 )
+ TEST_ERROR;
+ } /* end if */
+
+ /* Create an HDF5 file */
+ if((fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ TEST_ERROR
+
+ /* Create group */
+ if((gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the group: not corked */
+ if(H5Oare_mdc_flushes_disabled(gid, &corked) < 0)
+ TEST_ERROR
+ if(corked) TEST_ERROR
+
+ /* Cork the group: an object */
+ if(H5Odisable_mdc_flushes(gid) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the group: corked */
+ if(H5Oare_mdc_flushes_disabled(gid, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Close the group */
+ if(H5Gclose(gid) < 0)
+ TEST_ERROR
+
+ /* Create a transient copy of a native type */
+ if((tid = H5Tcopy(H5T_NATIVE_INT)) < 0)
+ TEST_ERROR
+
+ /* Should fail to cork the datatype: not an object (transient, not committed) */
+ H5E_BEGIN_TRY {
+ ret = H5Odisable_mdc_flushes(tid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Create a named datatype */
+ if(H5Tcommit2(fid, "group/datatype", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the named datatype: not corked */
+ if(H5Oare_mdc_flushes_disabled(tid, &corked) < 0)
+ TEST_ERROR
+ if(corked) TEST_ERROR
+
+ /* Cork the named datatype: an object (now committed) */
+ if(H5Odisable_mdc_flushes(tid) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the named datatype: corked */
+ if(H5Oare_mdc_flushes_disabled(tid, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Close the named datatype */
+ if(H5Tclose(tid) < 0)
+ TEST_ERROR
+
+ /* Create dataspace */
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ if((sid = H5Screate_simple(RANK, dims, NULL)) < 0)
+ TEST_ERROR
+
+ /* Should fail to uncork the dataspace: not an object */
+ H5E_BEGIN_TRY {
+ ret = H5Oenable_mdc_flushes(sid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Create dataset. */
+ if((did = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Create attribute on the dataset */
+ if((aid = H5Acreate2(did, "attr", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Should fail to check cork status of the attribute: not an object */
+ H5E_BEGIN_TRY {
+ ret = H5Oare_mdc_flushes_disabled(aid, &corked);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Check cork status of the dataset: not corked */
+ if(H5Oare_mdc_flushes_disabled(did, &corked) < 0)
+ TEST_ERROR
+ if(corked) TEST_ERROR
+
+ /* Cork the dataset: an object */
+ if(H5Odisable_mdc_flushes(did) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the dataset: corked */
+ if(H5Oare_mdc_flushes_disabled(did, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Close the dataset (dataspace and attribute stay open until the end) */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+
+ /* Open the group again */
+ if((gid = H5Oopen(fid, "group", H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the group: not corked after close/reopen */
+ if(H5Oare_mdc_flushes_disabled(gid, &corked) < 0)
+ TEST_ERROR
+ if(corked) TEST_ERROR
+
+ /* Cork the group */
+ if(H5Odisable_mdc_flushes(gid) < 0)
+ TEST_ERROR
+
+ /* Should fail to cork the group again: already corked */
+ H5E_BEGIN_TRY {
+ ret = H5Odisable_mdc_flushes(gid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Check cork status of the group: still corked */
+ if(H5Oare_mdc_flushes_disabled(gid, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Open the named datatype */
+ if((tid = H5Oopen(fid, "group/datatype", H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the named datatype: not corked after close/reopen */
+ if(H5Oare_mdc_flushes_disabled(tid, &corked) < 0)
+ TEST_ERROR
+ if(corked) TEST_ERROR
+
+ /* Should fail to un-cork the named datatype that is not corked yet */
+ H5E_BEGIN_TRY {
+ ret = H5Oenable_mdc_flushes(tid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Cork the named datatype */
+ if(H5Odisable_mdc_flushes(tid) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the named datatype: corked */
+ if(H5Oare_mdc_flushes_disabled(tid, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Open the dataset */
+ if((did = H5Oopen(fid, "/dataset", H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the dataset: not corked after close/reopen */
+ if(H5Oare_mdc_flushes_disabled(did, &corked) < 0)
+ TEST_ERROR
+ if(corked) TEST_ERROR
+
+ /* Cork the dataset */
+ if(H5Odisable_mdc_flushes(did) < 0)
+ TEST_ERROR
+
+ /* Check cork status of dataset: corked */
+ if(H5Oare_mdc_flushes_disabled(did, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Un-cork the dataset */
+ if(H5Oenable_mdc_flushes(did) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the dataset: uncorked again */
+ if(H5Oare_mdc_flushes_disabled(did, &corked) < 0)
+ TEST_ERROR
+ if(corked) TEST_ERROR
+
+ /* Closing */
+ if(H5Tclose(tid) < 0) TEST_ERROR
+ if(H5Gclose(gid) < 0) TEST_ERROR
+ if(H5Dclose(did) < 0) TEST_ERROR
+ if(H5Sclose(sid) < 0) TEST_ERROR
+ if(H5Aclose(aid) < 0) TEST_ERROR
+ if(H5Pclose(fapl) < 0) TEST_ERROR
+ if(H5Fclose(fid) < 0) TEST_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ /* Best-effort cleanup; errors from already-closed IDs are suppressed */
+ H5E_BEGIN_TRY {
+ H5Sclose(sid);
+ H5Aclose(aid);
+ H5Dclose(did);
+ H5Gclose(gid);
+ H5Tclose(tid);
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return 1;
+
+} /* test_objs_cork() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_dset_cork
+ *
+ * Purpose: This function verifies H5Odisable_mdc_flushes/H5Oenable_mdc_flushes/H5Oare_mdc_flushes_disabled are
+ * working as specified when manipulating datasets.
+ * Covers: corking across write/flush/refresh, cork status after
+ * link deletion, and cork status shared among multiple open IDs
+ * for the same underlying object.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Vailin Choi; Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_dset_cork(hbool_t new_format)
+{
+ hid_t fid; /* File ID */
+ hid_t fapl; /* File access property list */
+ hid_t gid; /* Group ID */
+ hid_t did1, did2; /* Dataset IDs */
+ hid_t tid1, tid2; /* Datatype IDs */
+ hid_t sid; /* Dataspace ID */
+ hid_t dcpl; /* Dataset creation property list */
+ hsize_t dims[RANK]; /* Dataset dimensions */
+ hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dataset dimensions */
+ hsize_t cdims[RANK] = {2,2}; /* Chunk dimensions */
+ int fillval = 0; /* Fill value */
+ int i, j, k = 0; /* Local index variables */
+ int data[DIMS0][DIMS1]; /* Data buffer */
+ int rbuf[DIMS0][DIMS1]; /* Data buffer */
+ hbool_t corked; /* Cork status of an object */
+
+ /* Testing Macro */
+ if(new_format) {
+ TESTING("H5Odisable_mdc_flushes/H5Oenable_mdc_flushes/H5Oare_mdc_flushes_disabled on datasets (new library format)");
+ } else {
+ TESTING("H5Odisable_mdc_flushes/H5Oenable_mdc_flushes/H5Oare_mdc_flushes_disabled on datasets (old library format)");
+ } /* end if */
+
+ /* Create fapl */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR;
+
+ /* Set to use latest format */
+ if(new_format) {
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0 )
+ TEST_ERROR;
+ } /* end if */
+
+ /* Create a new HDF5 file */
+ if((fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ TEST_ERROR
+
+ /* Create a group */
+ if((gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Commit the datatype with the group */
+ if((tid1 = H5Tcopy(H5T_NATIVE_INT)) < 0)
+ TEST_ERROR
+ if(H5Tcommit2(gid, "datatype", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR
+
+ /* Cork the named datatype */
+ if(H5Odisable_mdc_flushes(tid1) < 0)
+ TEST_ERROR
+
+ /* Set up dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+
+ /* Enable chunking */
+ if(H5Pset_chunk(dcpl, RANK, cdims) < 0)
+ TEST_ERROR
+
+ /* Set up a fill value */
+ if(H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillval) < 0)
+ TEST_ERROR
+
+ /* Create dataspace */
+ dims[0] = DIMS0;
+ dims[1] = DIMS1;
+ if((sid = H5Screate_simple(RANK, dims, maxdims)) < 0)
+ TEST_ERROR
+
+ /* Create the dataset inside the group with the named datatype */
+ if((did1 = H5Dcreate2(gid, "dataset", tid1, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the named datatype: still corked */
+ if(H5Oare_mdc_flushes_disabled(tid1, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Cork the dataset */
+ if(H5Odisable_mdc_flushes(did1) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the dataset */
+ if(H5Oare_mdc_flushes_disabled(did1, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Check cork status of the group: corking the members does not cork the group */
+ if(H5Oare_mdc_flushes_disabled(gid, &corked) < 0)
+ TEST_ERROR
+ if(corked) TEST_ERROR
+
+ /* Initialize the buffer */
+ for(i = 0; i < DIMS0;i++)
+ for(j = 0;j < DIMS1;j++)
+ data[i][j] = k++;
+
+ /* Write to the dataset */
+ if(H5Dwrite(did1, tid1, sid, sid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+
+ /* Flush the dataset */
+ if(H5Oflush(did1) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the dataset: flushing does not uncork */
+ if(H5Oare_mdc_flushes_disabled(did1, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Check cork status of the named datatype */
+ if(H5Oare_mdc_flushes_disabled(tid1, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Close the dataset */
+ if(H5Dclose(did1) < 0)
+ TEST_ERROR
+
+ /* Open the dataset again */
+ if((did1 = H5Dopen2(gid, "dataset", H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Check cork status of dataset: not corked after close/reopen */
+ if(H5Oare_mdc_flushes_disabled(did1, &corked) < 0)
+ TEST_ERROR
+ if(corked) TEST_ERROR
+
+ /* Read from the dataset */
+ if(H5Dread(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ TEST_ERROR
+
+ /* Cork the dataset */
+ if(H5Odisable_mdc_flushes(did1) < 0)
+ TEST_ERROR
+
+ /* Delete the dataset */
+ if(H5Ldelete(gid, "dataset", H5P_DEFAULT) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the dataset: deleting the link does not uncork the open object */
+ if(H5Oare_mdc_flushes_disabled(did1, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Close the dataset */
+ if(H5Oclose(did1) < 0) TEST_ERROR
+
+ /* Create the dataset again */
+ if((did1 = H5Dcreate2(gid, "dataset", tid1, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Cork the dataset */
+ if(H5Odisable_mdc_flushes(did1) < 0)
+ TEST_ERROR
+
+ /* Write to the dataset */
+ if(H5Dwrite(did1, tid1, sid, sid, H5P_DEFAULT, data) < 0)
+ TEST_ERROR
+
+ /* Refresh the dataset */
+ if(H5Drefresh(did1) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the dataset: refreshing does not uncork */
+ if(H5Oare_mdc_flushes_disabled(did1, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Close the dataset */
+ if(H5Dclose(did1) < 0) TEST_ERROR
+
+ /* First open of the dataset */
+ if((did1 = H5Dopen2(gid, "dataset", H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Second open of the dataset */
+ if((did2 = H5Dopen2(gid, "dataset", H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Cork the first opened dataset */
+ if(H5Odisable_mdc_flushes(did1) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the first opened dataset */
+ if(H5Oare_mdc_flushes_disabled(did1, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Check cork status of the second opened dataset:
+ * corking via one ID corks the shared underlying object */
+ if(H5Oare_mdc_flushes_disabled(did2, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Close the second opened dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* Check cork status of the first opened dataset: closing one ID keeps the object corked */
+ if(H5Oare_mdc_flushes_disabled(did1, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Close the first opened dataset */
+ if(H5Dclose(did1) < 0) TEST_ERROR
+
+ /* Check cork status of the named datatype */
+ if(H5Oare_mdc_flushes_disabled(tid1, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Second open of the named datatype */
+ if((tid2 = H5Topen2(gid, "datatype", H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the second opened named datatype: shares the first ID's cork */
+ if(H5Oare_mdc_flushes_disabled(tid2, &corked) < 0)
+ TEST_ERROR
+ if(!corked) TEST_ERROR
+
+ /* Uncork the second opened named datatype */
+ if(H5Oenable_mdc_flushes(tid2) < 0)
+ TEST_ERROR
+
+ /* Check cork status of the second opened named datatype */
+ if(H5Oare_mdc_flushes_disabled(tid2, &corked) < 0)
+ TEST_ERROR
+ if(corked) TEST_ERROR
+
+ /* Check cork status of the first opened named datatype:
+ * uncorking via the second ID uncorks the shared object */
+ if(H5Oare_mdc_flushes_disabled(tid1, &corked) < 0)
+ TEST_ERROR
+ if(corked) TEST_ERROR
+
+ /* Close the first opened datatype */
+ if(H5Tclose(tid1) < 0) TEST_ERROR
+
+ /* Close the second opened datatype */
+ if(H5Tclose(tid2) < 0) TEST_ERROR
+
+ /* Check cork status of the group: never corked in this test */
+ if(H5Oare_mdc_flushes_disabled(gid, &corked) < 0)
+ TEST_ERROR
+ if(corked) TEST_ERROR
+
+ /* Closing */
+ if(H5Gclose(gid) < 0) TEST_ERROR
+ if(H5Sclose(sid) < 0) TEST_ERROR
+ if(H5Pclose(fapl) < 0) TEST_ERROR
+ if(H5Fclose(fid) < 0) TEST_ERROR
+ if(H5Pclose(dcpl) < 0) TEST_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ /* Best-effort cleanup; errors from already-closed IDs are suppressed */
+ H5E_BEGIN_TRY {
+ H5Sclose(sid);
+ H5Dclose(did1);
+ H5Dclose(did2);
+ H5Tclose(tid1);
+ H5Tclose(tid2);
+ H5Pclose(dcpl);
+ H5Gclose(gid);
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return 1;
+
+} /* test_dset_cork() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Run tests to verify the library's corking operations.
+ *
+ * Return: Success: 0 (all tests passed)
+ *
+ * Failure: 1 (one or more tests failed)
+ *
+ * Programmer: Vailin Choi; Feb 2014
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ unsigned nerrs = 0; /* Error Encountered */
+
+ /* Test for dataset created with old library format */
+ nerrs += verify_old_dset_cork();
+
+ /* Tests with new/old library format */
+ /* This is the test moved from th5o.c: test_h5o_cork() */
+ nerrs += test_objs_cork(TRUE);
+ nerrs += test_objs_cork(FALSE);
+ /* This is the test moved from th5o.c: test_h5o_cork_dataset() */
+ nerrs += test_dset_cork(TRUE);
+ nerrs += test_dset_cork(FALSE);
+
+ /* Tests with/without SWMR access */
+ nerrs += verify_obj_dset_cork(TRUE);
+ nerrs += verify_obj_dset_cork(FALSE);
+ /* NOTE(review): verify_obj_dset_cork(TRUE) is invoked a second time here --
+ * confirm the repeat is intentional and not a copy/paste slip */
+ nerrs += verify_obj_dset_cork(TRUE);
+ nerrs += verify_dset_cork(TRUE, TRUE);
+ nerrs += verify_dset_cork(FALSE, TRUE);
+ nerrs += verify_dset_cork(TRUE, FALSE);
+ nerrs += verify_group_cork(TRUE);
+ nerrs += verify_group_cork(FALSE);
+ nerrs += verify_named_cork(TRUE);
+ nerrs += verify_named_cork(FALSE);
+ nerrs += verify_multiple_cork(TRUE);
+ nerrs += verify_multiple_cork(FALSE);
+
+ /* Delete test files */
+ HDremove(FILENAME);
+
+ /* Return Errors */
+ return(nerrs > 0);
+
+} /* main */
+
diff --git a/test/corrupt_stab_msg.h5 b/test/corrupt_stab_msg.h5
index 4fa287c..a00616d 100644
--- a/test/corrupt_stab_msg.h5
+++ b/test/corrupt_stab_msg.h5
Binary files differ
diff --git a/test/deflate.h5 b/test/deflate.h5
index 2f62e25..e33af4f 100644
--- a/test/deflate.h5
+++ b/test/deflate.h5
Binary files differ
diff --git a/test/dsets.c b/test/dsets.c
index c0e8702..9775214 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -20,37 +20,52 @@
* Purpose: Tests the dataset interface (H5D)
*/
-#include <stdlib.h>
-#include <time.h>
+#define H5D_FRIEND /*suppress error about including H5FDpkg */
+#define H5D_TESTING
-/*
- * This file needs to access private information from the H5Z package.
- */
-#define H5Z_FRIEND
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_TESTING
+#define H5Z_FRIEND /*suppress error about including H5FDpkg */
+
+#include <stdlib.h>
+#include <time.h>
#include "h5test.h"
#include "H5srcdir.h"
+#include "H5Dpkg.h"
+#include "H5FDpkg.h"
+#include "H5VMprivate.h"
+
#include "H5Zpkg.h"
#ifdef H5_HAVE_SZLIB_H
# include "szlib.h"
#endif
+
const char *FILENAME[] = {
- "dataset",
- "compact_dataset",
- "dset_offset",
- "max_compact_dataset",
- "simple",
- "set_local",
- "random_chunks",
- "huge_chunks",
- "chunk_cache",
- "big_chunk",
- "chunk_expand",
- "copy_dcpl_newfile",
- "layout_extend",
- "zero_chunk",
+ "dataset", /* 0 */
+ "compact_dataset", /* 1 */
+ "dset_offset", /* 2 */
+ "max_compact_dataset", /* 3 */
+ "simple", /* 4 */
+ "set_local", /* 5 */
+ "random_chunks", /* 6 */
+ "huge_chunks", /* 7 */
+ "chunk_cache", /* 8 */
+ "big_chunk", /* 9 */
+ "chunk_fast", /* 10 */
+ "chunk_expand", /* 11 */
+ "chunk_fixed", /* 12 */
+ "copy_dcpl_newfile",/* 13 */
+ "partial_chunks", /* 14 */
+ "layout_extend", /* 15 */
+ "zero_chunk", /* 16 */
+ "chunk_single", /* 17 */
+ "swmr_non_latest", /* 18 */
+ "earray_hdr_fd", /* 19 */
+ "farray_hdr_fd", /* 20 */
+ "bt2_hdr_fd", /* 21 */
NULL
};
#define FILENAME_BUF_SIZE 1024
@@ -117,6 +132,22 @@ const char *FILENAME[] = {
#define DSET_DEPREC_NAME_COMPACT "deprecated_compact"
#define DSET_DEPREC_NAME_FILTER "deprecated_filter"
+/* Dataset names for testing Fixed Array Indexing */
+#define DSET_FIXED_MAX "DSET_FIXED_MAX"
+#define DSET_FIXED_NOMAX "DSET_FIXED_NOMAX"
+#define DSET_FIXED_BIG "DSET_FIXED_BIG"
+#define POINTS 72
+#define POINTS_BIG 2500
+
+/* Dataset names used for testing header flush dependencies */
+#define DSET_EARRAY_HDR_FD "earray_hdr_fd"
+#define DSET_FARRAY_HDR_FD "farray_hdr_fd"
+#define DSET_BT2_HDR_FD "bt2_hdr_fd"
+
+/* Dataset names for testing Implicit Indexing */
+#define DSET_SINGLE_MAX "DSET_SINGLE_MAX"
+#define DSET_SINGLE_NOMAX "DSET_SINGLE_NOMAX"
+
#define USER_BLOCK 1024
#define SIXTY_FOUR_KB 65536
@@ -128,6 +159,7 @@ const char *FILENAME[] = {
#define H5Z_FILTER_DEPREC 309
#define H5Z_FILTER_EXPAND 310
#define H5Z_FILTER_CAN_APPLY_TEST2 311
+#define H5Z_FILTER_COUNT 312
/* Flags for testing filters */
#define DISABLE_FLETCHER32 0
@@ -165,9 +197,11 @@ const char *FILENAME[] = {
/* Names for zero-dim test */
#define ZERODIM_DATASET "zerodim"
+#define ZERODIM_DATASET2 "zerodim2"
/* Parameters for zero-dim test */
#define MISSING_CHUNK_DATASET "missing_chunk"
+#define MISSING_CHUNK_DATASET2 "missing_chunk2"
#define MISSING_CHUNK_DIM 100
/* Names for random chunks test */
@@ -192,15 +226,37 @@ const char *FILENAME[] = {
/* Parameters for testing bypassing chunk cache */
#define BYPASS_DATASET1 "Dset1"
#define BYPASS_DATASET2 "Dset2"
+
+#define T_BYPASS_DATASET1 "T_Dset1"
+#define T_BYPASS_DATASET2 "T_Dset2"
+
#define BYPASS_DIM 1000
#define BYPASS_CHUNK_DIM 500
#define BYPASS_FILL_VALUE 7
+/* Parameters for testing extensible array chunk indices */
+#define EARRAY_MAX_RANK 3
+#define EARRAY_DSET_DIM 15
+#define EARRAY_CHUNK_DIM 3
+#define EARRAY_EXTEND_INCR 15
+#define EARRAY_MAX_EXTEND 75
+
/* Shared global arrays */
#define DSET_DIM1 100
#define DSET_DIM2 200
int points[DSET_DIM1][DSET_DIM2], check[DSET_DIM1][DSET_DIM2];
double points_dbl[DSET_DIM1][DSET_DIM2], check_dbl[DSET_DIM1][DSET_DIM2];
+size_t count_nbytes_read = 0;
+size_t count_nbytes_written = 0;
+
+/* Declarations for test_idx_compatible() */
+#define DSET "dset"
+#define DSET_FILTER "dset_filter"
+const char *OLD_FILENAME[] = { /* Files created under 1.6 branch and 1.8 branch */
+ "btree_idx_1_6.h5", /* 1.6 HDF5 file */
+ "btree_idx_1_8.h5" /* 1.8 HDF5 file */
+};
+
/* Local prototypes for filter functions */
static size_t filter_bogus(unsigned int flags, size_t cd_nelmts,
@@ -215,6 +271,49 @@ static size_t filter_corrupt(unsigned int flags, size_t cd_nelmts,
const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
static size_t filter_expand(unsigned int flags, size_t cd_nelmts,
const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
+static size_t filter_count(unsigned int flags, size_t cd_nelmts,
+ const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
+
+/* This message derives from H5Z */
+const H5Z_class2_t H5Z_COUNT[1] = {{
+ H5Z_CLASS_T_VERS, /* H5Z_class_t version */
+ H5Z_FILTER_COUNT, /* Filter id number */
+ 1, 1, /* Encoding and decoding enabled */
+ "count", /* Filter name for debugging */
+ NULL, /* The "can apply" callback */
+ NULL, /* The "set local" callback */
+ filter_count, /* The actual filter function */
+}};
+
+
+/*-------------------------------------------------------------------------
+ * Function: filter_count
+ *
+ * Purpose: This filter counts the number of bytes read and written,
+ * incrementing count_nbytes_read or count_nbytes_written as
+ * appropriate.
+ *
+ * Return: Success: Data chunk size
+ *
+ * Failure: 0
+ *
+ * Programmer: Neil Fortner
+ * Wednesday, March 17, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static size_t
+filter_count(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts,
+ const unsigned int H5_ATTR_UNUSED *cd_values, size_t nbytes,
+ size_t H5_ATTR_UNUSED *buf_size, void H5_ATTR_UNUSED **buf)
+{
+ if(flags & H5Z_FLAG_REVERSE)
+ count_nbytes_read += nbytes;
+ else
+ count_nbytes_written += nbytes;
+
+ return nbytes;
+}
/*-------------------------------------------------------------------------
@@ -388,14 +487,14 @@ test_create(hid_t file)
static herr_t
test_simple_io(const char *env_h5_drvr, hid_t fapl)
{
- char filename[FILENAME_BUF_SIZE];
- hid_t file = -1, dataset = -1, space = -1, xfer = -1;
- int i, j, n;
- hsize_t dims[2];
- void *tconv_buf = NULL;
- int f = -1;
- haddr_t offset;
- int rdata[DSET_DIM1][DSET_DIM2];
+ char filename[FILENAME_BUF_SIZE];
+ hid_t file = -1, dataset = -1, space = -1, xfer = -1;
+ int i, j, n;
+ hsize_t dims[2];
+ void *tconv_buf = NULL;
+ int f = -1;
+ haddr_t offset;
+ int rdata[DSET_DIM1][DSET_DIM2];
TESTING("simple I/O");
@@ -465,7 +564,8 @@ test_simple_io(const char *env_h5_drvr, hid_t fapl)
f = HDopen(filename, O_RDONLY, 0);
HDlseek(f, (off_t)offset, SEEK_SET);
- HDread(f, rdata, sizeof(int)*DSET_DIM1*DSET_DIM2);
+ if(HDread(f, rdata, sizeof(int)*DSET_DIM1*DSET_DIM2) < 0)
+ goto error;
/* Check that the values read are the same as the values written */
for(i = 0; i < DSET_DIM1; i++) {
@@ -907,7 +1007,7 @@ test_layout_extend(hid_t fapl)
TESTING("extendible dataset with various layout");
/* Create a file */
- h5_fixname(FILENAME[12], fapl, filename, sizeof filename);
+ h5_fixname(FILENAME[15], fapl, filename, sizeof filename);
if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
FAIL_STACK_ERROR
@@ -6069,7 +6169,7 @@ test_copy_dcpl(hid_t file, hid_t fapl)
/* Create a second file and create 2 datasets with the copies of the DCPLs in the first
* file. Test whether the copies of DCPLs work. */
- h5_fixname(FILENAME[11], fapl, filename, sizeof filename);
+ h5_fixname(FILENAME[13], fapl, filename, sizeof filename);
if((new_file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
TEST_ERROR
@@ -6415,15 +6515,28 @@ static herr_t
test_zero_dims(hid_t file)
{
hid_t s = -1, d = -1, dcpl = -1;
- hsize_t dsize = 0, dmax = H5S_UNLIMITED, csize = 5;
+ hid_t s2 = -1, d2 = -1, dcpl2 = -1;
+ hsize_t dzero = 0, dmax = H5S_UNLIMITED, csize = 5;
+ hsize_t dzero2[2] = {0, 0};
+ hsize_t dmax2[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t csize2[2] = {5, 5};
+ hid_t fapl; /* File access property list */
+ H5D_chunk_index_t idx_type; /* Dataset chunk index type */
+ H5F_libver_t low; /* File format low bound */
herr_t ret;
TESTING("I/O on datasets with zero-sized dims");
+ /* Get the file's file access property list */
+ if((fapl = H5Fget_access_plist(file)) < 0) FAIL_STACK_ERROR
+
+ /* Get library format */
+ if(H5Pget_libver_bounds(fapl, &low, NULL) < 0) FAIL_STACK_ERROR
+
/*
* One-dimensional dataset
*/
- if((s = H5Screate_simple(1, &dsize, &dmax)) < 0) FAIL_STACK_ERROR
+ if((s = H5Screate_simple(1, &dzero, &dmax)) < 0) FAIL_STACK_ERROR
/* Try creating chunked dataset with undefined chunk dimensions */
if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
@@ -6439,7 +6552,7 @@ test_zero_dims(hid_t file)
/* Try creating chunked dataset with zero-sized chunk dimensions */
H5E_BEGIN_TRY {
- ret = H5Pset_chunk(dcpl, 1, &dsize);
+ ret = H5Pset_chunk(dcpl, 1, &dzero);
} H5E_END_TRY;
if(ret > 0)
FAIL_PUTS_ERROR("set zero-sized chunk dimensions")
@@ -6451,6 +6564,16 @@ test_zero_dims(hid_t file)
if(H5Pset_chunk(dcpl, 1, &csize) < 0) FAIL_STACK_ERROR
if((d = H5Dcreate2(file, ZERODIM_DATASET, H5T_NATIVE_INT, s, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+ /* Get the chunk index type */
+ if(H5D__layout_idx_type_test(d, &idx_type) < 0) FAIL_STACK_ERROR
+
+ /* Verify index type */
+ if(low == H5F_LIBVER_LATEST) {
+ if(idx_type != H5D_CHUNK_IDX_EARRAY)
+ FAIL_PUTS_ERROR("should be using extensible array as index");
+ } else if(idx_type != H5D_CHUNK_IDX_BTREE)
+ FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+
/* Various no-op writes */
if(H5Dwrite(d, H5T_NATIVE_INT, s, s, H5P_DEFAULT, (void*)911) < 0) FAIL_STACK_ERROR
if(H5Dwrite(d, H5T_NATIVE_INT, s, s, H5P_DEFAULT, NULL) < 0) FAIL_STACK_ERROR
@@ -6467,6 +6590,56 @@ test_zero_dims(hid_t file)
if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR
if(H5Sclose(s) < 0) FAIL_STACK_ERROR
+ /*
+ * Two-dimensional dataset
+ */
+ if((s2 = H5Screate_simple(2, dzero2, dmax2)) < 0) FAIL_STACK_ERROR
+
+ /* Try creating chunked dataset with undefined chunk dimensions */
+ if((dcpl2 = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
+ if(H5Pset_layout(dcpl2, H5D_CHUNKED) < 0) FAIL_STACK_ERROR
+
+ H5E_BEGIN_TRY {
+ d2 = H5Dcreate2(file, ZERODIM_DATASET2, H5T_NATIVE_INT, s2, H5P_DEFAULT, dcpl2, H5P_DEFAULT);
+ } H5E_END_TRY;
+ if(d2 > 0) {
+ H5Dclose(d2);
+ FAIL_PUTS_ERROR("created dataset with undefined chunk dimensions")
+ } /* end if */
+
+ /* Try creating chunked dataset with zero-sized chunk dimensions */
+ H5E_BEGIN_TRY {
+ ret = H5Pset_chunk(dcpl2, 2, dzero2);
+ } H5E_END_TRY;
+ if(ret > 0)
+ FAIL_PUTS_ERROR("set zero-sized chunk dimensions")
+
+ if(H5Pclose(dcpl2) < 0) FAIL_STACK_ERROR
+
+ /* Write to the zero-sized extendible dataset */
+ if((dcpl2 = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
+ if(H5Pset_chunk(dcpl2, 2, csize2) < 0) FAIL_STACK_ERROR
+
+ /* Create the dataset */
+ if((d2 = H5Dcreate2(file, ZERODIM_DATASET2, H5T_NATIVE_INT, s2, H5P_DEFAULT, dcpl2, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+ /* Get the chunk index type */
+ if(H5D__layout_idx_type_test(d2, &idx_type) < 0) FAIL_STACK_ERROR
+
+ /* Verify index type */
+ if(low == H5F_LIBVER_LATEST) {
+ if(idx_type != H5D_CHUNK_IDX_BT2)
+ FAIL_PUTS_ERROR("should be using v2 B-tree as index");
+ } else if(idx_type != H5D_CHUNK_IDX_BTREE)
+ FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+
+ /* Just a no-op */
+ if(H5Dwrite(d2, H5T_NATIVE_INT, s2, s2, H5P_DEFAULT, (void*)911) < 0) FAIL_STACK_ERROR
+
+ if(H5Dclose(d2) < 0) FAIL_STACK_ERROR
+ if(H5Pclose(dcpl2) < 0) FAIL_STACK_ERROR
+ if(H5Sclose(s2) < 0) FAIL_STACK_ERROR
+
PASSED();
return 0;
@@ -6475,6 +6648,10 @@ error:
H5Pclose(dcpl);
H5Dclose(d);
H5Sclose(s);
+
+ H5Pclose(dcpl2);
+ H5Dclose(d2);
+ H5Sclose(s2);
} H5E_END_TRY;
return -1;
} /* end test_zero_dims() */
@@ -6498,39 +6675,89 @@ error:
static herr_t
test_missing_chunk(hid_t file)
{
- hid_t s = -1, d = -1, dcpl = -1;
- hsize_t hs_start[1];
- hsize_t hs_stride[1],
- hs_count[1],
- hs_block[1];
+ hid_t d = -1, did2 = -1; /* Dataset IDs */
+ hid_t dcpl = -1, dcpl2 = -1; /* Dataset creation property IDs */
+ hid_t s = -1, sid2 = -1; /* Dataspace ID */
+ hsize_t hs_start[1], hs_stride[1], hs_count[1], hs_block[1]; /* Hyperslab setting */
+ hsize_t hs_start2[2], hs_stride2[2], hs_count2[2], hs_block2[2];/* Hyperslab setting */
+
+ /* Buffers for reading/writing dataset */
int wdata[MISSING_CHUNK_DIM],
rdata[MISSING_CHUNK_DIM];
+ int wdata2[MISSING_CHUNK_DIM][MISSING_CHUNK_DIM],
+ rdata2[MISSING_CHUNK_DIM][MISSING_CHUNK_DIM];
+
+ /* Setting for 1-D dataset */
hsize_t dsize=100, dmax=H5S_UNLIMITED;
hsize_t csize=5;
- size_t u;
+
+ /* Setting for 2-D dataset */
+ hsize_t dsize2[2] = {100, 100}, dmax2[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t csize2[2] = {5, 5};
+ size_t u, i, j; /* Local Index variable */
+
+ hid_t fapl; /* File access property list */
+ H5F_libver_t low; /* File format low bound */
+ H5D_chunk_index_t idx_type, idx_type2; /* Dataset chunk index types */
TESTING("Read dataset with unwritten chunk & undefined fill value");
+ /* Get the file's file access property list */
+ if((fapl = H5Fget_access_plist(file)) < 0) TEST_ERROR;
+
+ /* Get library format */
+ if(H5Pget_libver_bounds(fapl, &low, NULL) < 0) TEST_ERROR;
+
/* Initialize data for 1-D dataset */
for(u = 0; u < MISSING_CHUNK_DIM; u++) {
wdata[u] = (int)u;
rdata[u] = 911;
} /* end for */
+ /* Initialize data for 2-D dataset */
+ for(i = 0; i < MISSING_CHUNK_DIM; i++) {
+ for(j = 0; j < MISSING_CHUNK_DIM; j++) {
+ wdata2[i][j] = (int)j + (i * MISSING_CHUNK_DIM);
+ rdata2[i][j] = 911;
+ }
+ } /* end for */
+
/* Create dataspace */
if((s = H5Screate_simple(1, &dsize, &dmax)) < 0) TEST_ERROR;
+ if((sid2 = H5Screate_simple(2, dsize2, dmax2)) < 0) TEST_ERROR;
/* Create dataset creation property list */
if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR;
+ if((dcpl2 = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR;
/* Set to chunked */
if(H5Pset_chunk(dcpl, 1, &csize) < 0) TEST_ERROR;
+ if(H5Pset_chunk(dcpl2, 2, csize2) < 0) TEST_ERROR;
/* Undefine fill value */
if(H5Pset_fill_value(dcpl, H5T_NATIVE_INT, NULL) < 0) TEST_ERROR;
+ if(H5Pset_fill_value(dcpl2, H5T_NATIVE_INT, NULL) < 0) TEST_ERROR;
- /* Create dataset */
+ /* Create the 1-D & 2-D datasets */
if((d = H5Dcreate2(file, MISSING_CHUNK_DATASET, H5T_NATIVE_INT, s, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if((did2 = H5Dcreate2(file, MISSING_CHUNK_DATASET2, H5T_NATIVE_INT, sid2, H5P_DEFAULT, dcpl2, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Get the chunk index types */
+ if(H5D__layout_idx_type_test(d, &idx_type) < 0) TEST_ERROR;
+ if(H5D__layout_idx_type_test(did2, &idx_type2) < 0) TEST_ERROR;
+
+ /* Verify index type */
+ if(low == H5F_LIBVER_LATEST) {
+ if(idx_type != H5D_CHUNK_IDX_EARRAY)
+ FAIL_PUTS_ERROR("should be using Extensible Array as index");
+ if(idx_type2 != H5D_CHUNK_IDX_BT2)
+ FAIL_PUTS_ERROR("should be using v2 B-tree as index");
+ } else {
+ if(idx_type != H5D_CHUNK_IDX_BTREE)
+ FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+ if(idx_type2 != H5D_CHUNK_IDX_BTREE)
+ FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+ }
/* Select elements in every other chunk for 1-D dataset */
hs_start[0]=0;
@@ -6540,13 +6767,23 @@ test_missing_chunk(hid_t file)
if(H5Sselect_hyperslab(s, H5S_SELECT_SET, hs_start, hs_stride, hs_count,
hs_block) < 0) TEST_ERROR;
- /* Write selected data */
+ /* Select elements in every other chunk for 2-D dataset */
+ hs_start2[0] = hs_start2[1] = 0;
+ hs_stride2[0] = hs_stride2[1] = 10;
+ hs_count2[0] = hs_count2[1] = 10;
+ hs_block2[0] = hs_block2[1] = 5;
+ if(H5Sselect_hyperslab(sid2, H5S_SELECT_SET, hs_start2, hs_stride2, hs_count2,
+ hs_block2) < 0) TEST_ERROR;
+
+ /* Write selected data to the datasets */
if(H5Dwrite(d, H5T_NATIVE_INT, s, s, H5P_DEFAULT, wdata) < 0) TEST_ERROR;
+ if(H5Dwrite(did2, H5T_NATIVE_INT, sid2, sid2, H5P_DEFAULT, wdata2) < 0) TEST_ERROR;
- /* Read all data */
+ /* Read all data from the datasets */
if(H5Dread(d, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata) < 0) TEST_ERROR;
+ if(H5Dread(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata2) < 0) TEST_ERROR;
- /* Validata values read */
+ /* Validata values read for the 1-D dataset */
for(u=0; u<MISSING_CHUNK_DIM; u++) {
if((u%10)>=5) {
if(rdata[u]!=911) {
@@ -6562,10 +6799,34 @@ test_missing_chunk(hid_t file)
} /* end else */
} /* end for */
+ /* Validata values read for the 2-D dataset */
+ for(i = 0; i < MISSING_CHUNK_DIM; i++) {
+ for(j = 0; j < MISSING_CHUNK_DIM; j++) {
+
+ if((i % 10) >= 5 || (j % 10) >= 5) {
+ if(rdata2[i][j] != 911) {
+ printf(" Line %d: Incorrect value, rdata2[%u][%u] = %d\n",
+ __LINE__,(unsigned)i, (unsigned)j, rdata2[i][j]);
+ TEST_ERROR;
+ } /* end if */
+ } /* end if */
+ else {
+ if(rdata2[i][j] != wdata2[i][j]) {
+ printf(" Line %d: Incorrect value, wdata2[%u][%u] = %d, rdata2[%u][%u] = %d\n",
+ __LINE__,(unsigned)i, (unsigned)j, wdata2[i][j],(unsigned)i, (unsigned)j, rdata2[i][j]);
+ TEST_ERROR;
+ } /* end if */
+ } /* end else */
+ } /* end for */
+ } /* end for */
+
/* Close everything */
if(H5Pclose(dcpl) < 0) TEST_ERROR;
+ if(H5Pclose(dcpl2) < 0) TEST_ERROR;
if(H5Sclose(s) < 0) TEST_ERROR;
+ if(H5Sclose(sid2) < 0) TEST_ERROR;
if(H5Dclose(d) < 0) TEST_ERROR;
+ if(H5Dclose(did2) < 0) TEST_ERROR;
PASSED();
return 0;
@@ -6573,19 +6834,20 @@ test_missing_chunk(hid_t file)
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
+ H5Pclose(dcpl2);
H5Dclose(d);
+ H5Dclose(did2);
H5Sclose(s);
+ H5Sclose(sid2);
} H5E_END_TRY;
return -1;
} /* end test_missing_chunk() */
/*-------------------------------------------------------------------------
- * Function: test_random_chunks
+ * Function: test_random_chunks_real
*
- * Purpose: Tests that write/read on randomly selected chunks in 2 datasets.
- * One dataset has fixed dimensions, and the other has unlimited
- * dimensions which are extended before write/read operations.
+ * Purpose: Tests that write/read on randomly selected chunks
*
*
* Return: Success: 0
@@ -6597,7 +6859,7 @@ error:
*-------------------------------------------------------------------------
*/
static herr_t
-test_random_chunks(hid_t fapl)
+test_random_chunks_real(const char *testname, hbool_t early_alloc, hid_t fapl)
{
char filename[FILENAME_BUF_SIZE];
hid_t s=-1, m=-1, d=-1, dcpl=-1, file=-1;
@@ -6606,18 +6868,23 @@ test_random_chunks(hid_t fapl)
check2[20][20];
hsize_t coord[NPOINTS][2];
hsize_t dsize[2]={100,100}, dmax[2]={H5S_UNLIMITED, H5S_UNLIMITED}, csize[2]={10,10}, nsize[2]={200,200};
+ hsize_t fixed_dmax[2] = {1000, 1000};
hsize_t msize[1]={NPOINTS};
const char dname[]="dataset";
int chunk_row, chunk_col;
size_t i, j;
+ H5D_chunk_index_t idx_type; /* Dataset chunk index type */
+ H5F_libver_t low; /* File format low bound */
- TESTING("Write/read on randomly selected chunks");
+ TESTING(testname);
assert(NPOINTS < 100);
h5_fixname(FILENAME[6], fapl, filename, sizeof filename);
+ if(H5Pget_libver_bounds(fapl, &low, NULL) < 0) TEST_ERROR;
+
/* Create file for first test */
if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR;
@@ -6630,8 +6897,9 @@ test_random_chunks(hid_t fapl)
/* Set chunked layout */
if(H5Pset_chunk(dcpl, 2, csize) < 0) TEST_ERROR;
- /* Set early allocation time */
- if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR;
+ /* Set early allocation time for one dataset; the other dataset is using default alloc time */
+ if(early_alloc)
+ if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR;
/* Create dataset */
if((d = H5Dcreate2(file, dname, H5T_NATIVE_INT, s, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) TEST_ERROR;
@@ -6669,12 +6937,28 @@ test_random_chunks(hid_t fapl)
if(H5Dclose(d) < 0) TEST_ERROR;
if(H5Fclose(file) < 0) TEST_ERROR;
- /* Open file again */
+ /* Open first file again */
if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) TEST_ERROR;
/* Open dataset */
if((d = H5Dopen2(file, dname, H5P_DEFAULT)) < 0) TEST_ERROR;
+ /* Get the chunk index type */
+ if(H5D__layout_idx_type_test(d, &idx_type) < 0) TEST_ERROR;
+
+ /* Verify index type */
+ if(low == H5F_LIBVER_LATEST) {
+ if(early_alloc) {
+ if(idx_type != H5D_CHUNK_IDX_NONE)
+ FAIL_PUTS_ERROR("should be using Non-Index as index");
+ } /* end if */
+ else {
+ if(idx_type != H5D_CHUNK_IDX_FARRAY)
+ FAIL_PUTS_ERROR("should be using Fixed Array as index");
+ } /* end else */
+ } else if(idx_type != H5D_CHUNK_IDX_BTREE)
+ FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+
/* Get dataset dataspace */
if((s = H5Dget_space(d)) < 0) TEST_ERROR;
@@ -6702,11 +6986,117 @@ test_random_chunks(hid_t fapl)
if(H5Fclose(file) < 0) TEST_ERROR;
- /* Create file for second test */
+ /* Create second file */
if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR;
/* Create dataspace with unlimited maximum dimensions */
- if((s = H5Screate_simple(2, dsize, dmax)) < 0) TEST_ERROR;
+ if(early_alloc) {
+ if((s = H5Screate_simple(2, dsize, fixed_dmax)) < 0) TEST_ERROR;
+ } else
+ if((s = H5Screate_simple(2, dsize, dmax)) < 0) TEST_ERROR;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR;
+
+ /* Set chunked layout */
+ if(H5Pset_chunk(dcpl, 2, csize) < 0) TEST_ERROR;
+
+ /* Set early allocation time for one dataset; the other dataset is using default alloc time */
+ if(early_alloc)
+ if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR;
+
+ /* Create dataset */
+ if((d = H5Dcreate2(file, dname, H5T_NATIVE_INT, s, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Get the chunk index type */
+ if(H5D__layout_idx_type_test(d, &idx_type) < 0) TEST_ERROR;
+
+ /* Verify index type */
+ if(low == H5F_LIBVER_LATEST) {
+ if(early_alloc) {
+ if(idx_type != H5D_CHUNK_IDX_NONE)
+ FAIL_PUTS_ERROR("should be using implicit indexing");
+ } else if(idx_type != H5D_CHUNK_IDX_BT2)
+ FAIL_PUTS_ERROR("should be using v2 B-tree as index");
+ } else if(idx_type != H5D_CHUNK_IDX_BTREE)
+ FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+
+ /* Extend both dimensions of the dataset */
+ if(H5Dset_extent(d, nsize) < 0) TEST_ERROR;
+
+ /* Reset the dataset dataspace to new dimensions */
+ if(H5Sset_extent_simple(s, 2, nsize, dmax) < 0) TEST_ERROR;
+
+ /* Initialize check buffer for repeated coordinates */
+ for(i = 0; i < nsize[0]/csize[0]; i++)
+ for(j = 0; j < nsize[1] / csize[1]; j++)
+ check2[i][j] = 0;
+
+ /* Generate random point coordinates. Only one point is selected per chunk */
+ for(i = 0; i < NPOINTS; i++){
+ do {
+ chunk_row = (int)HDrandom() % (int)(nsize[0] / csize[0]);
+ chunk_col = (int)HDrandom() % (int)(nsize[1] / csize[1]);
+ } while (check2[chunk_row][chunk_col]);
+
+ wbuf[i] = check2[chunk_row][chunk_col] = chunk_row + chunk_col + 1;
+ coord[i][0] = (hsize_t)chunk_row * csize[0];
+ coord[i][1] = (hsize_t)chunk_col * csize[1];
+ }
+
+ /* Create dataspace for write buffer */
+ if((m = H5Screate_simple(1, msize, NULL)) < 0) TEST_ERROR;
+
+ /* Select the random points for writing */
+ if(H5Sselect_elements(s, H5S_SELECT_SET, (size_t)NPOINTS, (const hsize_t *)coord) < 0) TEST_ERROR;
+
+ /* Write into dataset */
+ if(H5Dwrite(d, H5T_NATIVE_INT, m, s, H5P_DEFAULT, wbuf) < 0) TEST_ERROR;
+
+ /* Close resources */
+ if(H5Sclose(s) < 0) TEST_ERROR;
+ if(H5Sclose(m) < 0) TEST_ERROR;
+ if(H5Pclose(dcpl) < 0) TEST_ERROR;
+ if(H5Dclose(d) < 0) TEST_ERROR;
+ if(H5Fclose(file) < 0) TEST_ERROR;
+
+ /* Open second file again */
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) TEST_ERROR;
+
+ /* Open dataset */
+ if((d = H5Dopen2(file, dname, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Get dataset dataspace */
+ if((s = H5Dget_space(d)) < 0) TEST_ERROR;
+
+ /* Create dataspace for read buffer */
+ if((m = H5Screate_simple(1, msize, NULL)) < 0) TEST_ERROR;
+
+ /* Select the random points for reading */
+ if(H5Sselect_elements (s, H5S_SELECT_SET, (size_t)NPOINTS, (const hsize_t *)coord) < 0) TEST_ERROR;
+
+ /* Read from dataset */
+ if(H5Dread(d, H5T_NATIVE_INT, m, s, H5P_DEFAULT, rbuf) < 0) TEST_ERROR;
+
+ /* Verify that written and read data are the same */
+ for(i = 0; i < NPOINTS; i++)
+ if(rbuf[i] != wbuf[i]){
+ printf(" Line %d: Incorrect value, wbuf[%u]=%d, rbuf[%u]=%d\n",__LINE__,(unsigned)i,wbuf[i],(unsigned)i,rbuf[i]);
+ TEST_ERROR;
+ } /* end if */
+
+ /* Close resources */
+ if(H5Sclose(s) < 0) TEST_ERROR;
+ if(H5Sclose(m) < 0) TEST_ERROR;
+ if(H5Dclose(d) < 0) TEST_ERROR;
+ if(H5Fclose(file) < 0) TEST_ERROR;
+
+
+ /* Create third file */
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR;
+
+ /* Create dataspace with fixed maximum dimensions */
+ if((s = H5Screate_simple(2, dsize, fixed_dmax)) < 0) TEST_ERROR;
/* Create dataset creation property list */
if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR;
@@ -6714,12 +7104,29 @@ test_random_chunks(hid_t fapl)
/* Set chunked layout */
if(H5Pset_chunk(dcpl, 2, csize) < 0) TEST_ERROR;
- /* Set allocation time to early */
- if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR;
+ /* Set early allocation time for one dataset; the other dataset is using default alloc time */
+ if(early_alloc)
+ if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR;
/* Create dataset */
if((d = H5Dcreate2(file, dname, H5T_NATIVE_INT, s, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) TEST_ERROR;
+ /* Get the chunk index type */
+ if(H5D__layout_idx_type_test(d, &idx_type) < 0) TEST_ERROR;
+
+ /* Verify index type */
+ if(low == H5F_LIBVER_LATEST) {
+ if(early_alloc) {
+ if(idx_type != H5D_CHUNK_IDX_NONE)
+ FAIL_PUTS_ERROR("should be using Non-Index as index");
+ } /* end if */
+ else {
+ if(idx_type != H5D_CHUNK_IDX_FARRAY)
+ FAIL_PUTS_ERROR("should be using Fixed Array as index");
+ } /* end else */
+ } else if(idx_type != H5D_CHUNK_IDX_BTREE)
+ FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+
/* Extend both dimensions of the dataset */
if(H5Dset_extent(d, nsize) < 0) TEST_ERROR;
@@ -6759,7 +7166,7 @@ test_random_chunks(hid_t fapl)
if(H5Dclose(d) < 0) TEST_ERROR;
if(H5Fclose(file) < 0) TEST_ERROR;
- /* Open file again */
+ /* Open third file again */
if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) TEST_ERROR;
/* Open dataset */
@@ -6802,6 +7209,43 @@ error:
H5Fclose(file);
} H5E_END_TRY;
return -1;
+} /* end test_random_chunks_real() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_random_chunks
+ *
+ * Purpose: Tests that write/read on randomly selected chunks
+ * First file:
+ * One dataset has fixed dimensions without max. dims & H5D_ALLOC_TIME_EARLY
+ * One dataset has fixed dimensions without max. dims & default alloc time
+ * Second file:
+ * One extendible dataset with unlimited max. dims & H5D_ALLOC_TIME_EARLY
+ * One extendible dataset with unlimited max. dims & default alloc time
+ * Third file:
+ * One extendible dataset with fixed max. dims & H5D_ALLOC_TIME_EARLY
+ * One extendible dataset with fixed max. dims & default alloc time
+ *
+ * All the datasets in the second & third files are extended before write/read operations
+ *
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Christian Chilan
+ * Monday, March 26, 2007
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_random_chunks(hid_t fapl)
+{
+ int nerrors = 0; /* Errors accumulated from the sub-tests */
+
+ /* Run once with default allocation time (non-implicit chunk index) and
+ * once with early allocation (may select the implicit/Non-Index scheme) */
+ nerrors += test_random_chunks_real("Write/read on randomly selected chunks w/non-implicit index", FALSE, fapl);
+ nerrors += test_random_chunks_real("Write/read on randomly selected chunks w/implicit index", TRUE, fapl);
+
+ return nerrors;
#ifndef H5_NO_DEPRECATED_SYMBOLS
@@ -7361,23 +7805,36 @@ static herr_t
test_big_chunks_bypass_cache(hid_t fapl)
{
char filename[FILENAME_BUF_SIZE];
- hid_t fid = -1; /* File ID */
- hid_t fapl_local = -1; /* File access property list ID */
- hid_t dcpl = -1; /* Dataset creation property list ID */
- hid_t sid = -1; /* Dataspace ID */
- hid_t dsid = -1; /* Dataset ID */
- hsize_t dim, chunk_dim; /* Dataset and chunk dimensions */
- size_t rdcc_nelmts, rdcc_nbytes;
- int fvalue = BYPASS_FILL_VALUE;
- hsize_t count, stride, offset, block;
+ hid_t fid = -1; /* File ID */
+ hid_t fapl_local = -1; /* File access property list ID */
+ hid_t dcpl = -1, t_dcpl = -1; /* Dataset creation property list ID */
+ hid_t sid = -1, t_sid = -1; /* Dataspace ID */
+ hid_t mid; /* Memory space ID */
+ hid_t dsid = -1, t_dsid = -1; /* Dataset ID */
+ hsize_t dim, chunk_dim; /* Dataset and chunk dimensions */
+ hsize_t t_dim[2], t_max[2], t_chunk_dim[2]; /* Dataset and chunk dimensions */
+ size_t rdcc_nelmts, rdcc_nbytes; /* Chunk cache parameters */
+ int fvalue = BYPASS_FILL_VALUE; /* Fill value */
+ hsize_t count, stride, offset, block; /* Setting for hyperslab (1-D) */
+ hsize_t t_count[2], t_stride[2], t_offset[2], t_block[2]; /* Setting for hyperslab (2-D) */
+ /* Buffer for reading and writing data (1-D) */
static int wdata[BYPASS_CHUNK_DIM/2], rdata1[BYPASS_DIM],
- rdata2[BYPASS_CHUNK_DIM/2];
- int i, j;
+ rdata2[BYPASS_CHUNK_DIM/2];
+ /* Buffer for reading and writing data (2-D) */
+ static int t_wdata[BYPASS_CHUNK_DIM/2][BYPASS_CHUNK_DIM/2], t_rdata1[BYPASS_DIM][BYPASS_DIM],
+ t_rdata2[BYPASS_CHUNK_DIM/2][BYPASS_CHUNK_DIM/2];
+ int i, j; /* Local index variables */
+ H5F_libver_t low; /* File format low bound */
+ H5D_chunk_index_t idx_type, t_idx_type; /* Dataset chunk index types */
+
TESTING("big chunks bypassing the cache");
h5_fixname(FILENAME[9], fapl, filename, sizeof filename);
+ /* Check if we are using the latest version of the format */
+ if(H5Pget_libver_bounds(fapl, &low, NULL) < 0) FAIL_STACK_ERROR
+
/* Copy fapl passed to this function (as we will be modifying it) */
if((fapl_local = H5Pcopy(fapl)) < 0) FAIL_STACK_ERROR
@@ -7389,52 +7846,101 @@ test_big_chunks_bypass_cache(hid_t fapl)
/* Create file */
if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_local)) < 0) FAIL_STACK_ERROR
- /* Create 1-D dataspace */
- dim = BYPASS_DIM;
+ /* Create 1-D & 2-D dataspace */
+ dim = t_dim[0] = t_dim[1] = BYPASS_DIM;
+ t_max[0] = t_max[1] = H5S_UNLIMITED;
if((sid = H5Screate_simple(1, &dim, NULL)) < 0) FAIL_STACK_ERROR
+ if((t_sid = H5Screate_simple(2, t_dim, t_max)) < 0) FAIL_STACK_ERROR
- /* Create dataset creation property list */
+ /* Create 1-D & 2-D dataset creation property list */
if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
+ if((t_dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
- /* Define chunk size. There will be only 2 chunks in the dataset. */
- chunk_dim = BYPASS_CHUNK_DIM;
+ /* Define chunk size. */
+ /* There will be 2 chunks in 1-D dataset & 4 chunks in the 2-D dataset */
+ chunk_dim = t_chunk_dim[0] = t_chunk_dim[1] = BYPASS_CHUNK_DIM;
if(H5Pset_chunk(dcpl, 1, &chunk_dim) < 0) FAIL_STACK_ERROR
+ if(H5Pset_chunk(t_dcpl, 2, t_chunk_dim) < 0) FAIL_STACK_ERROR
/* Define fill value, fill time, and chunk allocation time */
if(H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fvalue) < 0) FAIL_STACK_ERROR
+ if(H5Pset_fill_value(t_dcpl, H5T_NATIVE_INT, &fvalue) < 0) FAIL_STACK_ERROR
+
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_IFSET) < 0) FAIL_STACK_ERROR
+ if(H5Pset_fill_time(t_dcpl, H5D_FILL_TIME_IFSET) < 0) FAIL_STACK_ERROR
+
if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR) < 0) FAIL_STACK_ERROR
+ if(H5Pset_alloc_time(t_dcpl, H5D_ALLOC_TIME_INCR) < 0) FAIL_STACK_ERROR
/* Create the first 1-D dataset */
if((dsid = H5Dcreate2(fid, BYPASS_DATASET1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
FAIL_STACK_ERROR
+ /* Create the first 2-D dataset */
+ if((t_dsid = H5Dcreate2(fid, T_BYPASS_DATASET1, H5T_NATIVE_INT, t_sid, H5P_DEFAULT, t_dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the chunk index types for 1-D and 2-d datasets */
+ if(H5D__layout_idx_type_test(dsid, &idx_type) < 0) FAIL_STACK_ERROR
+ if(H5D__layout_idx_type_test(t_dsid, &t_idx_type) < 0) FAIL_STACK_ERROR
+
+ /* Chunk index type expected depends on whether we are using the latest version of the format */
+ if(low == H5F_LIBVER_LATEST) {
+ /* Verify index type */
+ if(idx_type != H5D_CHUNK_IDX_FARRAY) FAIL_PUTS_ERROR("should be using Fixed Array as index");
+ if(t_idx_type != H5D_CHUNK_IDX_BT2) FAIL_PUTS_ERROR("should be using v2 B-tree as index");
+ } else {
+ /* Verify index type */
+ if(idx_type != H5D_CHUNK_IDX_BTREE) FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+ if(t_idx_type != H5D_CHUNK_IDX_BTREE) FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+ } /* end else */
+
/* Select first chunk to write the data */
- offset = 0;
- count = 1;
- stride = 1;
- block = BYPASS_CHUNK_DIM / 2;
+ offset = t_offset[0] = t_offset[1] = 0;
+ count = t_count[0] = t_count[1] = 1;
+ stride = t_stride[0] = t_stride[1] = 1;
+ block = t_block[0] = t_block[1] = BYPASS_CHUNK_DIM / 2;
if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, &offset, &stride, &count, &block) < 0)
FAIL_STACK_ERROR
- /* Initialize data to write */
+ if(H5Sselect_hyperslab(t_sid, H5S_SELECT_SET, t_offset, t_stride, t_count, t_block) < 0)
+ FAIL_STACK_ERROR
+
+ /* Initialize data to write for 1-D dataset */
for(i = 0; i < BYPASS_CHUNK_DIM / 2; i++)
wdata[i] = i;
+ /* Initialize data to write for 2-D dataset */
+ for(i = 0; i < BYPASS_CHUNK_DIM / 2; i++)
+ for(j = 0; j < BYPASS_CHUNK_DIM / 2; j++)
+ t_wdata[i][j] = j;
+
+ /* Set up memory space for the 2-D dataset */
+ mid = H5Screate_simple(2, t_block, NULL);
+
+ /* Write to the first 1-D & 2-D datasets */
/* This write should go through the cache because fill value is used. */
if(H5Dwrite(dsid, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, wdata) < 0)
FAIL_STACK_ERROR
+ if(H5Dwrite(t_dsid, H5T_NATIVE_INT, mid, t_sid, H5P_DEFAULT, t_wdata) < 0)
+ FAIL_STACK_ERROR
+ /* Close the first 1-D & 2-D datasets */
if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+ if(H5Dclose(t_dsid) < 0) FAIL_STACK_ERROR
- /* Reopen the dataset */
+ /* Reopen the first 1-D & 2-D datasets */
if((dsid = H5Dopen2(fid, BYPASS_DATASET1, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+ if((t_dsid = H5Dopen2(fid, T_BYPASS_DATASET1, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
/* Reads both 2 chunks. Reading the second chunk should bypass the cache because the
* chunk is bigger than the cache size and it isn't allocated on disk. */
if(H5Dread(dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata1) < 0)
FAIL_STACK_ERROR
+ if(H5Dread(t_dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, t_rdata1) < 0)
+ FAIL_STACK_ERROR
+ /* Verify data for the first 1-D dataset */
for(i = 0; i < BYPASS_CHUNK_DIM / 2; i++)
if(rdata1[i] != i) {
printf(" Read different values than written in the 1st chunk.\n");
@@ -7449,41 +7955,88 @@ test_big_chunks_bypass_cache(hid_t fapl)
TEST_ERROR
} /* end if */
- /* Close the first dataset */
+ /* Verify data for the first 2-D dataset */
+ for(i = 0; i < BYPASS_CHUNK_DIM / 2; i++)
+ for(j = 0; j < BYPASS_CHUNK_DIM / 2; j++)
+ if(t_rdata1[i][j] != j) {
+ printf(" Read different values than written in the 1st chunk.\n");
+ printf(" At line %d and index (%d, %d), t_rdata1 = %d. It should be %d.\n",
+ __LINE__, i, j, t_rdata1[i][j], j);
+ TEST_ERROR
+ } /* end if */
+
+ for(i = BYPASS_CHUNK_DIM / 2; i < BYPASS_DIM; i++)
+ for(j = BYPASS_CHUNK_DIM / 2; j < BYPASS_DIM; j++)
+ if(t_rdata1[i][j] != fvalue) {
+ printf(" Read different values than written in the 2nd chunk.\n");
+ printf(" At line %d and index (%d, %d), t_rdata1 = %d. It should be %d.\n",
+ __LINE__, i, j, t_rdata1[i][j], fvalue);
+ TEST_ERROR
+ } /* end if */
+
+ /* Close the first 1-D & 2-D datasets */
if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+ if(H5Dclose(t_dsid) < 0) FAIL_STACK_ERROR
/* Create a second dataset without fill value. This time, both write
* and read should bypass the cache because the chunk is bigger than the
* cache size and it's not allocated on disk. */
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_NEVER) < 0) FAIL_STACK_ERROR
+ if(H5Pset_fill_time(t_dcpl, H5D_FILL_TIME_NEVER) < 0) FAIL_STACK_ERROR
+ /* Create a second 1-D & 2-D dataset */
if((dsid = H5Dcreate2(fid, BYPASS_DATASET2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
FAIL_STACK_ERROR
+ if((t_dsid = H5Dcreate2(fid, T_BYPASS_DATASET2, H5T_NATIVE_INT, t_sid, H5P_DEFAULT, t_dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ /* Write to the second 1-D & 2-D dataset */
if(H5Dwrite(dsid, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, wdata) < 0)
FAIL_STACK_ERROR
+ if(H5Dwrite(t_dsid, H5T_NATIVE_INT, mid, t_sid, H5P_DEFAULT, t_wdata) < 0)
+ FAIL_STACK_ERROR
+ /* Close the second 1-D & 2-D dataset */
if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+ if(H5Dclose(t_dsid) < 0) FAIL_STACK_ERROR
- /* Reopen the dataset */
+ /* Reopen the second 1-d dataset and 2-d dataset */
if((dsid = H5Dopen2(fid, BYPASS_DATASET2, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+ if((t_dsid = H5Dopen2(fid, T_BYPASS_DATASET2, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
/* Read back only the part that was written to the file. Reading the
* half chunk should bypass the cache because the chunk is bigger than
* the cache size. */
if(H5Dread(dsid, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, rdata2) < 0)
+ FAIL_STACK_ERROR
+ if(H5Dread(t_dsid, H5T_NATIVE_INT, mid, t_sid, H5P_DEFAULT, t_rdata2) < 0)
+ FAIL_STACK_ERROR
+ /* Verify data for the second 1-D dataset */
for(i = 0; i < BYPASS_CHUNK_DIM / 2; i++)
- if(rdata2[i] != i) {
+ if(rdata2[i] != i) {
printf(" Read different values than written in the chunk.\n");
printf(" At line %d and index %d, rdata2 = %d. It should be %d.\n", __LINE__, i, rdata2[i], i);
TEST_ERROR
} /* end if */
+ /* Verify data for the second 2-D dataset */
+ for(i = 0; i < BYPASS_CHUNK_DIM / 2; i++)
+ for(j = 0; j < BYPASS_CHUNK_DIM / 2; j++)
+ if(t_rdata2[i][j] != j) {
+ printf(" Read different values than written in the chunk.\n");
+ printf(" At line %d and index (%d, %d), t_rdata2 = %d. It should be %d.\n",
+ __LINE__, i, j, t_rdata2[i][j], j);
+ TEST_ERROR
+ } /* end if */
+
/* Close IDs */
if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+ if(H5Sclose(t_sid) < 0) FAIL_STACK_ERROR
if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+ if(H5Dclose(t_dsid) < 0) FAIL_STACK_ERROR
if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR
+ if(H5Pclose(t_dcpl) < 0) FAIL_STACK_ERROR
if(H5Pclose(fapl_local) < 0) FAIL_STACK_ERROR
if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
@@ -7493,14 +8046,596 @@ test_big_chunks_bypass_cache(hid_t fapl)
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
+ H5Pclose(t_dcpl);
H5Pclose(fapl_local);
H5Dclose(dsid);
+ H5Dclose(t_dsid);
H5Sclose(sid);
+ H5Sclose(t_sid);
H5Fclose(fid);
} H5E_END_TRY;
return -1;
} /* end test_big_chunks_bypass_cache() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_chunk_fast
+ *
+ * Purpose: Tests support for extensible arrays as chunk index.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, February 3, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_chunk_fast(const char *env_h5_driver, hid_t fapl)
+{
+ char filename[FILENAME_BUF_SIZE];
+ hid_t fid = -1; /* File ID */
+ hid_t my_fapl = -1; /* File access property list ID */
+ hid_t dcpl = -1; /* Dataset creation property list ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t scalar_sid = -1;/* Scalar dataspace ID */
+ hid_t dsid = -1; /* Dataset ID */
+ hsize_t fill; /* Temporary value, for filling arrays */
+ hsize_t hs_size[EARRAY_MAX_RANK]; /* Hyperslab size */
+ hsize_t chunk_dim[EARRAY_MAX_RANK]; /* Chunk dimensions */
+ H5F_libver_t low; /* File format low bound */
+ unsigned swmr; /* Whether file should be written with SWMR access enabled */
+
+ TESTING("datasets w/extensible array as chunk index");
+
+ h5_fixname(FILENAME[10], fapl, filename, sizeof filename);
+
+ /* Copy the file access property list */
+ if((my_fapl = H5Pcopy(fapl)) < 0) FAIL_STACK_ERROR
+
+ /* Turn on the chunk cache again */
+ {
+ int mdc_nelmts; /* # of elements in metadata cache */
+ size_t rdcc_nelmts; /* # of chunks in chunk cache */
+ size_t rdcc_nbytes; /* # of bytes in chunk cache */
+ double rdcc_w0; /* write-ratio for chunk cache */
+
+ if(H5Pget_cache(my_fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0) < 0) FAIL_STACK_ERROR
+ rdcc_nbytes = 1048576;
+ if(H5Pset_cache(my_fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0) < 0) FAIL_STACK_ERROR
+ } /* end block */
+
+ /* Check if we are using the latest version of the format */
+ if(H5Pget_libver_bounds(my_fapl, &low, NULL) < 0) FAIL_STACK_ERROR
+
+ /* Create scalar dataspace */
+ if((scalar_sid = H5Screate(H5S_SCALAR)) < 0) FAIL_STACK_ERROR
+
+ /* Initialize chunk dimensions */
+ fill = EARRAY_CHUNK_DIM;
+ H5VM_array_fill(chunk_dim, &fill, sizeof(fill), EARRAY_MAX_RANK);
+
+ /* Initialize hyperslab size */
+ fill = 1;
+ H5VM_array_fill(hs_size, &fill, sizeof(fill), EARRAY_MAX_RANK);
+
+ /* Loop over using SWMR access to write */
+ for(swmr = 0; swmr <= 1; swmr++) {
+ int compress; /* Whether chunks should be compressed */
+
+ /* SWMR is now supported with/without latest format: */
+ /* (1) write+latest-format (2) SWMR-write+non-latest-format */
+
+ /* Skip this iteration if SWMR I/O is not supported for the VFD specified
+ * by the environment variable.
+ */
+ if(!H5FD_supports_swmr_test(env_h5_driver)) {
+ continue;
+ }
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+ /* Loop over compressing chunks */
+ for(compress = 0; compress <= 1; compress++)
+#else
+ /* Loop over without compression */
+ for(compress = 0; compress <= 0; compress++)
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ {
+ H5D_alloc_time_t alloc_time; /* Storage allocation time */
+
+ /* Loop over storage allocation time */
+ for(alloc_time = H5D_ALLOC_TIME_EARLY; alloc_time <= H5D_ALLOC_TIME_INCR; alloc_time++) {
+ unsigned ndims; /* Current # of dims to test */
+
+ /* Loop over dataspace ranks to test */
+ for(ndims = 1; ndims < (EARRAY_MAX_RANK + 1); ndims++) {
+ unsigned unlim_dim;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
+
+ /* Set chunking & chunk dims */
+ if(H5Pset_chunk(dcpl, (int)ndims, chunk_dim) < 0) FAIL_STACK_ERROR
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+ /* Check if we should compress the chunks */
+ if(compress)
+ if(H5Pset_deflate(dcpl, 9) < 0) FAIL_STACK_ERROR
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ /* Set fill time */
+ if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0) FAIL_STACK_ERROR
+
+ /* Set allocation time */
+ if(H5Pset_alloc_time(dcpl, alloc_time) < 0) FAIL_STACK_ERROR
+
+ /* Loop over which dimension is unlimited */
+ for(unlim_dim = 0; unlim_dim < ndims; unlim_dim++) {
+ H5D_chunk_index_t idx_type; /* Dataset chunk index type */
+ hsize_t dim[EARRAY_MAX_RANK], max_dim[EARRAY_MAX_RANK]; /* Dataset dimensions */
+ hsize_t swizzled_dim[EARRAY_MAX_RANK]; /* Dimensions, with unlimited dimension moved to rank 0 */
+ hsize_t down[EARRAY_MAX_RANK]; /* 'down' sizes, for computing array index */
+ hsize_t hs_offset[EARRAY_MAX_RANK]; /* Hyperslab offset */
+ hssize_t snpoints; /* # of points in dataspace extent (signed) */
+ hsize_t npoints; /* # of points in dataspace extent */
+ unsigned write_elem, read_elem; /* Element written/read */
+ hsize_t u; /* Local index variable */
+
+ /* Create file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC | (swmr ? H5F_ACC_SWMR_WRITE : 0), H5P_DEFAULT, my_fapl)) < 0) FAIL_STACK_ERROR
+
+ /* Create n-D dataspace */
+ fill = EARRAY_DSET_DIM;
+ H5VM_array_fill(dim, &fill, sizeof(fill), EARRAY_MAX_RANK);
+ fill = EARRAY_DSET_DIM;
+ H5VM_array_fill(max_dim, &fill, sizeof(fill), EARRAY_MAX_RANK);
+ max_dim[unlim_dim] = H5S_UNLIMITED;
+ fill = EARRAY_DSET_DIM;
+ H5VM_array_fill(swizzled_dim, &fill, sizeof(fill), EARRAY_MAX_RANK);
+ if((sid = H5Screate_simple((int)ndims, dim, max_dim)) < 0) FAIL_STACK_ERROR
+
+ /* Get the number of points in the dataspace */
+ if((snpoints = H5Sget_simple_extent_npoints(sid)) < 0) FAIL_STACK_ERROR
+ npoints = (hsize_t)snpoints;
+
+ /* Compute the "down" dimension values */
+ if(H5VM_array_down(ndims, dim, down) < 0) FAIL_STACK_ERROR
+
+ /* Create chunked dataset */
+ if((dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the chunk index type */
+ if(H5D__layout_idx_type_test(dsid, &idx_type) < 0) FAIL_STACK_ERROR
+
+ /* Chunk index type expected depends on whether we are using the latest version of the format */
+ if(low == H5F_LIBVER_LATEST || swmr) {
+ /* Verify index type */
+ if(idx_type != H5D_CHUNK_IDX_EARRAY) FAIL_PUTS_ERROR("should be using extensible array as index");
+ } /* end if */
+ else {
+ /* Verify index type */
+ if(idx_type != H5D_CHUNK_IDX_BTREE) FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+ } /* end else */
+
+ /* Fill existing elements */
+ for(u = 0; u < npoints; u++) {
+ /* Compute the coordinate from the linear offset */
+ if(H5VM_array_calc_pre(u, ndims, down, hs_offset) < 0) FAIL_STACK_ERROR
+
+ /* Un-swizzle hyperslab offset in same way as swizzled dimensions */
+ H5VM_unswizzle_coords(hsize_t, hs_offset, unlim_dim);
+
+ /* Select a single element in the dataset */
+ if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL) < 0) FAIL_STACK_ERROR
+
+ /* Read (unwritten) element from dataset */
+ read_elem = 1;
+ if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+
+ /* Verify unwritten element is fill value (0) */
+ if(read_elem != 0) FAIL_PUTS_ERROR("invalid unwritten element read");
+
+ /* Write element to dataset */
+ write_elem = (unsigned)u;
+ if(H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem) < 0) FAIL_STACK_ERROR
+
+ /* Read element from dataset */
+ read_elem = write_elem + 1;
+ if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+
+ /* Verify written element is read in */
+ if(read_elem != write_elem) FAIL_PUTS_ERROR("invalid written element read");
+ } /* end for */
+
+ /* Incrementally extend dataset and verify write/reads */
+ while(dim[unlim_dim] < EARRAY_MAX_EXTEND) {
+ hssize_t snew_npoints; /* # of points in dataspace extent (signed) */
+ hsize_t new_npoints; /* # of points in dataspace extent */
+
+ /* Extend dataset */
+ dim[unlim_dim] += EARRAY_EXTEND_INCR;
+ swizzled_dim[0] += EARRAY_EXTEND_INCR;
+ if(H5Dset_extent(dsid, dim) < 0) FAIL_STACK_ERROR
+
+ /* Close old dataspace */
+ if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+
+ /* Get dataspace for dataset now */
+ if((sid = H5Dget_space(dsid)) < 0) FAIL_STACK_ERROR
+
+ /* Get the new number of points in the dataspace */
+ if((snew_npoints = H5Sget_simple_extent_npoints(sid)) < 0) FAIL_STACK_ERROR
+ new_npoints = (hsize_t)snew_npoints;
+
+ /* Fill new elements */
+ for(u = npoints; u < new_npoints; u++) {
+ /* Compute the coordinate from the linear offset */
+ if(H5VM_array_calc(u, ndims, swizzled_dim, hs_offset) < 0) FAIL_STACK_ERROR
+
+ /* Un-swizzle hyperslab offset in same way as swizzled dimensions */
+ H5VM_unswizzle_coords(hsize_t, hs_offset, unlim_dim);
+
+ /* Select a single element in the dataset */
+ if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL) < 0) FAIL_STACK_ERROR
+
+ /* Read (unwritten) element from dataset */
+ read_elem = 1;
+ if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+
+ /* Verify unwritten element is fill value (0) */
+ if(read_elem != 0) FAIL_PUTS_ERROR("invalid unwritten element read");
+
+ /* Write element to dataset */
+ write_elem = (unsigned)u;
+ if(H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem) < 0) FAIL_STACK_ERROR
+
+ /* Read element from dataset */
+ read_elem = write_elem + 1;
+ if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+
+ /* Verify written element is read in */
+ if(read_elem != write_elem) FAIL_PUTS_ERROR("invalid written element read");
+ } /* end for */
+
+ /* Update the number of points in the dataspace */
+ npoints = new_npoints;
+ } /* end while */
+
+ /* Close everything */
+ if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+ if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+
+ /* Re-open file & dataset */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | (swmr ? H5F_ACC_SWMR_READ : 0), my_fapl)) < 0) FAIL_STACK_ERROR
+
+ /* Open dataset */
+ if((dsid = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+ /* Get the chunk index type */
+ if(H5D__layout_idx_type_test(dsid, &idx_type) < 0) FAIL_STACK_ERROR
+
+    /* Chunk index type expected depends on whether we are using the latest version of the format */
+ if(low == H5F_LIBVER_LATEST || swmr) {
+ /* Verify index type */
+ if(idx_type != H5D_CHUNK_IDX_EARRAY) FAIL_PUTS_ERROR("should be using extensible array as index");
+ } /* end if */
+ else {
+ /* Verify index type */
+ if(idx_type != H5D_CHUNK_IDX_BTREE) FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+ } /* end else */
+
+ /* Get dataspace for dataset now */
+ if((sid = H5Dget_space(dsid)) < 0) FAIL_STACK_ERROR
+
+ /* Get the number of points in the dataspace */
+ if((snpoints = H5Sget_simple_extent_npoints(sid)) < 0) FAIL_STACK_ERROR
+ npoints = (hsize_t)snpoints;
+
+ /* Get the current dimensions into swizzled_dim array */
+ if(H5Sget_simple_extent_dims(sid, swizzled_dim, NULL) < 0) FAIL_STACK_ERROR
+
+ /* Generate the swizzled dimensions */
+ H5VM_swizzle_coords(hsize_t, swizzled_dim, unlim_dim);
+
+ /* Compute the "down" dimension values */
+ if(H5VM_array_down(ndims, swizzled_dim, down) < 0) FAIL_STACK_ERROR
+
+ /* Read elements */
+ for(u = 0; u < npoints; u++) {
+ /* Compute the coordinate from the linear offset */
+ if(H5VM_array_calc_pre(u, ndims, down, hs_offset) < 0) FAIL_STACK_ERROR
+
+ /* Unswizzle hyperslab offset in same way as swizzled dimensions */
+ H5VM_unswizzle_coords(hsize_t, hs_offset, unlim_dim);
+
+ /* Select a single element in the dataset */
+ if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL) < 0) FAIL_STACK_ERROR
+
+ /* Read written element from dataset */
+ read_elem = (unsigned)(u + 1);
+ if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+
+ /* Verify written element is correct */
+ if(read_elem != u) FAIL_PUTS_ERROR("invalid element read");
+ } /* end for */
+
+ /* Close everything */
+ if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+ if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+
+ /* Re-open file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, my_fapl)) < 0) FAIL_STACK_ERROR
+
+ /* Delete dataset */
+ if(H5Ldelete(fid, "dset", H5P_DEFAULT) < 0) FAIL_STACK_ERROR
+
+ /* Close everything */
+ if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+ } /* end for */
+
+ /* Close everything */
+ if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR
+ } /* end for */
+ } /* end for */
+ } /* end for */
+ } /* end for */
+
+ /* Close everything */
+ if(H5Sclose(scalar_sid) < 0) FAIL_STACK_ERROR
+ if(H5Pclose(my_fapl) < 0) FAIL_STACK_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Dclose(dsid);
+ H5Sclose(sid);
+ H5Sclose(scalar_sid);
+ H5Fclose(fid);
+ H5Pclose(my_fapl);
+ } H5E_END_TRY;
+ return -1;
+} /* end test_chunk_fast() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_reopen_chunk_fast
+ *
+ * Purpose: To verify a bug in extensible arrays as chunk index.
+ * When the dataset is closed in H5D_close(), the pointer
+ * to the extensible array struct in the layout message
+ * is copied via H5D_flush_real() before H5D_chunk_dest().
+ * This causes an abort from "Assertion `ea->hdr' failed."
+ * later when the dataset is re-opened and read.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Vailin Choi
+ * April 13, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_reopen_chunk_fast(hid_t fapl)
+{
+    char        filename[FILENAME_BUF_SIZE];
+    hid_t       fid = -1;       /* File ID */
+    hid_t       dcpl = -1;      /* Dataset creation property list ID */
+    hid_t       sid = -1;       /* Dataspace ID */
+    hid_t       scalar_sid = -1;/* Scalar dataspace ID */
+    hid_t       dsid = -1;      /* Dataset ID */
+    hsize_t     dim, max_dim, chunk_dim; /* Dataset and chunk dimensions */
+    hsize_t     hs_offset;      /* Hyperslab offset */
+    hsize_t     hs_size;        /* Hyperslab size */
+    H5D_alloc_time_t alloc_time;        /* Storage allocation time */
+    unsigned    write_elem, read_elem;  /* Element written/read */
+    unsigned    u;              /* Local index variable */
+
+    TESTING("datasets w/extensible array open/reopen with read/write");
+
+    h5_fixname(FILENAME[10], fapl, filename, sizeof filename);
+
+    /* Loop over storage allocation time */
+    for(alloc_time = H5D_ALLOC_TIME_EARLY; alloc_time <= H5D_ALLOC_TIME_INCR; alloc_time++) {
+	/* Create file */
+	if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR
+
+	/* Create dataset creation property list */
+	if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
+
+	/* Set chunking */
+	chunk_dim = 10;
+	if(H5Pset_chunk(dcpl, 1, &chunk_dim) < 0) FAIL_STACK_ERROR
+
+	/* Set fill time */
+	if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0) FAIL_STACK_ERROR
+
+	/* Set allocation time */
+	if(H5Pset_alloc_time(dcpl, alloc_time) < 0) FAIL_STACK_ERROR
+
+	/* Create scalar dataspace */
+	if((scalar_sid = H5Screate(H5S_SCALAR)) < 0) FAIL_STACK_ERROR
+
+	/* Create 1-D dataspace */
+	dim = 100;
+	max_dim = H5S_UNLIMITED;
+	if((sid = H5Screate_simple(1, &dim, &max_dim)) < 0) FAIL_STACK_ERROR
+
+	/* Create chunked dataset */
+	if((dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+	    FAIL_STACK_ERROR
+
+	/* Fill existing elements */
+	hs_size = 1;
+	for(u = 0; u < 100; u++) {
+	    /* Select a single element in the dataset */
+	    hs_offset = u;
+	    if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, &hs_offset, NULL, &hs_size, NULL) < 0)
+		FAIL_STACK_ERROR
+	    /* Write element to dataset */
+	    write_elem = u;
+	    if(H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem) < 0)
+		FAIL_STACK_ERROR
+	} /* end for */
+
+	/* Close everything */
+	if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+
+	/* Reopen the dataset; with the bug present, reading below aborts on
+         * "Assertion `ea->hdr' failed" in the extensible array chunk index */
+	if((dsid = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+	/* Read from dataset */
+	for(u = 0; u < 100; u++) {
+	    /* Select a single element in the dataset */
+	    hs_offset = u;
+	    if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, &hs_offset, NULL, &hs_size, NULL) < 0)
+		FAIL_STACK_ERROR
+
+	    /* Initialize the read buffer to a value the read must overwrite */
+	    read_elem = u + 1;
+
+	    /* Read element from dataset */
+	    if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0)
+		FAIL_STACK_ERROR
+
+	    /* Verify the element read back matches the value written above */
+	    if(read_elem != u) FAIL_PUTS_ERROR("invalid written element read");
+	} /* end for */
+
+	if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+	if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+	if(H5Sclose(scalar_sid) < 0) FAIL_STACK_ERROR
+	if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR
+	if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+    } /* end for */
+
+    PASSED();
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(dcpl);
+        H5Dclose(dsid);
+        H5Sclose(sid);
+        H5Sclose(scalar_sid);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+    return -1;
+} /* end test_reopen_chunk_fast() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_chunk_fast_bug1
+ *
+ * Purpose: Test extensible arrays where the first dimension in the
+ * chunk size is the same as the second dimension in the
+ * dataset size. This helps to confirm that all dimensions
+ * are being "swizzled" correctly in the earray chunk index
+ * code.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Neil Fortner
+ * March 22, 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_chunk_fast_bug1(hid_t fapl)
+{
+    char        filename[FILENAME_BUF_SIZE];
+    hid_t       fid = -1;       /* File ID */
+    hid_t       dcpl = -1;      /* Dataset creation property list ID */
+    hid_t       sid = -1;       /* Dataspace ID */
+    hid_t       dsid = -1;      /* Dataset ID */
+    hsize_t     dim[2], max_dim[2], chunk_dim[2]; /* Dataset and chunk dimensions */
+    H5D_alloc_time_t alloc_time;        /* Storage allocation time */
+    static unsigned wbuf[40][20], rbuf[40][20];  /* Element written/read */
+    unsigned    row, col;       /* Local index variables */
+
+    TESTING("datasets w/extensible array chunk indexing bug");
+
+    h5_fixname(FILENAME[10], fapl, filename, sizeof filename);
+
+    /* Seed the write buffer with a distinct value per element */
+    for(row = 0; row < 40; row++)
+        for(col = 0; col < 20; col++)
+            wbuf[row][col] = (row * 20) + col;
+
+    /* Build a 2-D dataspace whose second dimension is unlimited */
+    dim[0] = 40;
+    dim[1] = 20;
+    max_dim[0] = 40;
+    max_dim[1] = H5S_UNLIMITED;
+    if((sid = H5Screate_simple(2, dim, max_dim)) < 0) FAIL_STACK_ERROR
+
+    /* Exercise every storage allocation time */
+    for(alloc_time = H5D_ALLOC_TIME_EARLY; alloc_time <= H5D_ALLOC_TIME_INCR; alloc_time++) {
+        /* Fresh file for each allocation time */
+        if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR
+
+        /* DCPL: chunked layout whose first chunk dimension equals the
+         * dataset's second dimension, plus the allocation time under test */
+        if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
+        chunk_dim[0] = 20;
+        chunk_dim[1] = 10;
+        if(H5Pset_chunk(dcpl, 2, chunk_dim) < 0) FAIL_STACK_ERROR
+        if(H5Pset_alloc_time(dcpl, alloc_time) < 0) FAIL_STACK_ERROR
+
+        /* Create the chunked dataset */
+        if((dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+            FAIL_STACK_ERROR
+
+        /* Write the whole buffer in one shot */
+        if(H5Dwrite(dsid, H5T_NATIVE_UINT, sid, sid, H5P_DEFAULT, &wbuf) < 0)
+            FAIL_STACK_ERROR
+
+        /* Close and reopen so the read goes through a freshly-opened index */
+        if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+        if((dsid = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+        /* Read everything back */
+        if(H5Dread(dsid, H5T_NATIVE_UINT, sid, sid, H5P_DEFAULT, &rbuf) < 0)
+            FAIL_STACK_ERROR
+
+        /* Every element read must match what was written */
+        for(row = 0; row < 40; row++)
+            for(col = 0; col < 20; col++)
+                if(wbuf[row][col] != rbuf[row][col])
+                    FAIL_PUTS_ERROR("invalid element read");
+
+        /* Tear down this iteration's objects */
+        if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+        if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR
+        if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+    } /* end for */
+
+    if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+
+    PASSED();
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(dcpl);
+        H5Dclose(dsid);
+        H5Sclose(sid);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+    return -1;
+} /* end test_chunk_fast_bug1() */
+
/* This message derives from H5Z */
const H5Z_class2_t H5Z_EXPAND[1] = {{
H5Z_CLASS_T_VERS, /* H5Z_class_t version */
@@ -7583,28 +8718,33 @@ static herr_t
test_chunk_expand(hid_t fapl)
{
char filename[FILENAME_BUF_SIZE];
- hid_t fid = -1; /* File ID */
- hid_t dcpl = -1; /* Dataset creation property list ID */
- hid_t sid = -1; /* Dataspace ID */
- hid_t scalar_sid = -1;/* Scalar dataspace ID */
- hid_t dsid = -1; /* Dataset ID */
- hsize_t dim, max_dim, chunk_dim; /* Dataset and chunk dimensions */
- hsize_t hs_offset; /* Hyperslab offset */
- hsize_t hs_size; /* Hyperslab size */
- H5D_alloc_time_t alloc_time; /* Storage allocation time */
- unsigned write_elem, read_elem; /* Element written/read */
- unsigned u; /* Local index variable */
- size_t size; /* Size of type */
- herr_t status; /* Generic return value */
+ hid_t fid = -1; /* File ID */
+ hid_t dcpl = -1, dcpl2 = -1; /* Dataset creation property list ID */
+ hid_t sid = -1, sid2 = -1; /* Dataspace ID */
+ hid_t scalar_sid = -1; /* Scalar dataspace ID */
+ hid_t dsid = -1, dsid2 = -1; /* Dataset ID */
+ hsize_t dim, max_dim, chunk_dim; /* Dataset and chunk dimensions */
+ hsize_t dim2[2], max_dim2[2], chunk_dim2[2]; /* Dataset and chunk dimensions */
+ H5D_chunk_index_t idx_type, idx_type2; /* Dataset chunk index type */
+ H5F_libver_t low, high; /* File format bounds */
+ hsize_t hs_offset, hs_offset2[2]; /* Hyperslab offset */
+ hsize_t hs_size, hs_size2[2]; /* Hyperslab size */
+ H5D_alloc_time_t alloc_time; /* Storage allocation time */
+ unsigned write_elem, read_elem; /* Element written/read */
+ unsigned write_elem2, read_elem2; /* Element written/read */
+ unsigned u; /* Local index variable */
+ herr_t status; /* Generic return value */
TESTING("filter expanding chunks too much");
- h5_fixname(FILENAME[10], fapl, filename, sizeof filename);
+ h5_fixname(FILENAME[11], fapl, filename, sizeof filename);
+
+ /* Check if we are using the latest version of the format */
+ if(H5Pget_libver_bounds(fapl, &low, &high) < 0) FAIL_STACK_ERROR
- size = sizeof(size_t);
- if(size <= 4) {
+ if(sizeof(size_t) <= 4 && low != H5F_LIBVER_LATEST) {
SKIPPED();
- puts(" Current machine can't test for error");
+ puts(" Current machine can't test for error w/old file format");
} /* end if */
else {
/* Register "expansion" filter */
@@ -7615,34 +8755,41 @@ test_chunk_expand(hid_t fapl)
/* Loop over storage allocation time */
for(alloc_time = H5D_ALLOC_TIME_EARLY; alloc_time <= H5D_ALLOC_TIME_INCR; H5_INC_ENUM(H5D_alloc_time_t, alloc_time)) {
+
/* Create file */
if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR
/* Create dataset creation property list */
if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
+ if((dcpl2 = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
/* Set chunking */
- chunk_dim = 10;
+ chunk_dim = chunk_dim2[0] = chunk_dim2[1] = 10;
if(H5Pset_chunk(dcpl, 1, &chunk_dim) < 0) FAIL_STACK_ERROR
+ if(H5Pset_chunk(dcpl2, 2, chunk_dim2) < 0) FAIL_STACK_ERROR
/* Set fill time */
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0) FAIL_STACK_ERROR
+ if(H5Pset_fill_time(dcpl2, H5D_FILL_TIME_ALLOC) < 0) FAIL_STACK_ERROR
/* Set allocation time */
if(H5Pset_alloc_time(dcpl, alloc_time) < 0) FAIL_STACK_ERROR
+ if(H5Pset_alloc_time(dcpl2, alloc_time) < 0) FAIL_STACK_ERROR
/* Set "expand" filter */
if(H5Pset_filter(dcpl, H5Z_FILTER_EXPAND, 0, (size_t)0, NULL) < 0) FAIL_STACK_ERROR
+ if(H5Pset_filter(dcpl2, H5Z_FILTER_EXPAND, 0, (size_t)0, NULL) < 0) FAIL_STACK_ERROR
/* Create scalar dataspace */
if((scalar_sid = H5Screate(H5S_SCALAR)) < 0) FAIL_STACK_ERROR
- /* Create 1-D dataspace */
- dim = 100;
- max_dim = H5S_UNLIMITED;
+ /* Create 1-D and 2-D dataspace */
+ dim = dim2[0] = dim2[1] = 100;
+ max_dim = max_dim2[0] = max_dim2[1] = H5S_UNLIMITED;
if((sid = H5Screate_simple(1, &dim, &max_dim)) < 0) FAIL_STACK_ERROR
+ if((sid2 = H5Screate_simple(2, dim2, max_dim2)) < 0) FAIL_STACK_ERROR
- /* Create chunked dataset */
+ /* Create 1-D & 2-D chunked datasets */
if(H5D_ALLOC_TIME_EARLY == alloc_time) {
/* Make the expansion factor large enough to cause failure right away */
filter_expand_factor_g = 8;
@@ -7651,176 +8798,272 @@ test_chunk_expand(hid_t fapl)
dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
} H5E_END_TRY;
if(dsid >= 0) FAIL_PUTS_ERROR("should fail to create dataset when allocation time is early");
+
+ H5E_BEGIN_TRY {
+ dsid2 = H5Dcreate2(fid, "dset2", H5T_NATIVE_UINT, sid2, H5P_DEFAULT, dcpl2, H5P_DEFAULT);
+ } H5E_END_TRY;
+ if(dsid2 >= 0) FAIL_PUTS_ERROR("should fail to create dataset when allocation time is early");
+
} /* end if */
else {
if((dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
FAIL_STACK_ERROR
+ if((dsid2 = H5Dcreate2(fid, "dset2", H5T_NATIVE_UINT, sid2, H5P_DEFAULT, dcpl2, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the chunk index type */
+ if(H5D__layout_idx_type_test(dsid, &idx_type) < 0) FAIL_STACK_ERROR
+ if(H5D__layout_idx_type_test(dsid2, &idx_type2) < 0) FAIL_STACK_ERROR
+
+ /* Chunk index type expected depends on whether we are using the latest version of the format */
+ if(low == H5F_LIBVER_LATEST) {
+ /* Verify index type */
+ if(idx_type != H5D_CHUNK_IDX_EARRAY) FAIL_PUTS_ERROR("should be using extensible array as index");
+ if(idx_type2 != H5D_CHUNK_IDX_BT2) FAIL_PUTS_ERROR("should be using v2 B-tree as index");
+ } /* end if */
+ else {
+ /* Verify index type */
+ if(idx_type != H5D_CHUNK_IDX_BTREE) FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+ if(idx_type2 != H5D_CHUNK_IDX_BTREE) FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+ } /* end else */
+
/* Fill elements */
- hs_size = 1;
+ hs_size = hs_size2[0] = hs_size2[1] = 1;
for(u = 0; u < 100; u++) {
- /* Select a single element in the dataset */
- hs_offset = u;
+
+ hs_offset = hs_offset2[0] = hs_offset2[1] = u;
+
+ /* Select a single element in the 1-D dataset */
if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, &hs_offset, NULL, &hs_size, NULL) < 0) FAIL_STACK_ERROR
+ /* Select a single element in the 2-D dataset; NOT every element is selected */
+ if(H5Sselect_hyperslab(sid2, H5S_SELECT_SET, hs_offset2, NULL, hs_size2, NULL) < 0) FAIL_STACK_ERROR
+
/* Read (unwritten) element from dataset */
- read_elem = 1;
+ read_elem = read_elem2 = 1;
if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+ if(H5Dread(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &read_elem2) < 0) FAIL_STACK_ERROR
/* Verify unwritten element is fill value (0) */
if(read_elem != 0) FAIL_PUTS_ERROR("invalid unwritten element read");
+ if(read_elem2 != 0) FAIL_PUTS_ERROR("invalid unwritten element read");
/* Don't expand chunks yet */
filter_expand_factor_g = 0;
- /* Write element to dataset */
- write_elem = u;
+ /* Write element to the datasets */
+ write_elem = write_elem2 = u;
if(H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem) < 0) FAIL_STACK_ERROR
+ if(H5Dwrite(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &write_elem2) < 0) FAIL_STACK_ERROR
- /* Read element from dataset */
+ /* Read element from the datasets */
read_elem = write_elem + 1;
+ read_elem2 = write_elem2 + 1;
if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+ if(H5Dread(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &read_elem2) < 0) FAIL_STACK_ERROR
/* Verify written element is read in */
if(read_elem != write_elem) FAIL_PUTS_ERROR("invalid written element read");
+ if(read_elem2 != write_elem2) FAIL_PUTS_ERROR("invalid written element read");
/* Expand chunks now */
filter_expand_factor_g = 8;
- /* Write element to dataset */
- write_elem = u;
+ /* Write element to the datasets */
+ write_elem = write_elem2 = u;
H5E_BEGIN_TRY {
status = H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem);
} H5E_END_TRY;
if(status >= 0) FAIL_PUTS_ERROR("should fail to write to dataset when allocation time is not early");
+
+ H5E_BEGIN_TRY {
+ status = H5Dwrite(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &write_elem2);
+ } H5E_END_TRY;
+ if(status >= 0) FAIL_PUTS_ERROR("should fail to write to dataset when allocation time is not early");
} /* end for */
/* Incrementally extend dataset and verify write/reads */
while(dim < 1000) {
- /* Extend dataset */
+ /* Extend the datasets */
dim += 100;
+ dim2[0] += 100;
+ dim2[1] += 100;
if(H5Dset_extent(dsid, &dim) < 0) FAIL_STACK_ERROR
+ if(H5Dset_extent(dsid2, dim2) < 0) FAIL_STACK_ERROR
/* Close old dataspace */
if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+ if(H5Sclose(sid2) < 0) FAIL_STACK_ERROR
- /* Get dataspace for dataset now */
+ /* Get dataspace for the datasets now */
if((sid = H5Dget_space(dsid)) < 0) FAIL_STACK_ERROR
+ if((sid2 = H5Dget_space(dsid2)) < 0) FAIL_STACK_ERROR
/* Fill new elements */
- hs_size = 1;
+ hs_size = hs_size2[0] = hs_size2[1] = 1;
for(u = 0; u < 100; u++) {
- /* Select a single element in the dataset */
+ /* Select a single element in the datasets */
hs_offset = (dim + u) - 100;
+ hs_offset2[0] = (dim2[0] + u) - 100;
+ hs_offset2[1] = (dim2[1] + u) - 100;
if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, &hs_offset, NULL, &hs_size, NULL) < 0) FAIL_STACK_ERROR
+ if(H5Sselect_hyperslab(sid2, H5S_SELECT_SET, hs_offset2, NULL, hs_size2, NULL) < 0) FAIL_STACK_ERROR
- /* Read (unwritten) element from dataset */
- read_elem = 1;
+ /* Read (unwritten) element from the datasets */
+ read_elem = read_elem2 = 1;
if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+ if(H5Dread(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &read_elem2) < 0) FAIL_STACK_ERROR
/* Verify unwritten element is fill value (0) */
if(read_elem != 0) FAIL_PUTS_ERROR("invalid unwritten element read");
+ if(read_elem2 != 0) FAIL_PUTS_ERROR("invalid unwritten element read");
/* Don't expand chunks yet */
filter_expand_factor_g = 0;
- /* Write element to dataset */
- write_elem = u;
+ /* Write element to the datasets */
+ write_elem = write_elem2 = u;
if(H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem) < 0) FAIL_STACK_ERROR
+ if(H5Dwrite(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &write_elem2) < 0) FAIL_STACK_ERROR
- /* Read element from dataset */
+ /* Read element from the datasets */
read_elem = write_elem + 1;
+ read_elem2 = write_elem2 + 1;
if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+ if(H5Dread(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &read_elem2) < 0) FAIL_STACK_ERROR
/* Verify written element is read in */
if(read_elem != write_elem) FAIL_PUTS_ERROR("invalid written element read");
+ if(read_elem2 != write_elem2) FAIL_PUTS_ERROR("invalid written element read");
/* Expand chunks now */
filter_expand_factor_g = 8;
- /* Write element to dataset */
- write_elem = u;
+ /* Write element to the datasets */
+ write_elem = write_elem2 = u;
H5E_BEGIN_TRY {
status = H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem);
} H5E_END_TRY;
if(status >= 0) FAIL_PUTS_ERROR("should fail to write to dataset when allocation time is not early");
+
+ H5E_BEGIN_TRY {
+ status = H5Dwrite(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &write_elem2);
+ } H5E_END_TRY;
+ if(status >= 0) FAIL_PUTS_ERROR("should fail to write to dataset when allocation time is not early");
} /* end for */
} /* end while */
- /* Close dataset */
+ /* Close the datasets */
if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+ if(H5Dclose(dsid2) < 0) FAIL_STACK_ERROR
} /* end else */
/* Close everything */
if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+ if(H5Sclose(sid2) < 0) FAIL_STACK_ERROR
if(H5Sclose(scalar_sid) < 0) FAIL_STACK_ERROR
if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR
+ if(H5Pclose(dcpl2) < 0) FAIL_STACK_ERROR
if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
/* If the dataset was created, do some extra testing */
if(H5D_ALLOC_TIME_EARLY != alloc_time) {
- /* Re-open file & dataset */
+ /* Re-open file & datasets */
if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) FAIL_STACK_ERROR
- /* Open dataset */
+ /* Open the datasets */
if((dsid = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+ if((dsid2 = H5Dopen2(fid, "dset2", H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+ /* Get the chunk index type for the two datasets */
+ if(H5D__layout_idx_type_test(dsid, &idx_type) < 0) FAIL_STACK_ERROR
+ if(H5D__layout_idx_type_test(dsid2, &idx_type2) < 0) FAIL_STACK_ERROR
+
+ /* Chunk index type expected depends on whether we are using the latest version of the format */
+ if(low == H5F_LIBVER_LATEST) {
+ /* Verify index type */
+ if(idx_type != H5D_CHUNK_IDX_EARRAY) FAIL_PUTS_ERROR("should be using extensible array as index");
+ if(idx_type2 != H5D_CHUNK_IDX_BT2) FAIL_PUTS_ERROR("should be using v2 B-tree as index");
+ } /* end if */
+ else {
+ /* Verify index type */
+ if(idx_type != H5D_CHUNK_IDX_BTREE) FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+ if(idx_type2 != H5D_CHUNK_IDX_BTREE) FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+ } /* end else */
/* Create scalar dataspace */
if((scalar_sid = H5Screate(H5S_SCALAR)) < 0) FAIL_STACK_ERROR
- /* Get dataspace for dataset now */
+ /* Get dataspace for the datasets now */
if((sid = H5Dget_space(dsid)) < 0) FAIL_STACK_ERROR
+ if((sid2 = H5Dget_space(dsid2)) < 0) FAIL_STACK_ERROR
/* Read elements */
- hs_size = 1;
+ hs_size = hs_size2[0] = hs_size2[1] = 1;
for(u = 0; u < 1000; u++) {
- /* Select a single element in the dataset */
- hs_offset = u;
+ /* Select a single element in the datasets */
+ hs_offset = hs_offset2[0] = hs_offset2[1] = u;
if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, &hs_offset, NULL, &hs_size, NULL) < 0) FAIL_STACK_ERROR
+ if(H5Sselect_hyperslab(sid2, H5S_SELECT_SET, hs_offset2, NULL, hs_size2, NULL) < 0) FAIL_STACK_ERROR
- /* Read element from dataset */
- read_elem = u + 1;
+ /* Read element from the datasets */
+ read_elem = read_elem2 = u + 1;
if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+ if(H5Dread(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &read_elem2) < 0) FAIL_STACK_ERROR
/* Verify unwritten element is proper value */
if(read_elem != (u % 100)) FAIL_PUTS_ERROR("invalid element read");
+ if(read_elem2 != (u % 100)) FAIL_PUTS_ERROR("invalid element read");
/* Don't expand chunks yet */
filter_expand_factor_g = 0;
- /* Write element to dataset */
- write_elem = u % 100;
+ /* Write element to the datasets */
+ write_elem = write_elem2 = u % 100;
if(H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem) < 0) FAIL_STACK_ERROR
+ if(H5Dwrite(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &write_elem2) < 0) FAIL_STACK_ERROR
- /* Read element from dataset */
+ /* Read element from the datasets */
read_elem = write_elem + 1;
+ read_elem2 = write_elem2 + 1;
if(H5Dread(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &read_elem) < 0) FAIL_STACK_ERROR
+ if(H5Dread(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &read_elem2) < 0) FAIL_STACK_ERROR
/* Verify written element is read in */
if(read_elem != write_elem) FAIL_PUTS_ERROR("invalid written element read");
+ if(read_elem2 != write_elem2) FAIL_PUTS_ERROR("invalid written element read");
/* Expand chunks now */
filter_expand_factor_g = 8;
- /* Write element to dataset */
- write_elem = u % 100;
+ /* Write element to the datasets */
+ write_elem = write_elem2 = u % 100;
H5E_BEGIN_TRY {
status = H5Dwrite(dsid, H5T_NATIVE_UINT, scalar_sid, sid, H5P_DEFAULT, &write_elem);
} H5E_END_TRY;
if(status >= 0) FAIL_PUTS_ERROR("should fail to write to dataset when allocation time is not early");
+
+ H5E_BEGIN_TRY {
+ status = H5Dwrite(dsid2, H5T_NATIVE_UINT, scalar_sid, sid2, H5P_DEFAULT, &write_elem2);
+ } H5E_END_TRY;
+ if(status >= 0) FAIL_PUTS_ERROR("should fail to write to dataset when allocation time is not early");
} /* end for */
/* Close everything */
if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+ if(H5Sclose(sid2) < 0) FAIL_STACK_ERROR
if(H5Sclose(scalar_sid) < 0) FAIL_STACK_ERROR
if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+ if(H5Dclose(dsid2) < 0) FAIL_STACK_ERROR
if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
/* Re-open file */
if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0) FAIL_STACK_ERROR
- /* Delete dataset */
+ /* Delete the datasets */
if(H5Ldelete(fid, "dset", H5P_DEFAULT) < 0) FAIL_STACK_ERROR
+ if(H5Ldelete(fid, "dset2", H5P_DEFAULT) < 0) FAIL_STACK_ERROR
/* Close everything */
if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
@@ -7841,8 +9084,11 @@ test_chunk_expand(hid_t fapl)
error:
H5E_BEGIN_TRY {
H5Pclose(dcpl);
+ H5Pclose(dcpl2);
H5Dclose(dsid);
+ H5Dclose(dsid2);
H5Sclose(sid);
+ H5Sclose(sid2);
H5Sclose(scalar_sid);
H5Fclose(fid);
} H5E_END_TRY;
@@ -7851,6 +9097,863 @@ error:
/*-------------------------------------------------------------------------
+ * Function: test_fixed_array
+ *
+ * Purpose: Tests support for Fixed Array and Implicit Indexing
+ *
+ * Create the following 3 datasets:
+ * 1) extendible chunked dataset with fixed max. dims
+ * 2) extendible chunked dataset with NULL max. dims
+ * 3) extendible chunked dataset with same max. dims
+ * (Note that the third dataset is created with bigger size for curr & max. dims
+ * so that Fixed Array Indexing with paging is exercised)
+ *
+ * Repeat the following test with/without compression filter
+ * Repeat the following test with H5D_ALLOC_TIME_EARLY/H5D_ALLOC_TIME_LATE/H5D_ALLOC_TIME_INCR
+ * For the old format,
+ * verify that v1 btree indexing type is used for
+ * all 3 datasets with all settings
+ * For the new format:
+ * Verify that Implicit Index type is used for
+ * #1, #2, #3 datasets when ALLOC_TIME_EARLY and compression are true
+ * Verify Fixed Array indexing type is used for
+ * #1, #2, #3 datasets with all other settings
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Vailin Choi; 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_fixed_array(hid_t fapl)
+{
+    char    filename[FILENAME_BUF_SIZE];    /* File name */
+    hid_t   fid = -1;           /* File ID */
+    hid_t   dcpl = -1;          /* Dataset creation property list ID */
+
+    hid_t   sid = -1;           /* Dataspace ID for dataset with fixed dimensions */
+    hid_t   sid_big = -1;       /* Dataspace ID for big dataset */
+    hid_t   sid_max = -1;       /* Dataspace ID for dataset with maximum dimensions set */
+
+    hid_t   dsid = -1;          /* Dataset ID for dataset with fixed dimensions */
+    hid_t   dsid_big = -1;      /* Dataset ID for big dataset with fixed dimensions */
+    hid_t   dsid_max = -1;      /* Dataset ID for dataset with maximum dimensions set */
+
+    hsize_t dim2[2] = {48, 18};         /* Dataset dimensions */
+    hsize_t dim2_big[2] = {500, 60};    /* Big dataset dimensions */
+    hsize_t dim2_max[2] = {120, 50};    /* Maximum dataset dimensions */
+
+    /* NOTE: initialized to -1 so the H5Sclose() calls in the error path are
+     * safe even when we fail before these dataspaces are created (the
+     * original left them uninitialized but still closed them on error) */
+    hid_t   mem_id = -1;        /* Memory space ID */
+    hid_t   big_mem_id = -1;    /* Memory space ID for big dataset */
+
+    hsize_t msize[1] = {POINTS};            /* Size of memory space */
+    hsize_t msize_big[1] = {POINTS_BIG};    /* Size of memory space for big dataset */
+
+    int     wbuf[POINTS];           /* write buffer */
+    int     wbuf_big[POINTS_BIG];   /* write buffer for big dataset */
+    int     rbuf[POINTS];           /* read buffer */
+    int     rbuf_big[POINTS_BIG];   /* read buffer for big dataset */
+
+    hsize_t chunk_dim2[2] = {4, 3};     /* Chunk dimensions */
+    int     chunks[12][6];              /* # of chunks for dataset dimensions */
+    int     chunks_big[125][20];        /* # of chunks for big dataset dimensions */
+    int     chunk_row;                  /* chunk row index */
+    int     chunk_col;                  /* chunk column index */
+
+    hsize_t coord[POINTS][2];           /* dataset coordinates */
+    hsize_t coord_big[POINTS_BIG][2];   /* big dataset coordinates */
+
+    H5D_chunk_index_t idx_type;     /* Dataset chunk index type */
+    H5F_libver_t low, high;         /* File format bounds */
+    H5D_alloc_time_t alloc_time;    /* Storage allocation time */
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+    unsigned    compress;           /* Whether chunks should be compressed */
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+    h5_stat_size_t empty_size;      /* Size of an empty file */
+    h5_stat_size_t file_size;       /* Size of each file created */
+
+    size_t  i, j;                   /* local index variables */
+    herr_t  ret;                    /* Generic return value */
+
+    TESTING("datasets w/fixed array as chunk index");
+
+    h5_fixname(FILENAME[12], fapl, filename, sizeof filename);
+
+    /* Check if we are using the latest version of the format */
+    if(H5Pget_libver_bounds(fapl, &low, &high) < 0) FAIL_STACK_ERROR
+
+    /* Create and close the file to get the file size */
+    if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+        STACK_ERROR
+    if(H5Fclose(fid) < 0)
+        STACK_ERROR
+
+    /* Get the size of the empty file */
+    if((empty_size = h5_get_file_size(filename, fapl)) < 0)
+        TEST_ERROR
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+    /* Loop over compressing chunks */
+    for(compress = FALSE; compress <= TRUE; compress++) {
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+        /* Loop over storage allocation time */
+        for(alloc_time = H5D_ALLOC_TIME_EARLY; alloc_time <= H5D_ALLOC_TIME_INCR; alloc_time++) {
+            /* Create file */
+            if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR
+
+            /* Create dataset creation property list */
+            if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
+
+            /* Set chunking */
+            if((ret = H5Pset_chunk(dcpl, 2, chunk_dim2)) < 0)
+                FAIL_PUTS_ERROR("    Problem with setting chunk.")
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+            /* Check if we should compress the chunks */
+            if(compress)
+                if(H5Pset_deflate(dcpl, 9) < 0) FAIL_STACK_ERROR
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+            /* Set fill time */
+            if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0) FAIL_STACK_ERROR
+
+            /* Set allocation time */
+            if(H5Pset_alloc_time(dcpl, alloc_time) < 0) FAIL_STACK_ERROR
+
+            /* Reset the per-chunk usage map so each chunk is picked at most once */
+            for(i = 0; i < dim2[0]/chunk_dim2[0]; i++)
+                for(j = 0; j < dim2[1]/chunk_dim2[1]; j++)
+                    chunks[i][j] = 0;
+
+            /* Generate random point coordinates. Only one point is selected per chunk */
+            for(i = 0; i < POINTS; i++){
+                do {
+                    chunk_row = (int)HDrandom() % (int)(dim2[0]/chunk_dim2[0]);
+                    chunk_col = (int)HDrandom() % (int)(dim2[1]/chunk_dim2[1]);
+                } while (chunks[chunk_row][chunk_col]);
+
+                wbuf[i] = chunks[chunk_row][chunk_col] = chunk_row+chunk_col+1;
+                coord[i][0] = (hsize_t)chunk_row * chunk_dim2[0];
+                coord[i][1] = (hsize_t)chunk_col * chunk_dim2[1];
+            } /* end for */
+
+            /* Create first dataset with cur and max dimensions */
+            if((sid_max = H5Screate_simple(2, dim2, dim2_max)) < 0) FAIL_STACK_ERROR
+            dsid_max = H5Dcreate2(fid, DSET_FIXED_MAX, H5T_NATIVE_INT, sid_max, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+            if(dsid_max < 0)
+                FAIL_PUTS_ERROR("    Creating Chunked Dataset with maximum dimensions.")
+
+            /* Get the chunk index type */
+            if(H5D__layout_idx_type_test(dsid_max, &idx_type) < 0) FAIL_STACK_ERROR
+
+            /* Chunk index type depends on whether we are using the latest version of the format */
+            if(low == H5F_LIBVER_LATEST) {
+                if(alloc_time == H5D_ALLOC_TIME_EARLY
+#ifdef H5_HAVE_FILTER_DEFLATE
+                        && !compress
+#endif /* H5_HAVE_FILTER_DEFLATE */
+                        ) {
+                    if(idx_type != H5D_CHUNK_IDX_NONE)
+                        FAIL_PUTS_ERROR("should be using Non Index as index");
+                } else if (idx_type != H5D_CHUNK_IDX_FARRAY)
+                    FAIL_PUTS_ERROR("should be using Fixed Array as index");
+            } /* end if */
+            else {
+                if(idx_type != H5D_CHUNK_IDX_BTREE)
+                    FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+            } /* end else */
+
+            /* Create dataspace for write buffer */
+            if((mem_id = H5Screate_simple(1, msize, NULL)) < 0) TEST_ERROR;
+
+            /* Select the random points for writing */
+            if(H5Sselect_elements(sid_max, H5S_SELECT_SET, POINTS, (const hsize_t *)coord) < 0)
+                TEST_ERROR;
+
+            /* Write into dataset */
+            if(H5Dwrite(dsid_max, H5T_NATIVE_INT, mem_id, sid_max, H5P_DEFAULT, wbuf) < 0) TEST_ERROR;
+
+            /* Closing */
+            if(H5Dclose(dsid_max) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(sid_max) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(mem_id) < 0) FAIL_STACK_ERROR
+
+
+            /* Create second dataset with curr dim but NULL max dim */
+            if((sid = H5Screate_simple(2, dim2, NULL)) < 0) FAIL_STACK_ERROR
+            dsid = H5Dcreate2(fid, DSET_FIXED_NOMAX, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+            if(dsid < 0)
+                FAIL_PUTS_ERROR("    Creating Chunked Dataset.")
+
+            /* Get the chunk index type */
+            if(H5D__layout_idx_type_test(dsid, &idx_type) < 0) FAIL_STACK_ERROR
+
+            /* Chunk index type depends on whether we are using the latest version of the format */
+            if(low == H5F_LIBVER_LATEST) {
+                if(alloc_time == H5D_ALLOC_TIME_EARLY
+#ifdef H5_HAVE_FILTER_DEFLATE
+                        && !compress
+#endif /* H5_HAVE_FILTER_DEFLATE */
+                        ) {
+                    if(idx_type != H5D_CHUNK_IDX_NONE)
+                        FAIL_PUTS_ERROR("should be using Non Index as index");
+                } else if(idx_type != H5D_CHUNK_IDX_FARRAY)
+                    FAIL_PUTS_ERROR("should be using Fixed Array as index");
+            } else {
+                if(idx_type != H5D_CHUNK_IDX_BTREE)
+                    FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+            } /* end else */
+
+            /* Create dataspace for write buffer */
+            if((mem_id = H5Screate_simple(1, msize, NULL)) < 0) TEST_ERROR;
+
+            /* Select the random points for writing */
+            if(H5Sselect_elements(sid, H5S_SELECT_SET, POINTS, (const hsize_t *)coord) < 0)
+                TEST_ERROR;
+
+            /* Write into dataset */
+            if(H5Dwrite(dsid, H5T_NATIVE_INT, mem_id, sid, H5P_DEFAULT, wbuf) < 0) TEST_ERROR;
+
+            /* Closing */
+            if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(mem_id) < 0) FAIL_STACK_ERROR
+
+            /* Create the third dataset with bigger size and both curr & max dimensions are the same */
+            if((sid_big = H5Screate_simple(2, dim2_big, dim2_big)) < 0) FAIL_STACK_ERROR
+            dsid_big = H5Dcreate2(fid, DSET_FIXED_BIG, H5T_NATIVE_INT, sid_big, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+            if(dsid_big < 0)
+                FAIL_PUTS_ERROR("    Creating Big Chunked Dataset.")
+
+            /* Get the chunk index type */
+            if(H5D__layout_idx_type_test(dsid_big, &idx_type) < 0) FAIL_STACK_ERROR
+
+            /* Chunk index type depends on whether we are using the latest version of the format */
+            if(low == H5F_LIBVER_LATEST) {
+                if(alloc_time == H5D_ALLOC_TIME_EARLY
+#ifdef H5_HAVE_FILTER_DEFLATE
+                        && !compress
+#endif /* H5_HAVE_FILTER_DEFLATE */
+                        ) {
+                    if(idx_type != H5D_CHUNK_IDX_NONE)
+                        FAIL_PUTS_ERROR("should be using Non Index as index");
+                } else if(idx_type != H5D_CHUNK_IDX_FARRAY)
+                    FAIL_PUTS_ERROR("should be using Fixed Array as index");
+            } /* end if */
+            else {
+                if(idx_type != H5D_CHUNK_IDX_BTREE)
+                    FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+            } /* end else */
+
+            /* Reset the per-chunk usage map for the big dataset */
+            for(i = 0; i < dim2_big[0]/chunk_dim2[0]; i++)
+                for(j = 0; j < dim2_big[1]/chunk_dim2[1]; j++)
+                    chunks_big[i][j] = 0;
+
+            /* Generate random point coordinates. Only one point is selected per chunk */
+            for(i = 0; i < POINTS_BIG; i++){
+                do {
+                    chunk_row = (int)HDrandom() % (int)(dim2_big[0]/chunk_dim2[0]);
+                    chunk_col = (int)HDrandom() % (int)(dim2_big[1]/chunk_dim2[1]);
+                } while (chunks_big[chunk_row][chunk_col]);
+
+                wbuf_big[i] = chunks_big[chunk_row][chunk_col] = chunk_row+chunk_col+1;
+                coord_big[i][0] = (hsize_t)chunk_row * chunk_dim2[0];
+                coord_big[i][1] = (hsize_t)chunk_col * chunk_dim2[1];
+            } /* end for */
+
+            /* Create dataspace for write buffer */
+            if((big_mem_id = H5Screate_simple(1, msize_big, NULL)) < 0) TEST_ERROR;
+
+            /* Select the random points for writing */
+            if(H5Sselect_elements(sid_big, H5S_SELECT_SET, POINTS_BIG, (const hsize_t *)coord_big) < 0)
+                TEST_ERROR;
+
+            /* Write into dataset */
+            if(H5Dwrite(dsid_big, H5T_NATIVE_INT, big_mem_id, sid_big, H5P_DEFAULT, wbuf_big) < 0) TEST_ERROR;
+
+            /* Closing */
+            if(H5Dclose(dsid_big) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(sid_big) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(big_mem_id) < 0) FAIL_STACK_ERROR
+            if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR
+
+            /* Open the first dataset */
+            if((dsid = H5Dopen2(fid, DSET_FIXED_MAX, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+            /* Get dataset dataspace */
+            if((sid = H5Dget_space(dsid)) < 0) TEST_ERROR;
+
+            /* Create dataspace for read buffer */
+            if((mem_id = H5Screate_simple(1, msize, NULL)) < 0) TEST_ERROR;
+
+            /* Select the random points for reading */
+            if(H5Sselect_elements(sid, H5S_SELECT_SET, POINTS, (const hsize_t *)coord) < 0) TEST_ERROR;
+
+            /* Read from dataset */
+            if(H5Dread(dsid, H5T_NATIVE_INT, mem_id, sid, H5P_DEFAULT, rbuf) < 0) TEST_ERROR;
+
+            /* Verify that written and read data are the same */
+            for(i = 0; i < POINTS; i++)
+                if(rbuf[i] != wbuf[i]){
+                    printf("    Line %d: Incorrect value, wbuf[%u]=%d, rbuf[%u]=%d\n",
+                        __LINE__,(unsigned)i,wbuf[i],(unsigned)i,rbuf[i]);
+                    TEST_ERROR;
+                } /* end if */
+
+            /* Closing */
+            if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(mem_id) < 0) FAIL_STACK_ERROR
+
+            /* Open the second dataset */
+            if((dsid = H5Dopen2(fid, DSET_FIXED_NOMAX, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+            /* Get dataset dataspace */
+            if((sid = H5Dget_space(dsid)) < 0) TEST_ERROR;
+
+            /* Create dataspace for read buffer */
+            if((mem_id = H5Screate_simple(1, msize, NULL)) < 0) TEST_ERROR;
+
+            /* Select the random points for reading */
+            if(H5Sselect_elements(sid, H5S_SELECT_SET, POINTS, (const hsize_t *)coord) < 0) TEST_ERROR;
+
+            /* Read from dataset */
+            if(H5Dread(dsid, H5T_NATIVE_INT, mem_id, sid, H5P_DEFAULT, rbuf) < 0) TEST_ERROR;
+
+            /* Verify that written and read data are the same */
+            for(i = 0; i < POINTS; i++)
+                if(rbuf[i] != wbuf[i]){
+                    printf("    Line %d: Incorrect value, wbuf[%u]=%d, rbuf[%u]=%d\n",
+                        __LINE__,(unsigned)i,wbuf[i],(unsigned)i,rbuf[i]);
+                    TEST_ERROR;
+                } /* end if */
+
+            /* Closing */
+            if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(mem_id) < 0) FAIL_STACK_ERROR
+
+            /* Open the third dataset */
+            if((dsid_big = H5Dopen2(fid, DSET_FIXED_BIG, H5P_DEFAULT)) < 0) TEST_ERROR;
+            /* Get dataset dataspace */
+            if((sid_big = H5Dget_space(dsid_big)) < 0) TEST_ERROR;
+
+            /* Create dataspace for read buffer */
+            if((big_mem_id = H5Screate_simple(1, msize_big, NULL)) < 0) TEST_ERROR;
+
+            /* Select the random points for reading */
+            if(H5Sselect_elements(sid_big, H5S_SELECT_SET, POINTS_BIG, (const hsize_t *)coord_big) < 0) TEST_ERROR;
+            /* Read from dataset */
+            if(H5Dread(dsid_big, H5T_NATIVE_INT, big_mem_id, sid_big, H5P_DEFAULT, rbuf_big) < 0) TEST_ERROR;
+
+            /* Verify that written and read data are the same */
+            for(i = 0; i < POINTS_BIG; i++)
+                if(rbuf_big[i] != wbuf_big[i]){
+                    printf("    Line %d: Incorrect value, wbuf_big[%u]=%d, rbuf_big[%u]=%d\n",
+                        __LINE__,(unsigned)i,wbuf_big[i],(unsigned)i,rbuf_big[i]);
+                    TEST_ERROR;
+                } /* end if */
+
+            /* Closing */
+            if(H5Dclose(dsid_big) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(sid_big) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(big_mem_id) < 0) FAIL_STACK_ERROR
+
+            /* Delete datasets */
+            if(H5Ldelete(fid, DSET_FIXED_BIG, H5P_DEFAULT) < 0) FAIL_STACK_ERROR
+            if(H5Ldelete(fid, DSET_FIXED_NOMAX, H5P_DEFAULT) < 0) FAIL_STACK_ERROR
+            if(H5Ldelete(fid, DSET_FIXED_MAX, H5P_DEFAULT) < 0) FAIL_STACK_ERROR
+
+            /* Close everything */
+            if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+            /* Get the size of the file */
+            if((file_size = h5_get_file_size(filename, fapl)) < 0)
+                TEST_ERROR
+
+            /* Verify the file is correct size */
+            if(file_size != empty_size)
+                TEST_ERROR
+
+        } /* end for */
+#ifdef H5_HAVE_FILTER_DEFLATE
+    } /* end for */
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+    PASSED();
+    return 0;
+
+error:
+    /* Release everything that might still be open; the original error path
+     * omitted dsid_big/dsid_max/sid_big/sid_max/big_mem_id and closed an
+     * uninitialized mem_id */
+    H5E_BEGIN_TRY {
+        H5Pclose(dcpl);
+        H5Dclose(dsid);
+        H5Dclose(dsid_big);
+        H5Dclose(dsid_max);
+        H5Sclose(sid);
+        H5Sclose(sid_big);
+        H5Sclose(sid_max);
+        H5Sclose(mem_id);
+        H5Sclose(big_mem_id);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+    return -1;
+} /* end test_fixed_array() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_single_chunk
+ *
+ * Purpose: Tests support for Single Chunk indexing type
+ *
+ * Create the following 2 datasets:
+ * 1) chunked dataset with NULL max dims and cur_dims = chunk_dims
+ * 2) chunked dataset with cur_dims = max_dims = chunk_dims
+ *
+ * Repeat the following test with/without compression filter
+ * Repeat the following test with H5D_ALLOC_TIME_EARLY/H5D_ALLOC_TIME_LATE/H5D_ALLOC_TIME_INCR
+ * For the old format,
+ * verify that v1 btree indexing type is used for
+ * all datasets with all settings
+ * For the new format:
+ * Verify that Single Chunk indexing type is used for
+ * all datasets with all settings
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Vailin Choi; July 2011
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_single_chunk(hid_t fapl)
+{
+    char    filename[FILENAME_BUF_SIZE];    /* File name */
+    hid_t   fid = -1;       /* File ID */
+    hid_t   dcpl = -1;      /* Dataset creation property list ID */
+    hid_t   t_dcpl = -1;    /* Dataset creation property list ID */
+
+    hid_t   sid = -1, sid_max = -1;     /* Dataspace ID for dataset with fixed dimensions */
+    hid_t   did = -1, did_max = -1;     /* Dataset ID for dataset with fixed dimensions */
+    hsize_t dim2[2] = {DSET_DIM1, DSET_DIM2};   /* Dataset dimensions */
+    hsize_t t_dim2[2] = {50, 100};              /* Dataset dimensions */
+    int     wbuf[DSET_DIM1*DSET_DIM2];  /* write buffer */
+    int     t_wbuf[50*100];             /* write buffer */
+    int     rbuf[DSET_DIM1*DSET_DIM2];  /* read buffer */
+    int     t_rbuf[50*100];             /* read buffer */
+
+    H5D_chunk_index_t idx_type;     /* Dataset chunk index type */
+    H5F_libver_t low, high;         /* File format bounds */
+    H5D_alloc_time_t alloc_time;    /* Storage allocation time */
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+    unsigned    compress;           /* Whether chunks should be compressed */
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+    size_t  n, i;                   /* local index variables */
+    herr_t  ret;                    /* Generic return value */
+    h5_stat_size_t empty_size;      /* Size of an empty file */
+    h5_stat_size_t file_size;       /* Size of each file created */
+
+    TESTING("datasets w/Single Chunk indexing");
+
+    h5_fixname(FILENAME[17], fapl, filename, sizeof filename);
+
+    /* Check if we are using the latest version of the format */
+    if(H5Pget_libver_bounds(fapl, &low, &high) < 0) FAIL_STACK_ERROR
+
+    /* Create and close the file to get the file size */
+    if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+        STACK_ERROR
+    if(H5Fclose(fid) < 0)
+        STACK_ERROR
+
+    /* Get the size of the empty file */
+    if((empty_size = h5_get_file_size(filename, fapl)) < 0)
+        TEST_ERROR
+
+    /* Initialize write buffers with deterministic, distinct values
+     * (explicit cast avoids implicit size_t -> int narrowing warnings) */
+    for(i = n = 0; i < (DSET_DIM1 * DSET_DIM2); i++)
+        wbuf[i] = (int)n++;
+
+    for(i = n = 0; i < (50 * 100); i++)
+        t_wbuf[i] = (int)n++;
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+    /* Loop over compressing chunks */
+    for(compress = FALSE; compress <= TRUE; compress++) {
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+        /* Loop over storage allocation time */
+        for(alloc_time = H5D_ALLOC_TIME_EARLY; alloc_time <= H5D_ALLOC_TIME_INCR; alloc_time++) {
+            /* Create file */
+            if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR
+
+            /* Create dataset creation property list */
+            if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
+            if((t_dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
+
+            /* Set chunking: chunk size equals the dataset size so a single
+             * chunk holds the whole dataset */
+            if((ret = H5Pset_chunk(dcpl, 2, dim2)) < 0)
+                FAIL_PUTS_ERROR("    Problem with setting chunk.")
+
+            if((ret = H5Pset_chunk(t_dcpl, 2, t_dim2)) < 0)
+                FAIL_PUTS_ERROR("    Problem with setting chunk.")
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+            /* Check if we should compress the chunks */
+            if(compress) {
+                if(H5Pset_deflate(dcpl, 9) < 0) FAIL_STACK_ERROR
+                if(H5Pset_deflate(t_dcpl, 9) < 0) FAIL_STACK_ERROR
+            }
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+            /* Set fill time */
+            if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0) FAIL_STACK_ERROR
+            if(H5Pset_fill_time(t_dcpl, H5D_FILL_TIME_ALLOC) < 0) FAIL_STACK_ERROR
+
+            /* Set allocation time */
+            if(H5Pset_alloc_time(dcpl, alloc_time) < 0) FAIL_STACK_ERROR
+            if(H5Pset_alloc_time(t_dcpl, alloc_time) < 0) FAIL_STACK_ERROR
+
+            /* Create first dataset with cur and max dimensions */
+            if((sid_max = H5Screate_simple(2, dim2, dim2)) < 0) FAIL_STACK_ERROR
+            did_max = H5Dcreate2(fid, DSET_SINGLE_MAX, H5T_NATIVE_INT, sid_max, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+            if(did_max < 0)
+                FAIL_PUTS_ERROR("    Creating Chunked Dataset with maximum dimensions.")
+
+            /* Get the chunk index type */
+            if(H5D__layout_idx_type_test(did_max, &idx_type) < 0) FAIL_STACK_ERROR
+
+            /* Chunk index type depends on whether we are using the latest version of the format */
+            if(low == H5F_LIBVER_LATEST) {
+                if(idx_type != H5D_CHUNK_IDX_SINGLE)
+                    FAIL_PUTS_ERROR("should be using Single Chunk indexing");
+            } /* end if */
+            else {
+                if(idx_type != H5D_CHUNK_IDX_BTREE)
+                    FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+            } /* end else */
+
+            /* Write into dataset */
+            if(H5Dwrite(did_max, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0) TEST_ERROR;
+
+            /* Closing */
+            if(H5Dclose(did_max) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(sid_max) < 0) FAIL_STACK_ERROR
+
+            /* Create second dataset with curr dim but NULL max dim */
+            if((sid = H5Screate_simple(2, t_dim2, NULL)) < 0) FAIL_STACK_ERROR
+            did = H5Dcreate2(fid, DSET_SINGLE_NOMAX, H5T_NATIVE_INT, sid, H5P_DEFAULT, t_dcpl, H5P_DEFAULT);
+            if(did < 0)
+                FAIL_PUTS_ERROR("    Creating Chunked Dataset.")
+
+            /* Get the chunk index type */
+            if(H5D__layout_idx_type_test(did, &idx_type) < 0) FAIL_STACK_ERROR
+
+            /* Chunk index type depends on whether we are using the latest version of the format */
+            if(low == H5F_LIBVER_LATEST) {
+                if(idx_type != H5D_CHUNK_IDX_SINGLE)
+                    FAIL_PUTS_ERROR("should be using Single Chunk indexing");
+            } else {
+                if(idx_type != H5D_CHUNK_IDX_BTREE)
+                    FAIL_PUTS_ERROR("should be using v1 B-tree as index");
+            } /* end else */
+
+            /* Write into dataset */
+            if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, sid, H5P_DEFAULT, t_wbuf) < 0) TEST_ERROR;
+
+            /* Closing */
+            if(H5Dclose(did) < 0) FAIL_STACK_ERROR
+            if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+
+            /* Open the first dataset */
+            if((did_max = H5Dopen2(fid, DSET_SINGLE_MAX, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+            /* Read from dataset */
+            if(H5Dread(did_max, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0) TEST_ERROR;
+
+            /* Verify that written and read data are the same */
+            for(i = 0; i < (DSET_DIM1 * DSET_DIM2); i++)
+                if(rbuf[i] != wbuf[i]){
+                    printf("    Line %d: Incorrect value, wbuf[%u]=%d, rbuf[%u]=%d\n",
+                        __LINE__,(unsigned)i,wbuf[i],(unsigned)i,rbuf[i]);
+                    TEST_ERROR;
+                } /* end if */
+
+            /* Closing */
+            if(H5Dclose(did_max) < 0) FAIL_STACK_ERROR
+
+            /* Open the second dataset */
+            if((did = H5Dopen2(fid, DSET_SINGLE_NOMAX, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+            /* Clear the buffer the next H5Dread() fills (the original
+             * cleared rbuf here, which is not read again) */
+            HDmemset(t_rbuf, 0, sizeof(t_rbuf));
+
+            /* Read from dataset */
+            if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, t_rbuf) < 0) TEST_ERROR;
+
+            /* Verify that written and read data are the same */
+            for(i = 0; i < (50 * 100); i++)
+                if(t_rbuf[i] != t_wbuf[i]){
+                    printf("    Line %d: Incorrect value, t_wbuf[%u]=%d, t_rbuf[%u]=%d\n",
+                        __LINE__,(unsigned)i,t_wbuf[i],(unsigned)i,t_rbuf[i]);
+                    TEST_ERROR;
+                } /* end if */
+
+            /* Closing */
+            if(H5Dclose(did) < 0) FAIL_STACK_ERROR
+
+            /* Delete datasets */
+            if(H5Ldelete(fid, DSET_SINGLE_NOMAX, H5P_DEFAULT) < 0) FAIL_STACK_ERROR
+            if(H5Ldelete(fid, DSET_SINGLE_MAX, H5P_DEFAULT) < 0) FAIL_STACK_ERROR
+
+            /* Close everything */
+            if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+            /* Get the size of the file */
+            if((file_size = h5_get_file_size(filename, fapl)) < 0)
+                TEST_ERROR
+
+            /* Verify the file is correct size */
+            if(file_size != empty_size)
+                TEST_ERROR
+
+        } /* end for */
+#ifdef H5_HAVE_FILTER_DEFLATE
+    } /* end for */
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+    PASSED();
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(dcpl);
+        H5Pclose(t_dcpl);
+        H5Dclose(did);
+        H5Dclose(did_max);
+        H5Sclose(sid);
+        H5Sclose(sid_max);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+    return -1;
+} /* end test_single_chunk() */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * test_idx_compatible():
+ * Verify that the library can read datasets created with
+ * 1.6/1.8 library that use the B-tree indexing method.
+ *
+ * Programmer: Vailin Choi; 26th August, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_idx_compatible(void)
+{
+    hid_t	fid = -1;		/* File id */
+    hid_t       did = -1;		/* Dataset id */
+    const char  *filename = NULL;	/* old test file name */
+    unsigned    j;			/* Local index variable */
+    H5D_chunk_index_t idx_type;	        /* Chunked dataset index type */
+
+    /* Output message about test being performed */
+    TESTING("compatibility for 1.6/1.8 datasets that use B-tree indexing");
+
+    /* Check every pre-generated old-format file in OLD_FILENAME */
+    for(j = 0; j < NELMTS(OLD_FILENAME); j++) {
+
+	/* Generate correct name for test file by prepending the source path */
+	filename = H5_get_srcdir_filename(OLD_FILENAME[j]);
+
+	/* Open the file read-only; we only inspect the datasets */
+	if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+	    TEST_ERROR
+
+	/* Should be able to read the dataset w/o filter created under 1.8/1.6 */
+	if((did = H5Dopen2(fid, DSET, H5P_DEFAULT)) < 0)
+	    TEST_ERROR
+
+	/* Get the chunk index type (internal test routine) */
+	if(H5D__layout_idx_type_test(did, &idx_type) < 0) FAIL_STACK_ERROR
+
+	/* Verify index type: old-format files must use the v1 B-tree index */
+	if(idx_type != H5D_CHUNK_IDX_BTREE)
+	    FAIL_PUTS_ERROR("should be using v1 B-tree as index")
+
+	if(H5Dclose(did) < 0) FAIL_STACK_ERROR
+
+	/* Should be able to read the dataset w/ filter created under 1.8/1.6 */
+	if((did = H5Dopen2(fid, DSET_FILTER, H5P_DEFAULT)) < 0)
+	    TEST_ERROR
+
+	/* Get the chunk index type (internal test routine) */
+	if(H5D__layout_idx_type_test(did, &idx_type) < 0) FAIL_STACK_ERROR
+
+	/* Verify index type: old-format files must use the v1 B-tree index */
+	if(idx_type != H5D_CHUNK_IDX_BTREE)
+	    FAIL_PUTS_ERROR("should be using v1 B-tree as index")
+
+	if(H5Dclose(did) < 0) FAIL_STACK_ERROR
+
+	/* Close the file */
+	if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+    }
+
+    PASSED();
+    return 0;
+
+error:
+    /* Best-effort cleanup of anything still open */
+    H5E_BEGIN_TRY {
+        H5Dclose(did);
+	H5Fclose(fid);
+    } H5E_END_TRY;
+    return -1;
+} /* test_idx_compatible */
+
+/*-------------------------------------------------------------------------
+ *
+ * test_unfiltered_edge_chunks():
+ * Tests that partial edge chunks aren't filtered when the
+ * H5D_CHUNK_FILTER_PARTIAL_CHUNKS option is set.
+ *
+ * Programmer: Neil Fortner; 17th March, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_unfiltered_edge_chunks(hid_t fapl)
+{
+    hid_t       fid = -1;            /* File id */
+    hid_t       did = -1;            /* Dataset id */
+    hid_t       sid = -1;            /* Dataspace id */
+    hid_t       dcpl = -1;           /* DCPL id */
+    hsize_t     dim[2] = {4, 3};     /* Dataset dimensions */
+    hsize_t     cdim[2] = {2, 2};    /* Chunk dimension */
+    char        wbuf[4][3];          /* Write buffer */
+    char        rbuf[4][3];          /* Read buffer */
+    char        filename[FILENAME_BUF_SIZE] = ""; /* File name */
+    unsigned    opts;                /* Chunk options */
+    unsigned    i, j;                /* Local index variables */
+
+    /* Output message about test being performed */
+    TESTING("disabled partial chunk filters");
+
+    h5_fixname(FILENAME[14], fapl, filename, sizeof filename);
+
+    /* Create the file */
+    if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+        TEST_ERROR
+
+    /* Register byte-counting filter */
+    if(H5Zregister(H5Z_COUNT) < 0)
+        TEST_ERROR
+
+    /* Create dataspace */
+    if((sid = H5Screate_simple(2, dim, NULL)) < 0)
+        TEST_ERROR
+
+    /* Create DCPL */
+    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+        TEST_ERROR
+
+    /* Set chunk dimensions.  2x2 chunks over a 4x3 dataset yield 4 chunks,
+     * 2 of which are partial edge chunks along the second dimension. */
+    if(H5Pset_chunk(dcpl, 2, cdim) < 0)
+        TEST_ERROR
+
+    /* Add "count" filter */
+    if(H5Pset_filter(dcpl, H5Z_FILTER_COUNT, 0u, (size_t)0, NULL) < 0)
+        TEST_ERROR
+
+    /* Disable filters on partial chunks */
+    if(H5Pget_chunk_opts(dcpl, &opts) < 0)
+        TEST_ERROR
+    opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+    if(H5Pset_chunk_opts(dcpl, opts) < 0)
+        TEST_ERROR
+
+    /* Initialize write buffer with deterministic values */
+    for(i=0; i<dim[0]; i++)
+        for(j=0; j<dim[1]; j++)
+            wbuf[i][j] = (char)(2 * i) - (char)j;
+
+    /* Reset byte counts (globals updated by the "count" filter registered above) */
+    count_nbytes_read = (size_t)0;
+    count_nbytes_written = (size_t)0;
+
+    /* Create dataset */
+    if((did = H5Dcreate2(fid, DSET_CHUNKED_NAME, H5T_NATIVE_CHAR, sid,
+            H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        TEST_ERROR
+
+    /* Nothing should have been written, as we are not using early allocation */
+    if(count_nbytes_read != (size_t)0)
+        TEST_ERROR
+    if(count_nbytes_written != (size_t)0)
+        TEST_ERROR
+
+    /* Write data */
+    if(H5Dwrite(did, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
+        TEST_ERROR
+
+    /* Close dataset */
+    if(H5Dclose(did) < 0)
+        TEST_ERROR
+
+    /* Make sure only 2 of the 4 chunks were written through the filter (4 bytes
+     * each) */
+    if(count_nbytes_read != (size_t)0)
+        TEST_ERROR
+    if(count_nbytes_written != (size_t)(2 * cdim[0] * cdim[1]))
+        TEST_ERROR
+
+    /* Reopen the dataset */
+    if((did = H5Dopen2(fid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0)
+        TEST_ERROR
+
+    /* Read the dataset */
+    if(H5Dread(did, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+        TEST_ERROR
+
+    /* Verify that data read == data written */
+    for(i=0; i<dim[0]; i++)
+        for(j=0; j<dim[1]; j++)
+            if(rbuf[i][j] != wbuf[i][j])
+                TEST_ERROR
+
+    /* Make sure only 2 of the 4 chunks were read through the filter (4 bytes
+     * each) */
+    if(count_nbytes_read != (size_t)(2 * cdim[0] * cdim[1]))
+        TEST_ERROR
+    if(count_nbytes_written != (size_t)(2 * cdim[0] * cdim[1]))
+        TEST_ERROR
+
+    /* Close IDs */
+    if(H5Dclose(did) < 0)
+        TEST_ERROR
+    if(H5Pclose(dcpl) < 0)
+        TEST_ERROR
+    if(H5Sclose(sid) < 0)
+        TEST_ERROR
+    if(H5Fclose(fid) < 0)
+        TEST_ERROR
+
+    PASSED();
+    return 0;
+
+error:
+    /* Best-effort cleanup of anything still open */
+    H5E_BEGIN_TRY {
+        H5Dclose(did);
+        H5Pclose(dcpl);
+        H5Sclose(sid);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+    return -1;
+} /* test_unfiltered_edge_chunks */
+
+
+/*-------------------------------------------------------------------------
* Function: test_large_chunk_shrink
*
* Purpose: Tests support for shrinking a chunk larger than 1 MB by a
@@ -7966,6 +10069,609 @@ error:
/*-------------------------------------------------------------------------
+ * Function: test_swmr_non_latest
+ *
+ * Purpose: Checks that a file created with either:
+ * (a) SWMR-write + non-latest-format
+ * (b) write + latest format
+ *		will generate a dataset with the latest chunk indexing type.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_swmr_non_latest(const char *env_h5_driver, hid_t fapl)
+{
+ char filename[FILENAME_BUF_SIZE];
+ hid_t fid = -1; /* File ID */
+ hid_t gid = -1; /* Group ID */
+ hid_t dcpl = -1; /* Dataset creation property list ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t did = -1; /* Dataset ID */
+ hsize_t dim[1], dims2[2]; /* Size of dataset */
+ hsize_t max_dim[1], max_dims2[2]; /* Maximum size of dataset */
+ hsize_t chunk_dim[1], chunk_dims2[2]; /* Chunk dimensions */
+ H5D_chunk_index_t idx_type; /* Chunk index type */
+ int data; /* Data to be written to the dataset */
+ H5F_libver_t low; /* File format low bound */
+
+ TESTING("File created with write+latest-format/SWMR-write+non-latest-format: dataset with latest chunk index");
+
+ /* Skip this test if SWMR I/O is not supported for the VFD specified
+ * by the environment variable.
+ */
+ if(!H5FD_supports_swmr_test(env_h5_driver)) {
+ SKIPPED();
+ HDputs(" Test skipped due to VFD not supporting SWMR I/O.");
+ return 0;
+ }
+
+ /* Check if we are using the latest version of the format */
+ if(H5Pget_libver_bounds(fapl, &low, NULL) < 0)
+ FAIL_STACK_ERROR
+
+ h5_fixname(FILENAME[18], fapl, filename, sizeof filename);
+
+ if(low == H5F_LIBVER_LATEST) {
+ /* Create file with write+latest-format */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+ } else {
+ /* Create file with SWMR-write+non-latest-format */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+ }
+
+ /* Create a chunked dataset: this will use extensible array chunk indexing */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ chunk_dim[0] = 6;
+ if(H5Pset_chunk(dcpl, 1, chunk_dim) < 0)
+ FAIL_STACK_ERROR
+
+ dim[0] = 1;
+ max_dim[0] = H5S_UNLIMITED;
+ if((sid = H5Screate_simple(1, dim, max_dim)) < 0)
+ FAIL_STACK_ERROR
+
+ if((did = H5Dcreate2(fid, DSET_CHUNKED_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Write to the dataset */
+ data = 100;
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify the dataset's indexing type */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ FAIL_STACK_ERROR
+ if(idx_type != H5D_CHUNK_IDX_EARRAY)
+ FAIL_PUTS_ERROR("created dataset not indexed by extensible array")
+
+ /* Closing */
+ if(H5Dclose(did) < 0) FAIL_STACK_ERROR
+ if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+ if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+ /* Open the file again */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the dataset in the file */
+ if((did = H5Dopen2(fid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify the dataset's indexing type */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ FAIL_STACK_ERROR
+ if(idx_type != H5D_CHUNK_IDX_EARRAY)
+ FAIL_PUTS_ERROR("created dataset not indexed by extensible array")
+
+ /* Read from the dataset and verify data read is correct */
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data) < 0)
+ FAIL_STACK_ERROR
+ if(data != 100)
+ FAIL_STACK_ERROR
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0) FAIL_STACK_ERROR
+
+ /* Create a group in the file */
+ if((gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a chunked dataset in the group: this will use v2 B-tree chunk indexing */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ chunk_dims2[0] = chunk_dims2[1] = 10;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims2) < 0)
+ FAIL_STACK_ERROR
+
+ dims2[0] = dims2[1] = 1;
+ max_dims2[0] = max_dims2[1] = H5S_UNLIMITED;
+ if((sid = H5Screate_simple(2, dims2, max_dims2)) < 0)
+ FAIL_STACK_ERROR
+
+ if((did = H5Dcreate2(gid, DSET_CHUNKED_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify the dataset's indexing type */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ FAIL_STACK_ERROR
+ if(idx_type != H5D_CHUNK_IDX_BT2)
+ FAIL_PUTS_ERROR("created dataset not indexed by v2 B-tree")
+
+ /* Closing */
+ if(H5Dclose(did) < 0) FAIL_STACK_ERROR
+ if(H5Sclose(sid) < 0) FAIL_STACK_ERROR
+ if(H5Gclose(gid) < 0) FAIL_STACK_ERROR
+ if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+ /* Open the file again */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) FAIL_STACK_ERROR
+
+ /* Open the group */
+ if((gid = H5Gopen2(fid, "group", H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+ /* Open the dataset in the group */
+ if((did = H5Dopen2(gid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify the dataset's indexing type */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ FAIL_STACK_ERROR
+ if(idx_type != H5D_CHUNK_IDX_BT2)
+ FAIL_PUTS_ERROR("created dataset not indexed by v2 B-tree")
+
+ /* Closing */
+ if(H5Dclose(did) < 0) FAIL_STACK_ERROR
+ if(H5Gclose(gid) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+
+ /* Reopen the file with SWMR-write */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0) FAIL_STACK_ERROR
+
+ /* Open the dataset in the file */
+ if((did = H5Dopen2(fid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify the dataset's indexing type */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ FAIL_STACK_ERROR
+ if(idx_type != H5D_CHUNK_IDX_EARRAY)
+ FAIL_PUTS_ERROR("created dataset not indexed by extensible array")
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0) FAIL_STACK_ERROR
+
+ /* Open the group */
+ if((gid = H5Gopen2(fid, "group", H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+ /* Open the dataset in the group */
+ if((did = H5Dopen2(gid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify the dataset's indexing type */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ FAIL_STACK_ERROR
+ if(idx_type != H5D_CHUNK_IDX_BT2)
+ FAIL_PUTS_ERROR("created dataset not indexed by v2 B-tree")
+
+ /* Write to the dataset in the group */
+ data = 99;
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data) < 0)
+ FAIL_STACK_ERROR
+
+ /* Closing */
+ if(H5Dclose(did) < 0) FAIL_STACK_ERROR
+ if(H5Gclose(gid) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+ /* Open the file again with SWMR read access */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ if((gid = H5Gopen2(fid, "group", H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+ /* Open the dataset */
+ if((did = H5Dopen2(gid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Read from the dataset and verify data read is correct */
+ data = 0;
+ if(H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data) < 0)
+ FAIL_STACK_ERROR
+ if(data != 99) FAIL_STACK_ERROR
+
+ /* Closing */
+ if(H5Dclose(did) < 0) FAIL_STACK_ERROR
+ if(H5Gclose(gid) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0) FAIL_STACK_ERROR
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Dclose(did);
+ H5Sclose(sid);
+ H5Gclose(gid);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return -1;
+} /* test_swmr_non_latest() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_earray_hdr_fd
+ *
+ * Purpose: Tests that extensible array header flush dependencies
+ * are created and torn down correctly when used as a
+ * chunk index.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_earray_hdr_fd(const char *env_h5_driver, hid_t fapl)
+{
+ char filename[FILENAME_BUF_SIZE];
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t did = -1;
+ hid_t tid = -1;
+ hid_t dcpl = -1;
+ hid_t msid = -1;
+ H5D_chunk_index_t idx_type;
+ const hsize_t shape[1] = { 8 };
+ const hsize_t maxshape[1] = { H5S_UNLIMITED };
+ const hsize_t chunk[1] = { 8 };
+ const int buffer[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+ H5O_info_t info;
+
+ TESTING("Extensible array chunk index header flush dependencies handled correctly");
+
+ /* Skip this test if SWMR I/O is not supported for the VFD specified
+ * by the environment variable.
+ */
+ if(!H5FD_supports_swmr_test(env_h5_driver)) {
+ SKIPPED();
+ HDputs(" Test skipped due to VFD not supporting SWMR I/O.");
+ return 0;
+ } /* end if */
+
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR;
+
+ h5_fixname(FILENAME[19], fapl, filename, sizeof(filename));
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create a dataset with one unlimited dimension */
+ if((sid = H5Screate_simple(1, shape, maxshape)) < 0)
+ FAIL_STACK_ERROR;
+ if((tid = H5Tcopy(H5T_NATIVE_INT32)) < 0)
+ FAIL_STACK_ERROR;
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_chunk(dcpl, 1, chunk) < 0)
+ FAIL_STACK_ERROR;
+ if((did = H5Dcreate2(fid, DSET_EARRAY_HDR_FD, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT )) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the chunk index type */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ FAIL_STACK_ERROR;
+ if(idx_type != H5D_CHUNK_IDX_EARRAY)
+ FAIL_PUTS_ERROR("should be using extensible array as index");
+
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Tclose(tid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Fstart_swmr_write(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Write data to the dataset */
+ if((did = H5Dopen2(fid, DSET_EARRAY_HDR_FD, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if((tid = H5Dget_type(did)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dwrite(did, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Tclose(tid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* The second call triggered a bug in the library (JIRA issue: SWMR-95) */
+ if(H5Oget_info_by_name(fid, DSET_EARRAY_HDR_FD, &info, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Oget_info_by_name(fid, DSET_EARRAY_HDR_FD, &info, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ H5Dclose(did);
+ H5Pclose(dcpl);
+ H5Tclose(tid);
+ H5Sclose(sid);
+ H5Sclose(msid);
+ } H5E_END_TRY;
+ return -1;
+} /* test_earray_hdr_fd() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_farray_hdr_fd
+ *
+ * Purpose: Tests that fixed array header flush dependencies
+ * are created and torn down correctly when used as a
+ * chunk index.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_farray_hdr_fd(const char *env_h5_driver, hid_t fapl)
+{
+ char filename[FILENAME_BUF_SIZE];
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t did = -1;
+ hid_t tid = -1;
+ hid_t dcpl = -1;
+ hid_t msid = -1;
+ H5D_chunk_index_t idx_type;
+ const hsize_t shape[1] = { 8 };
+ const hsize_t maxshape[1] = { 64 };
+ const hsize_t chunk[1] = { 8 };
+ const int buffer[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+ H5O_info_t info;
+
+ TESTING("Fixed array chunk index header flush dependencies handled correctly");
+
+ /* Skip this test if SWMR I/O is not supported for the VFD specified
+ * by the environment variable.
+ */
+ if(!H5FD_supports_swmr_test(env_h5_driver)) {
+ SKIPPED();
+ HDputs(" Test skipped due to VFD not supporting SWMR I/O.");
+ return 0;
+ } /* end if */
+
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR;
+
+ h5_fixname(FILENAME[20], fapl, filename, sizeof(filename));
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create a chunked dataset with fixed dimensions */
+ if((sid = H5Screate_simple(1, shape, maxshape)) < 0)
+ FAIL_STACK_ERROR;
+ if((tid = H5Tcopy(H5T_NATIVE_INT32)) < 0)
+ FAIL_STACK_ERROR;
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_chunk(dcpl, 1, chunk) < 0)
+ FAIL_STACK_ERROR;
+ if((did = H5Dcreate2(fid, DSET_FARRAY_HDR_FD, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT )) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the chunk index type */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ FAIL_STACK_ERROR;
+ if(idx_type != H5D_CHUNK_IDX_FARRAY)
+ FAIL_PUTS_ERROR("should be using fixed array as index");
+
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Tclose(tid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Fstart_swmr_write(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Write data to the dataset */
+ if((did = H5Dopen2(fid, DSET_FARRAY_HDR_FD, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if((tid = H5Dget_type(did)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dwrite(did, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Tclose(tid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* The second call triggered a bug in the library (JIRA issue: SWMR-95) */
+ if(H5Oget_info_by_name(fid, DSET_FARRAY_HDR_FD, &info, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Oget_info_by_name(fid, DSET_FARRAY_HDR_FD, &info, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ H5Dclose(did);
+ H5Pclose(dcpl);
+ H5Tclose(tid);
+ H5Sclose(sid);
+ H5Sclose(msid);
+ } H5E_END_TRY;
+ return -1;
+} /* test_farray_hdr_fd() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_bt2_hdr_fd
+ *
+ * Purpose: Tests that version 2 B-tree header flush dependencies
+ * are created and torn down correctly when used as a
+ * chunk index.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_bt2_hdr_fd(const char *env_h5_driver, hid_t fapl)
+{
+ char filename[FILENAME_BUF_SIZE];
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t did = -1;
+ hid_t tid = -1;
+ hid_t dcpl = -1;
+ hid_t msid = -1;
+ H5D_chunk_index_t idx_type;
+ const hsize_t shape[2] = { 8, 8 };
+ const hsize_t maxshape[2] = { H5S_UNLIMITED, H5S_UNLIMITED };
+ const hsize_t chunk[2] = { 8, 8 };
+ const int buffer[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+ H5O_info_t info;
+
+ TESTING("Version 2 B-tree chunk index header flush dependencies handled correctly");
+
+ /* Skip this test if SWMR I/O is not supported for the VFD specified
+ * by the environment variable.
+ */
+ if(!H5FD_supports_swmr_test(env_h5_driver)) {
+ SKIPPED();
+ HDputs(" Test skipped due to VFD not supporting SWMR I/O.");
+ return 0;
+ } /* end if */
+
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR;
+
+ h5_fixname(FILENAME[21], fapl, filename, sizeof(filename));
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+    /* Create a 2-D chunked dataset with unlimited dimensions */
+ if((sid = H5Screate_simple(2, shape, maxshape)) < 0)
+ FAIL_STACK_ERROR;
+ if((tid = H5Tcopy(H5T_NATIVE_INT32)) < 0)
+ FAIL_STACK_ERROR;
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_chunk(dcpl, 2, chunk) < 0)
+ FAIL_STACK_ERROR;
+ if((did = H5Dcreate2(fid, DSET_BT2_HDR_FD, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT )) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the chunk index type */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ FAIL_STACK_ERROR;
+ if(idx_type != H5D_CHUNK_IDX_BT2)
+        FAIL_PUTS_ERROR("should be using v2 B-tree as index");
+
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Tclose(tid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Fstart_swmr_write(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Write data to the dataset */
+ if((did = H5Dopen2(fid, DSET_BT2_HDR_FD, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if((tid = H5Dget_type(did)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dwrite(did, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Tclose(tid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* The second call triggered a bug in the library (JIRA issue: SWMR-95) */
+ if(H5Oget_info_by_name(fid, DSET_BT2_HDR_FD, &info, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Oget_info_by_name(fid, DSET_BT2_HDR_FD, &info, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ H5Dclose(did);
+ H5Pclose(dcpl);
+ H5Tclose(tid);
+ H5Sclose(sid);
+ H5Sclose(msid);
+ } H5E_END_TRY;
+ return -1;
+} /* test_bt2_hdr_fd() */
+
+
+/*-------------------------------------------------------------------------
* Function: test_zero_dim_dset
*
 * Purpose:     Tests support for reading a 1D chunked dataset with
@@ -7992,7 +10698,7 @@ test_zero_dim_dset(hid_t fapl)
TESTING("shrinking large chunk");
- h5_fixname(FILENAME[13], fapl, filename, sizeof filename);
+ h5_fixname(FILENAME[16], fapl, filename, sizeof filename);
/* Create file */
if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR
@@ -9091,7 +11797,7 @@ main(void)
/* Don't run this test using certain file drivers */
envval = HDgetenv("HDF5_DRIVER");
if(envval == NULL)
- envval = "nomatch";
+ envval = "sec2";
/* Set the random # seed */
HDsrandom((unsigned)HDtime(NULL));
@@ -9185,10 +11891,21 @@ main(void)
nerrors += (test_huge_chunks(my_fapl) < 0 ? 1 : 0);
nerrors += (test_chunk_cache(my_fapl) < 0 ? 1 : 0);
nerrors += (test_big_chunks_bypass_cache(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_chunk_fast(envval, my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_reopen_chunk_fast(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_chunk_fast_bug1(my_fapl) < 0 ? 1 : 0);
nerrors += (test_chunk_expand(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_layout_extend(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_large_chunk_shrink(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_zero_dim_dset(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_layout_extend(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_fixed_array(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_idx_compatible() < 0 ? 1 : 0);
+ nerrors += (test_unfiltered_edge_chunks(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_single_chunk(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_large_chunk_shrink(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_zero_dim_dset(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_swmr_non_latest(envval, my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_earray_hdr_fd(envval, my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_farray_hdr_fd(envval, my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_bt2_hdr_fd(envval, my_fapl) < 0 ? 1 : 0);
if(H5Fclose(file) < 0)
goto error;
diff --git a/test/earray.c b/test/earray.c
index 368ee42..3bd46e8 100644
--- a/test/earray.c
+++ b/test/earray.c
@@ -201,6 +201,7 @@ const H5AC_class_t H5AC_EARRAY_TEST[1] = {{
/* mem_type */ H5FD_MEM_DEFAULT,
/* flags */ H5AC__CLASS_NO_IO_FLAG,
/* get_load_size */ (H5AC_get_load_size_func_t)earray_cache_test_get_load_size,
+    /* verify_chksum */  (H5AC_verify_chksum_func_t)NULL,
/* deserialize */ (H5AC_deserialize_func_t)earray_cache_test_deserialize,
/* image_len */ (H5AC_image_len_func_t)earray_cache_test_image_len,
/* pre_serialize */ (H5AC_pre_serialize_func_t)NULL,
@@ -332,10 +333,10 @@ finish_tparam(earray_test_param_t *tparam)
*-------------------------------------------------------------------------
*/
static int
-create_file(hid_t fapl, hid_t *file, H5F_t **f)
+create_file(unsigned flags, hid_t fapl, hid_t *file, H5F_t **f)
{
/* Create the file to work on */
- if((*file = H5Fcreate(filename_g, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((*file = H5Fcreate(filename_g, flags, H5P_DEFAULT, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -893,7 +894,7 @@ test_create(hid_t fapl, H5EA_create_t *cparam, earray_test_param_t H5_ATTR_UNUSE
haddr_t ea_addr = HADDR_UNDEF; /* Array address in file */
/* Create file & retrieve pointer to internal file object */
- if(create_file(fapl, &file, &f) < 0)
+ if(create_file(H5F_ACC_TRUNC, fapl, &file, &f) < 0)
TEST_ERROR
/*
@@ -1115,7 +1116,7 @@ test_reopen(hid_t fapl, H5EA_create_t *cparam, earray_test_param_t *tparam)
haddr_t ea_addr = HADDR_UNDEF; /* Array address in file */
/* Create file & retrieve pointer to internal file object */
- if(create_file(fapl, &file, &f) < 0)
+ if(create_file(H5F_ACC_TRUNC, fapl, &file, &f) < 0)
TEST_ERROR
/*
@@ -1188,7 +1189,7 @@ test_open_twice(hid_t fapl, H5EA_create_t *cparam, earray_test_param_t *tparam)
haddr_t ea_addr = HADDR_UNDEF; /* Array address in file */
/* Create file & retrieve pointer to internal file object */
- if(create_file(fapl, &file, &f) < 0)
+ if(create_file(H5F_ACC_TRUNC, fapl, &file, &f) < 0)
TEST_ERROR
/*
@@ -1294,7 +1295,7 @@ test_delete_open(hid_t fapl, H5EA_create_t *cparam, earray_test_param_t *tparam)
h5_stat_size_t file_size; /* File size, after deleting array */
/* Create file & retrieve pointer to internal file object */
- if(create_file(fapl, &file, &f) < 0)
+ if(create_file(H5F_ACC_TRUNC, fapl, &file, &f) < 0)
TEST_ERROR
/*
@@ -1387,296 +1388,6 @@ error:
return 1;
} /* test_delete_open() */
-
-/*-------------------------------------------------------------------------
- * Function: test_flush_depend_cb
- *
- * Purpose: Callback for flush dependency 'depend'/'undepend' and
- * 'support'/'unsupport' routines
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Quincey Koziol
- * Tuesday, May 26, 2009
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-test_flush_depend_cb(const void *_elmt, size_t nelmts, void *udata)
-{
- earray_flush_depend_ctx_t *ctx = (earray_flush_depend_ctx_t *)udata;
- const uint64_t *elmt = (const uint64_t *)_elmt; /* Convenience pointer to native elements */
-
- /* Check for out of order flush */
- if(ctx->base_obj)
- return(FAIL);
-
- /* Look for magic values */
- while(nelmts > 0) {
- /* Check for elements of interest */
- if((uint64_t)0 == *elmt) {
- /* Check for out-of-order flush */
- if(!ctx->idx0_obj)
- return(FAIL);
-
- /* Indicate that the element was flushed */
- ctx->idx0_elem = TRUE;
- } /* end if */
- else if((uint64_t)1 == *elmt) {
- /* Check for out-of-order flush */
- if(!ctx->idx1_obj)
- return(FAIL);
-
- /* Indicate that the element was flushed */
- ctx->idx1_elem = TRUE;
- } /* end if */
- else if((uint64_t)10000 == *elmt) {
- /* Check for out-of-order flush */
- if(!ctx->idx10000_obj)
- return(FAIL);
-
- /* Indicate that the element was flushed */
- ctx->idx10000_elem = TRUE;
- } /* end if */
-
- /* Decrement elements left to inspect */
- nelmts--;
- elmt++;
- } /* end while */
-
- return(SUCCEED);
-} /* end test_flush_depend_cb() */
-
-
-/*-------------------------------------------------------------------------
- * Function: test_flush_depend
- *
- * Purpose: Exercise flush dependency 'depend'/'undepend' routines
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Quincey Koziol
- * Thursday, May 21, 2009
- *
- *-------------------------------------------------------------------------
- */
-static unsigned
-test_flush_depend(hid_t fapl, H5EA_create_t *cparam, earray_test_param_t H5_ATTR_UNUSED *tparam)
-{
- hid_t file = -1; /* File ID */
- H5F_t *f = NULL; /* Internal file object pointer */
- H5EA_t *ea = NULL; /* Extensible array wrapper */
- haddr_t ea_addr = HADDR_UNDEF; /* Array address in file */
- H5EA__ctx_cb_t cb; /* Extensible array context action info */
- earray_flush_depend_ctx_t fd_info; /* Context information for flush depend test */
- haddr_t base_addr; /* Base test entry address */
- earray_test_t *base_entry; /* Pointer to base test entry */
- haddr_t addr1; /* Test entry #1 address */
- earray_test_t *entry1; /* Pointer to test entry #1 */
- haddr_t addr2; /* Test entry #2 address */
- earray_test_t *entry2; /* Pointer to test entry #2 */
- haddr_t addr3; /* Test entry #3 address */
- earray_test_t *entry3; /* Pointer to test entry #3 */
- uint64_t welmt; /* Element to write */
- hsize_t idx; /* Index value of element */
-
- /* Create file & retrieve pointer to internal file object */
- if(create_file(fapl, &file, &f) < 0)
- TEST_ERROR
-
- /*
- * Display testing message
- */
- TESTING("flush dependencies on array metadata");
-
- /* Create array */
- cb.encode = test_flush_depend_cb;
- HDmemset(&fd_info, 0, sizeof(earray_flush_depend_ctx_t));
- cb.udata = &fd_info;
- if(create_array(f, H5P_DATASET_XFER_DEFAULT, cparam, &ea, &ea_addr, &cb) < 0)
- TEST_ERROR
-
- /* Verify the creation parameters */
- if(verify_cparam(ea, cparam) < 0)
- TEST_ERROR
-
- /* Create base entry to insert */
- if(NULL == (base_entry = (earray_test_t *)HDmalloc(sizeof(earray_test_t))))
- TEST_ERROR
- HDmemset(base_entry, 0, sizeof(earray_test_t));
- base_entry->idx = (uint64_t)-1;
- base_entry->fd_info = &fd_info;
-
- /* Insert test entry into cache */
- base_addr = HADDR_MAX;
- if(H5AC_insert_entry(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, base_addr, base_entry, H5AC__PIN_ENTRY_FLAG) < 0)
- TEST_ERROR
-
- /* Set the base entry as a flush dependency for the array */
- if(H5EA_depend((H5AC_info_t *)base_entry, ea) < 0)
- TEST_ERROR
-
- /* Create entry #1 to insert */
- if(NULL == (entry1 = (earray_test_t *)HDmalloc(sizeof(earray_test_t))))
- TEST_ERROR
- HDmemset(entry1, 0, sizeof(earray_test_t));
- entry1->fd_info = &fd_info;
-
- /* Insert test entry into cache */
- addr1 = HADDR_MAX - 1;
- if(H5AC_insert_entry(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr1, entry1, H5AC__PIN_ENTRY_FLAG) < 0)
- TEST_ERROR
-
- /* Set the test entry as a flush dependency for 0th index in the array */
- if(H5EA_support(ea, H5P_DATASET_XFER_DEFAULT, (hsize_t)0, (H5AC_info_t *)entry1) < 0)
- TEST_ERROR
-
- /* Set element of array */
- welmt = (uint64_t)0;
- idx = 0;
- if(H5EA_set(ea, H5P_DATASET_XFER_DEFAULT, idx, &welmt) < 0)
- FAIL_STACK_ERROR
-
- /* Create entry #2 to insert */
- if(NULL == (entry2 = (earray_test_t *)HDmalloc(sizeof(earray_test_t))))
- TEST_ERROR
- HDmemset(entry2, 0, sizeof(earray_test_t));
- entry2->idx = (uint64_t)1;
- entry2->fd_info = &fd_info;
-
- /* Insert test entry into cache */
- addr2 = HADDR_MAX - 2;
- if(H5AC_insert_entry(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr2, entry2, H5AC__PIN_ENTRY_FLAG) < 0)
- TEST_ERROR
-
- /* Set the test entry as a flush dependency for 1st index in the array */
- if(H5EA_support(ea, H5P_DATASET_XFER_DEFAULT, (hsize_t)1, (H5AC_info_t *)entry2) < 0)
- TEST_ERROR
-
- /* Set element of array */
- welmt = (uint64_t)1;
- idx = 1;
- if(H5EA_set(ea, H5P_DATASET_XFER_DEFAULT, idx, &welmt) < 0)
- FAIL_STACK_ERROR
-
- /* Create entry #3 to insert */
- if(NULL == (entry3 = (earray_test_t *)HDmalloc(sizeof(earray_test_t))))
- TEST_ERROR
- HDmemset(entry3, 0, sizeof(earray_test_t));
- entry3->idx = (uint64_t)10000;
- entry3->fd_info = &fd_info;
-
- /* Insert test entry into cache */
- addr3 = HADDR_MAX - 3;
- if(H5AC_insert_entry(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr3, entry3, H5AC__PIN_ENTRY_FLAG) < 0)
- TEST_ERROR
-
- /* Set the test entry as a flush dependency for 10,000th index in the array */
- if(H5EA_support(ea, H5P_DATASET_XFER_DEFAULT, (hsize_t)10000, (H5AC_info_t *)entry3) < 0)
- TEST_ERROR
-
- /* Set element of array */
- welmt = (uint64_t)10000;
- idx = 10000;
- if(H5EA_set(ea, H5P_DATASET_XFER_DEFAULT, idx, &welmt) < 0)
- FAIL_STACK_ERROR
-
-
- /* Flush the cache */
- if(H5Fflush(file, H5F_SCOPE_GLOBAL) < 0)
- TEST_ERROR
-
- /* Check that all callback flags have been set */
- if(!fd_info.base_obj)
- TEST_ERROR
- if(!fd_info.idx0_obj)
- TEST_ERROR
- if(!fd_info.idx0_elem)
- TEST_ERROR
- if(!fd_info.idx1_obj)
- TEST_ERROR
- if(!fd_info.idx1_elem)
- TEST_ERROR
- if(!fd_info.idx10000_obj)
- TEST_ERROR
- if(!fd_info.idx10000_elem)
- TEST_ERROR
-
-
- /* Remove the base entry as a flush dependency for the array */
- if(H5EA_undepend((H5AC_info_t *)base_entry, ea) < 0)
- TEST_ERROR
-
- /* Protect the base entry */
- if(NULL == (base_entry = (earray_test_t *)H5AC_protect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, base_addr, NULL, H5AC__NO_FLAGS_SET)))
- TEST_ERROR
-
- /* Unprotect & unpin the base entry */
- if(H5AC_unprotect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, base_addr, base_entry, (H5AC__UNPIN_ENTRY_FLAG | H5AC__DELETED_FLAG)) < 0)
- TEST_ERROR
-
- /* Remove the test entry as a flush dependency for 0th index in the array */
- if(H5EA_unsupport(ea, H5P_DATASET_XFER_DEFAULT, (hsize_t)0, (H5AC_info_t *)entry1) < 0)
- TEST_ERROR
-
- /* Protect the test entry */
- if(NULL == (entry1 = (earray_test_t *)H5AC_protect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr1, NULL, H5AC__NO_FLAGS_SET)))
- TEST_ERROR
-
- /* Unprotect & unpin the test entry */
- if(H5AC_unprotect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr1, entry1, (H5AC__UNPIN_ENTRY_FLAG | H5AC__DELETED_FLAG)) < 0)
- TEST_ERROR
-
- /* Remove the test entry as a flush dependency for 1st index in the array */
- if(H5EA_unsupport(ea, H5P_DATASET_XFER_DEFAULT, (hsize_t)1, (H5AC_info_t *)entry2) < 0)
- TEST_ERROR
-
- /* Protect the test entry */
- if(NULL == (entry2 = (earray_test_t *)H5AC_protect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr2, NULL, H5AC__NO_FLAGS_SET)))
- TEST_ERROR
-
- /* Unprotect & unpin the test entry */
- if(H5AC_unprotect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr2, entry2, (H5AC__UNPIN_ENTRY_FLAG | H5AC__DELETED_FLAG)) < 0)
- TEST_ERROR
-
- /* Remove the test entry as a flush dependency for 10,000th index in the array */
- if(H5EA_unsupport(ea, H5P_DATASET_XFER_DEFAULT, (hsize_t)10000, (H5AC_info_t *)entry3) < 0)
- TEST_ERROR
-
- /* Protect the test entry */
- if(NULL == (entry3 = (earray_test_t *)H5AC_protect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr3, NULL, H5AC__NO_FLAGS_SET)))
- TEST_ERROR
-
- /* Unprotect & unpin the test entry */
- if(H5AC_unprotect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr3, entry3, (H5AC__UNPIN_ENTRY_FLAG | H5AC__DELETED_FLAG)) < 0)
- TEST_ERROR
-
- /* Close the extensible array */
- if(H5EA_close(ea, H5P_DATASET_XFER_DEFAULT) < 0)
- FAIL_STACK_ERROR
- ea = NULL;
-
- /* Close the file */
- if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
-
- /* All tests passed */
- PASSED()
-
- return 0;
-
-error:
- H5E_BEGIN_TRY {
- if(ea)
- H5EA_close(ea, H5P_DATASET_XFER_DEFAULT);
- H5Fclose(file);
- } H5E_END_TRY;
-
- return 1;
-} /* test_flush_depend() */
-
/* Extensible array iterator info for forward iteration */
typedef struct eiter_fw_t {
hsize_t idx; /* Index of next array location */
@@ -2537,7 +2248,7 @@ test_set_elmts(hid_t fapl, H5EA_create_t *cparam, earray_test_param_t *tparam,
TESTING(test_str);
/* Create file & retrieve pointer to internal file object */
- if(create_file(fapl, &file, &f) < 0)
+ if(create_file(H5F_ACC_TRUNC, fapl, &file, &f) < 0)
TEST_ERROR
/* Create array */
@@ -2711,7 +2422,7 @@ test_skip_elmts(hid_t fapl, H5EA_create_t *cparam, earray_test_param_t *tparam,
TESTING(test_str);
/* Create file & retrieve pointer to internal file object */
- if(create_file(fapl, &file, &f) < 0)
+ if(create_file(H5F_ACC_TRUNC, fapl, &file, &f) < 0)
TEST_ERROR
/* Create array */
@@ -2858,6 +2569,11 @@ main(void)
unsigned nerrors = 0; /* Cumulative error count */
time_t curr_time; /* Current time, for seeding random number generator */
int ExpressMode; /* Test express value */
+ const char *env_h5_drvr; /* File Driver value from environment */
+
+ env_h5_drvr = HDgetenv("HDF5_DRIVER");
+ if(env_h5_drvr == NULL)
+ env_h5_drvr = "nomatch";
/* Reset library */
h5_reset();
@@ -2923,7 +2639,14 @@ main(void)
nerrors += test_reopen(fapl, &cparam, &tparam);
nerrors += test_open_twice(fapl, &cparam, &tparam);
nerrors += test_delete_open(fapl, &cparam, &tparam);
- nerrors += test_flush_depend(fapl, &cparam, &tparam);
+ /*
+ * nerrors += test_flush_depend(env_h5_drvr, fapl, &cparam, &tparam);
+ * The test test_flush_depend() was removed with this checkin because chunk proxy for SWMR handling is
+ * no longer used: chunk allocation is moved up to the chunk layer, data is written to the chunk before
+ * inserting the chunk address into the index structure.
+ * New tests will be written in the future to verify dependency within the index data structures and
+ * with the object header.
+ */
/* Iterate over the type of capacity tests */
for(curr_iter = EARRAY_ITER_FW; curr_iter < EARRAY_ITER_NITERS; H5_INC_ENUM(earray_iter_type_t, curr_iter)) {
diff --git a/test/family_v16_00000.h5 b/test/family_v16_00000.h5
index ac75ea9..aaa3dad 100644
--- a/test/family_v16_00000.h5
+++ b/test/family_v16_00000.h5
Binary files differ
diff --git a/test/filespace_1_6.h5 b/test/filespace_1_6.h5
index 5afc718..c8aa9df 100644
--- a/test/filespace_1_6.h5
+++ b/test/filespace_1_6.h5
Binary files differ
diff --git a/test/fill_old.h5 b/test/fill_old.h5
index e77f519..4dd7740 100644
--- a/test/fill_old.h5
+++ b/test/fill_old.h5
Binary files differ
diff --git a/test/flush2.c b/test/flush2.c
index e86d646..ddb1c59 100644
--- a/test/flush2.c
+++ b/test/flush2.c
@@ -24,6 +24,11 @@
*/
#include "h5test.h"
+/* Make this private property (defined in H5Fprivate.h) available */
+/* This is used in the helper routine clear_status_flags() */
+#define H5F_ACS_CLEAR_STATUS_FLAGS_NAME "clear_status_flags"
+
+
const char *FILENAME[] = {
"flush",
"noflush",
@@ -141,6 +146,53 @@ error:
return 1;
} /* end check_file() */
+/*-------------------------------------------------------------------------
+ * Function: clear_status_flags
+ *
+ * Purpose: To clear the status_flags in the superblock of the file.
+ * It is similar to the tool "h5clear".
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi
+ * July 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+clear_status_flags(char *name, hid_t fapl)
+{
+ hid_t new_fapl = -1;
+ hid_t fid = -1;
+ hbool_t clear = TRUE;
+
+ /* Get a copy of fapl */
+ if((new_fapl = H5Pcopy(fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Set this private property */
+ if(H5Pset(new_fapl, H5F_ACS_CLEAR_STATUS_FLAGS_NAME, &clear) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Has to open rw */
+ if((fid = H5Fopen(name, H5F_ACC_RDWR, new_fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Close the property list */
+ if(H5Pclose(new_fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ return 0;
+
+error:
+ return 1;
+} /* clear_status_flags() */
+
/*-------------------------------------------------------------------------
* Function: main
@@ -173,6 +225,13 @@ main(void)
/* Check the case where the file was flushed */
h5_fixname(FILENAME[0], fapl, name, sizeof name);
+
+ /* Clear the status_flags of the file which is flushed and exited in flush1.c */
+ if(clear_status_flags(name, fapl) < 0) {
+ H5_FAILED()
+ goto error;
+ }
+
if(check_file(name, fapl, FALSE)) {
H5_FAILED()
goto error;
@@ -188,6 +247,8 @@ main(void)
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
h5_fixname(FILENAME[1], fapl, name, sizeof name);
+ /* No need to clear the status_flags because this file is not flushed in flush1.c */
+ /* H5Fopen() in check_file() will just return error */
if(check_file(name, fapl, FALSE))
PASSED()
else {
@@ -208,6 +269,13 @@ main(void)
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
h5_fixname(FILENAME[2], fapl, name, sizeof name);
+
+ /* Clear the status_flags of the file which is flushed and exited in flush1.c */
+ if(clear_status_flags(name, fapl) < 0) {
+ H5_FAILED()
+ goto error;
+ }
+
if(check_file(name, fapl, TRUE))
PASSED()
else {
diff --git a/test/flushrefresh.c b/test/flushrefresh.c
new file mode 100644
index 0000000..0fc6e2f
--- /dev/null
+++ b/test/flushrefresh.c
@@ -0,0 +1,1279 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: Mike McGreevy
+ * June 30, 2010
+ *
+ * Purpose: This test file contains routines used to test flushing and
+ * refreshing individual objects' metadata from the cache.
+ *
+ * Note: This file should NOT be run manually. Instead, invoke it
+ * via its associated test script, testflushrefresh.sh
+ *
+ */
+
+/* ======== */
+/* Includes */
+/* ======== */
+
+#include "testhdf5.h"
+
+/* ======= */
+/* Defines */
+/* ======= */
+
+/* Name of Test File */
+#define FILENAME "flushrefresh.h5"
+
+/* Names of Signal Files */
+#define SIGNAL_TO_SCRIPT "flushrefresh_VERIFICATION_START"
+#define SIGNAL_BETWEEN_PROCESSES_1 "flushrefresh_VERIFICATION_CHECKPOINT1"
+#define SIGNAL_BETWEEN_PROCESSES_2 "flushrefresh_VERIFICATION_CHECKPOINT2"
+#define SIGNAL_FROM_SCRIPT "flushrefresh_VERIFICATION_DONE"
+
+/* Signal Timeout Length in Secs */
+#define SIGNAL_TIMEOUT 300
+
+/* Paths to Various Objects in the Testfile */
+#define RG "/"
+#define D1 "/Dataset1"
+#define D2 "/Group1/Dataset2"
+#define D3 "/Group3/Dataset3"
+#define G1 "/Group1"
+#define G2 "/Group1/Group2"
+#define G3 "/Group3"
+#define T1 "/CommittedDatatype1"
+#define T2 "/Group1/Group2/CommittedDatatype2"
+#define T3 "/Group3/CommittedDatatype3"
+
+/* Flushed States */
+#define FLUSHED "FLUSHED"
+#define NOT_FLUSHED "NOT_FLUSHED"
+
+/* Error Handling */
+/* For errors occurring in the main process, use the standard TEST_ERROR macro.
+ For errors occurring in the spawned process (from the test script), use
+ the PROCESS_ERROR macro, which will send a signal to the main process so the
+ main process can propagate errors correctly. */
+FILE * errorfile;
+#define ERRFILE "flushrefresh_ERROR"
+#define PROCESS_ERROR \
+ { errorfile = fopen(ERRFILE, "w+"); \
+ HDfprintf(errorfile, "Error occurred in flushrefresh.\n"); \
+ HDfflush(errorfile); \
+ HDfclose(errorfile); \
+ TEST_ERROR; \
+ }
+
+#define CLEANUP_FILES \
+ { \
+ HDremove(ERRFILE); \
+ HDremove(FILENAME); \
+ HDremove(SIGNAL_TO_SCRIPT); \
+ HDremove(SIGNAL_BETWEEN_PROCESSES_1); \
+ HDremove(SIGNAL_BETWEEN_PROCESSES_2); \
+ HDremove(SIGNAL_FROM_SCRIPT); \
+ } \
+
+/* ===================== */
+/* Function Declarations */
+/* ===================== */
+
+/* Main */
+int main(int argc, const char *argv[]);
+
+/* Flush Test Framework */
+herr_t test_flush(void);
+herr_t flush_verification(const char * obj_pathname, const char * expected);
+herr_t run_flush_verification_process(const char * obj_pathname, const char * expected);
+
+/* Refresh Test Framework */
+herr_t test_refresh(void);
+herr_t refresh_verification(const char * obj_pathname);
+herr_t start_refresh_verification_process(const char * obj_pathname);
+herr_t end_refresh_verification_process(void);
+
+/* Other Helper Functions */
+herr_t check_for_errors(void);
+herr_t end_verification(void);
+herr_t wait_for_signal(const char * waitfor);
+void send_signal(const char * send, const char * arg1, const char * arg2);
+
+/* ========= */
+/* Functions */
+/* ========= */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: This function coordinates the test of flush/refresh
+ * functionality verification. It accepts either one, two or
+ * no command line parameters. The main test routine runs
+ * with no command line parameters specified, while verification
+ * routines run with one or two command line parameters.
+ *
+ * Note: This program should not be run manually, as the
+ * test is controlled by the testflushrefresh.sh script. Running
+ * the flushrefresh program manually will result in failure, as
+ * it will time out waiting for a signal from the test script
+ * which will never come.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 1, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int main(int argc, const char *argv[])
+{
+ /* Variables */
+ const char *envval = NULL;
+
+ /* Initialize library */
+ if(H5open() < 0)
+ TEST_ERROR;
+
+ /* Parse command line options */
+ if (argc == 1) {
+
+ /* No arguments supplied. Run main test routines if
+ * using sec2 or stdio driver, otherwise don't run
+ * anything. */
+
+ /* Determine driver being used */
+ envval = HDgetenv("HDF5_DRIVER");
+ if(envval == NULL)
+ envval = "";
+
+ if (!HDstrcmp(envval, "sec2") || !HDstrcmp(envval, "stdio") || !HDstrcmp(envval, "")) {
+
+ if (test_flush() != SUCCEED) TEST_ERROR;
+ if (test_refresh() != SUCCEED) TEST_ERROR;
+
+ } /* end if */
+
+ else {
+
+ HDfprintf(stdout, "Skipping all flush/refresh tests (only run with sec2 or stdio file drivers).\n");
+
+ /* Test script is expecting some signals, so send them out to end it. */
+ if (end_verification() < 0) TEST_ERROR;
+ if (end_verification() < 0) TEST_ERROR;
+
+ } /* end else */
+
+ } else if (argc == 3) {
+
+ /* Two arguments supplied. Pass them to flush verification routine. */
+
+ if (flush_verification(argv[1], argv[2]) != 0) PROCESS_ERROR;
+
+ } else if (argc == 2) {
+
+ /* One argument supplied. Pass it to refresh verification routine. */
+
+ if (refresh_verification(argv[1]) != 0) PROCESS_ERROR;
+
+ } else {
+
+ /* Illegal number of arguments supplied. Error. */
+
+ HDfprintf(stderr, "Error. %d is an Invalid number of arguments to main().\n", argc);
+ PROCESS_ERROR
+
+ } /* end if */
+
+ return SUCCEED;
+
+error:
+
+ /* Return */
+ return FAIL;
+
+} /* main */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_flush
+ *
+ * Purpose: This function tests flushing individual objects' metadata
+ * from the metadata cache.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 1, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t test_flush(void)
+{
+ /**************************************************************************
+ *
+ * Test Description:
+ *
+ * This test will build an HDF5 file with several objects in a varying
+ * hierarchical layout. It will then attempt to flush the objects
+ * in the file one by one, individually, using the four H5*flush
+ * routines (D,G,T, and O). After each call to either create or flush an
+ * object, a series of verifications will occur on each object in the file.
+ *
+ * Each verification consists of spawning off a new process and determining
+ * if the object can be opened and its information retreived in said
+ * alternate process. It reports the results, which are compared to an
+ * expected value (either that the object can be found on disk, or that it
+ * cannot).
+ *
+ * Note that to spawn a verification, this program sends a signal (by creating
+ * a file on disk) to the test script controlling it, indicating how to
+ * run the verification.
+ *
+ * Implementation is funky, but basically, an example:
+ *
+ * Step 1. Dataset is created.
+ * Step 2. Verify that dataset can't be opened by separate process, as
+ * it should not have been flushed to disk yet.
+ * Step 3. Group is created.
+ * Step 4. Verify that group can't be opened by separate process.
+ * Step 5. H5Gflush is called on the group.
+ * Step 6. Verify that group CAN be opened, but dataset still has
+ * yet to hit disk, and CANNOT be opened. Success! Only the group
+ * was flushed.
+ *
+ **************************************************************************/
+
+ /**************************************************************************
+ * Generated Test File will look like this:
+ *
+ * GROUP "/"
+ * DATASET "Dataset1"
+ * GROUP "Group1" {
+ * DATASET "Dataset2"
+ * GROUP "Group2" {
+ * DATATYPE "CommittedDatatype3"
+ * }
+ * }
+ * GROUP "Group3" {
+ * DATASET "Dataset3"
+ * DATATYPE "CommittedDatatype2"
+ * }
+ * DATATYPE "CommittedDatatype1"
+ **************************************************************************/
+
+ /* Variables */
+ hid_t fid,gid,gid2,gid3,sid,tid1,tid2,tid3,did,did2,did3,rid,fapl,status = 0;
+ hsize_t dims[2] = {3,5};
+
+ /* Testing Message */
+ HDfprintf(stdout, "Testing individual object flush behavior:\n");
+
+ /* Cleanup any old error or signal files */
+ CLEANUP_FILES;
+
+ /* ================ */
+ /* CREATE TEST FILE */
+ /* ================ */
+
+ /* Create file, open root group - have to use latest file format for SWMR */
+ if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) TEST_ERROR;
+ if (H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) TEST_ERROR;
+ if ((fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0) TEST_ERROR;
+ if ((rid = H5Gopen2(fid, "/", H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create data space and types */
+ if ((sid = H5Screate_simple(2, dims, dims)) < 0) TEST_ERROR;
+ if ((tid1 = H5Tcopy(H5T_NATIVE_INT)) < 0) TEST_ERROR;
+ if ((tid2 = H5Tcopy(H5T_NATIVE_CHAR)) < 0) TEST_ERROR;
+ if ((tid3 = H5Tcopy(H5T_NATIVE_LONG)) < 0) TEST_ERROR;
+
+ /* Create Group1 */
+ if ((gid = H5Gcreate2(fid, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Group2 */
+ if ((gid2 = H5Gcreate2(gid, "Group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Group3 */
+ if ((gid3 = H5Gcreate2(fid, "Group3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Dataset1 */
+ if ((did = H5Dcreate2(fid, "Dataset1", tid1, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Dataset2 */
+ if ((did2 = H5Dcreate2(gid, "Dataset2", tid3, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Dataset3 */
+ if ((did3 = H5Dcreate2(gid3, "Dataset3", tid2, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create CommittedDatatype1 */
+ if ((status = H5Tcommit2(fid, "CommittedDatatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create CommittedDatatype2 */
+ if ((status = H5Tcommit2(gid2, "CommittedDatatype2", tid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create CommittedDatatype3 */
+ if ((status = H5Tcommit2(gid3, "CommittedDatatype3", tid3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* ============ */
+ /* FLUSH GROUPS */
+ /* ============ */
+
+ /* Test */
+ TESTING("to ensure H5Gflush correctly flushes single groups");
+
+ /* First, let's verify that nothing is currently flushed. */
+ if (run_flush_verification_process(RG, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Then, flush the root group and verify it's the only thing on disk */
+ if ((status = H5Gflush(rid)) < 0) TEST_ERROR;
+ if (run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Flush Group1 and Verify it is recently flushed, and nothing
+ * else has changed. */
+ if ((status = H5Gflush(gid)) < 0) TEST_ERROR;
+ if (run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Flush Group2 and Verify it is recently flushed, and nothing
+ * else has changed. */
+ if ((status = H5Gflush(gid2)) < 0) TEST_ERROR;
+ if (run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* ============== */
+ /* FLUSH DATASETS */
+ /* ============== */
+
+ /* Test */
+ TESTING("to ensure H5Dflush correctly flushes single datasets");
+
+ /* Flush Dataset1 and verify it's the only thing that hits disk. */
+ if ((status = H5Dflush(did)) < 0) TEST_ERROR;
+ if (run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Flush Dataset2 and verify it's the only thing that hits disk. */
+ if ((status = H5Dflush(did2)) < 0) TEST_ERROR;
+ if (run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* =============== */
+ /* FLUSH DATATYPES */
+ /* =============== */
+
+ /* Test */
+ TESTING("to ensure H5Tflush correctly flushes single datatypes");
+
+ /* Flush Datatype 1 and verify it's the only thing that hits disk. */
+ if ((status = H5Tflush(tid1)) < 0) TEST_ERROR;
+ if (run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Flush Datatype 2 and verify it's the only thing that hits disk. */
+ if ((status = H5Tflush(tid2)) < 0) TEST_ERROR;
+ if (run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* ============= */
+ /* FLUSH OBJECTS */
+ /* ============= */
+
+ /* Test */
+ TESTING("to ensure H5Oflush correctly flushes single objects");
+
+ /* Flush Group3 and verify it's the only thing that hits disk. */
+ if ((status = H5Oflush(gid3)) < 0) TEST_ERROR;
+ if (run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G3, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Flush Dataset3 and verify it's the only thing that hits disk. */
+ if ((status = H5Oflush(did3)) < 0) TEST_ERROR;
+ if (run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G3, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D3, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Flush CommittedDatatype3 and verify it's the only thing that hits disk. */
+ if ((status = H5Oflush(tid3)) < 0) TEST_ERROR;
+ if (run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(G3, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(D3, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T1, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T2, FLUSHED) != 0) TEST_ERROR;
+ if (run_flush_verification_process(T3, FLUSHED) != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* ================== */
+ /* Cleanup and Return */
+ /* ================== */
+ if (H5Pclose(fapl) < 0) TEST_ERROR;
+ if (H5Gclose(gid) < 0) TEST_ERROR;
+ if (H5Gclose(gid2) < 0) TEST_ERROR;
+ if (H5Dclose(did) < 0) TEST_ERROR;
+ if (H5Dclose(did2) < 0) TEST_ERROR;
+ if (H5Gclose(rid) < 0) TEST_ERROR;
+ if (H5Fclose(fid) < 0) TEST_ERROR;
+
+ /* Delete test file */
+ HDremove(FILENAME);
+
+ if (end_verification() < 0) TEST_ERROR;
+
+ return SUCCEED;
+
+error:
+ return FAIL;
+
+} /* end test_flush */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_refresh
+ *
+ * Purpose: This function tests refresh (evict/reload) of individual
+ * objects' metadata from the metadata cache.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * August 17, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t test_refresh(void)
+{
+ /**************************************************************************
+ *
+ * Test Description:
+ *
+ * This test will build an HDF5 file with several objects in a varying
+ * hierarchical layout. It will then flush the entire file to disk. Then,
+ * an attribute will be added to each object in the file.
+ *
+ * One by one, this process will flush each object to disk, individually.
+ * It will also be coordinating with another process, which will open
+ * the object before it is flushed by this process, and then refresh the
+ * object after it's been flushed, comparing the before and after object
+ * information to ensure that they are as expected. (i.e., most notably,
+ * that an attribute has been added, and is only visible after a
+ * successful call to a H5*refresh function).
+ *
+ * As with the flush case, the implementation is a bit tricky as it's
+ * dealing with signals going back and forth between the two processes
+ * to ensure the timing is correct, but basically, an example:
+ *
+ * Step 1. Dataset is created.
+ * Step 2. Dataset is flushed.
+ * Step 3. Attribute on Dataset is created.
+ * Step 4. Another process opens the dataset and verifies that it does
+ * not see an attribute (as the attribute hasn't been flushed yet).
+ * Step 5. This process flushes the dataset again (with Attribute attached).
+ * Step 6. The other process calls H5Drefresh, which should evict/reload
+ * the object's metadata, and thus pick up the attribute that's
+ * attached to it. Most other before/after object information is
+ * compared for sanity as well.
+ * Step 7. Rinse and Repeat for each object in the file.
+ *
+ **************************************************************************/
+
+ /**************************************************************************
+ * Generated Test File will look like this:
+ *
+ * GROUP "/"
+ * DATASET "Dataset1"
+ * GROUP "Group1" {
+ * DATASET "Dataset2"
+ * GROUP "Group2" {
+ * DATATYPE "CommittedDatatype3"
+ * }
+ * }
+ * GROUP "Group3" {
+ * DATASET "Dataset3"
+ * DATATYPE "CommittedDatatype2"
+ * }
+ * DATATYPE "CommittedDatatype1"
+ **************************************************************************/
+
+ /* Variables */
+ hid_t aid,fid,sid,tid1,did,dcpl,fapl = 0;
+ hid_t gid,gid2,gid3,tid2,tid3,did2,did3;
+ herr_t status = 0;
+ hsize_t dims[2] = {50,50};
+ hsize_t cdims[2] = {1,1};
+ int fillval = 2;
+
+ /* Testing Message */
+ HDfprintf(stdout, "Testing individual object refresh behavior:\n");
+
+ /* Cleanup any old error or signal files */
+ CLEANUP_FILES;
+
+ /* ================ */
+ /* CREATE TEST FILE */
+ /* ================ */
+
+ /* Create File */
+ if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) TEST_ERROR;
+ if (H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) TEST_ERROR;
+ if ((fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0) TEST_ERROR;
+
+ /* Create data space and types */
+ if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR;
+ if ( H5Pset_chunk(dcpl, 2, cdims) < 0 ) TEST_ERROR;
+ if ( H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillval) < 0 ) TEST_ERROR;
+ if ((sid = H5Screate_simple(2, dims, dims)) < 0) TEST_ERROR;
+ if ((tid1 = H5Tcopy(H5T_NATIVE_INT)) < 0) TEST_ERROR;
+ if ((tid2 = H5Tcopy(H5T_NATIVE_CHAR)) < 0) TEST_ERROR;
+ if ((tid3 = H5Tcopy(H5T_NATIVE_LONG)) < 0) TEST_ERROR;
+
+ /* Create Group1 */
+ if ((gid = H5Gcreate2(fid, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Group2 */
+ if ((gid2 = H5Gcreate2(gid, "Group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Group3 */
+ if ((gid3 = H5Gcreate2(fid, "Group3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Dataset1 */
+ if ((did = H5Dcreate2(fid, "Dataset1", tid1, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Dataset2 */
+ if ((did2 = H5Dcreate2(gid, "Dataset2", tid3, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Dataset3 */
+ if ((did3 = H5Dcreate2(gid3, "Dataset3", tid2, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create CommittedDatatype1 */
+ if ((status = H5Tcommit2(fid, "CommittedDatatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create CommittedDatatype2 */
+ if ((status = H5Tcommit2(gid2, "CommittedDatatype2", tid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create CommittedDatatype3 */
+ if ((status = H5Tcommit2(gid3, "CommittedDatatype3", tid3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Flush File to Disk */
+ if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) TEST_ERROR;
+
+ /* Create an attribute on each object. These will not immediately hit disk,
+ and thus be unavailable to another process until this process flushes
+ the object and the other process refreshes from disk. */
+ if ((aid = H5Acreate2(did, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if (H5Aclose(aid) < 0) TEST_ERROR;
+ if ((aid = H5Acreate2(did2, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if (H5Aclose(aid) < 0) TEST_ERROR;
+ if ((aid = H5Acreate2(did3, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if (H5Aclose(aid) < 0) TEST_ERROR;
+ if ((aid = H5Acreate2(gid, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if (H5Aclose(aid) < 0) TEST_ERROR;
+ if ((aid = H5Acreate2(gid2, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if (H5Aclose(aid) < 0) TEST_ERROR;
+ if ((aid = H5Acreate2(gid3, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if (H5Aclose(aid) < 0) TEST_ERROR;
+ if ((aid = H5Acreate2(tid1, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if (H5Aclose(aid) < 0) TEST_ERROR;
+ if ((aid = H5Acreate2(tid2, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if (H5Aclose(aid) < 0) TEST_ERROR;
+ if ((aid = H5Acreate2(tid3, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if (H5Aclose(aid) < 0) TEST_ERROR;
+
+ /* ================ */
+ /* Refresh Datasets */
+ /* ================ */
+
+ TESTING("to ensure that H5Drefresh correctly refreshes single datasets");
+
+ /* Verify First Dataset can be refreshed with H5Drefresh */
+ if (start_refresh_verification_process(D1) != 0) TEST_ERROR;
+ if (H5Oflush(did) < 0) TEST_ERROR;
+ if (end_refresh_verification_process() != 0) TEST_ERROR;
+
+ /* Verify Second Dataset can be refreshed with H5Drefresh */
+ if (start_refresh_verification_process(D2) != 0) TEST_ERROR;
+ if (H5Oflush(did2) < 0) TEST_ERROR;
+ if (end_refresh_verification_process() != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* ============== */
+ /* Refresh Groups */
+ /* ============== */
+
+ TESTING("to ensure that H5Grefresh correctly refreshes single groups");
+
+ /* Verify First Group can be refreshed with H5Grefresh */
+ if (start_refresh_verification_process(G1) != 0) TEST_ERROR;
+ if (H5Oflush(gid) < 0) TEST_ERROR;
+ if (end_refresh_verification_process() != 0) TEST_ERROR;
+
+ /* Verify Second Group can be refreshed with H5Grefresh */
+ if (start_refresh_verification_process(G2) != 0) TEST_ERROR;
+ if (H5Oflush(gid2) < 0) TEST_ERROR;
+ if (end_refresh_verification_process() != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* ================= */
+ /* Refresh Datatypes */
+ /* ================= */
+
+ TESTING("to ensure that H5Trefresh correctly refreshes single datatypes");
+
+ /* Verify First Committed Datatype can be refreshed with H5Trefresh */
+ if (start_refresh_verification_process(T1) != 0) TEST_ERROR;
+ if (H5Oflush(tid1) < 0) TEST_ERROR;
+ if (end_refresh_verification_process() != 0) TEST_ERROR;
+
+ /* Verify Second Committed Datatype can be refreshed with H5Trefresh */
+ if (start_refresh_verification_process(T2) != 0) TEST_ERROR;
+ if (H5Oflush(tid2) < 0) TEST_ERROR;
+ if (end_refresh_verification_process() != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* =============== */
+ /* Refresh Objects */
+ /* =============== */
+
+ TESTING("to ensure that H5Orefresh correctly refreshes single objects");
+
+ /* Verify Third Dataset can be refreshed with H5Orefresh */
+ if (start_refresh_verification_process(D3) != 0) TEST_ERROR;
+ if (H5Oflush(did3) < 0) TEST_ERROR;
+ if (end_refresh_verification_process() != 0) TEST_ERROR;
+
+ /* Verify Third Group can be refreshed with H5Orefresh */
+ if (start_refresh_verification_process(G3) != 0) TEST_ERROR;
+ if (H5Oflush(gid3) < 0) TEST_ERROR;
+ if (end_refresh_verification_process() != 0) TEST_ERROR;
+
+ /* Verify Third Committed Datatype can be refreshed with H5Orefresh */
+ if (start_refresh_verification_process(T3) != 0) TEST_ERROR;
+ if (H5Oflush(tid3) < 0) TEST_ERROR;
+ if (end_refresh_verification_process() != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* ================== */
+ /* Cleanup and Return */
+ /* ================== */
+
+ /* Close Stuff */
+ if (H5Pclose(fapl) < 0) TEST_ERROR;
+ if (H5Pclose(dcpl) < 0) TEST_ERROR;
+ if (H5Tclose(tid1) < 0) TEST_ERROR;
+ if (H5Tclose(tid2) < 0) TEST_ERROR;
+ if (H5Tclose(tid3) < 0) TEST_ERROR;
+ if (H5Dclose(did) < 0) TEST_ERROR;
+ if (H5Dclose(did2) < 0) TEST_ERROR;
+ if (H5Dclose(did3) < 0) TEST_ERROR;
+ if (H5Gclose(gid) < 0) TEST_ERROR;
+ if (H5Gclose(gid2) < 0) TEST_ERROR;
+ if (H5Gclose(gid3) < 0) TEST_ERROR;
+ if (H5Sclose(sid) < 0) TEST_ERROR;
+ if (H5Fclose(fid) < 0) TEST_ERROR;
+
+ /* Delete Test File */
+ HDremove(FILENAME);
+
+ if (end_verification() < 0) TEST_ERROR;
+
+ return SUCCEED;
+
+error:
+ /* Return */
+ return FAIL;
+
+} /* test_refresh() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: run_flush_verification_process
+ *
+ * Purpose: This function is used to communicate with the test script
+ * in order to spawn off a process to verify that a flush
+ * of an individual object was successful.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 16, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t run_flush_verification_process(const char * obj_pathname, const char * expected)
+{
+    /* Ask the test SCRIPT to spawn a verification process for this object,
+       passing along the object path and the expected flush outcome. */
+    send_signal(SIGNAL_TO_SCRIPT, obj_pathname, expected);
+
+    /* Block until the SCRIPT reports that the verification process has
+       completed, then make sure no external process recorded a failure. */
+    if (wait_for_signal(SIGNAL_FROM_SCRIPT) < 0)
+        TEST_ERROR;
+    if (check_for_errors() < 0)
+        TEST_ERROR;
+
+    return SUCCEED;
+
+error:
+    return FAIL;
+
+} /* run_flush_verification_process */
+
+
+/*-------------------------------------------------------------------------
+ * Function: flush_verification
+ *
+ * Purpose: This function tries to open target object in the test file.
+ * It compares the success of the open function to the expected
+ * value, and succeeds if they are equal and fails if they differ.
+ *
+ * Note that full path to the object must be provided as the
+ * obj_pathname argument.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 16, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t flush_verification(const char * obj_pathname, const char * expected)
+{
+    /* Variables */
+    hid_t oid = -1, fid = -1;   /* Object and file IDs */
+    herr_t status = 0;          /* Status of the object-info query */
+    H5O_info_t oinfo;           /* Object info (contents unused; success/failure is what matters) */
+
+    /* Try to open the testfile and then obj_pathname within the file.
+       Failure is the expected outcome in the NOT_FLUSHED case, so suppress
+       the error stack while probing. */
+    H5E_BEGIN_TRY {
+        fid = H5Fopen(FILENAME, H5F_ACC_SWMR_READ, H5P_DEFAULT);
+        oid = H5Oopen(fid, obj_pathname, H5P_DEFAULT);
+        status = H5Oget_info(oid, &oinfo);
+    } H5E_END_TRY;
+
+    /* Compare to expected result.
+       Note: use the HD-wrapped string compare for consistency with the
+       HDfprintf/HDremove/HDfclose convention used throughout this file. */
+    if (HDstrcmp(expected, FLUSHED) == 0) {
+        /* The object must be present (and readable) on disk */
+        if ((oid < 0) || (status < 0)) {
+            HDfprintf(stderr, "Error! %s should be on disk, but was NOT!\n", obj_pathname);
+            PROCESS_ERROR;
+        } /* end if */
+    } else if (HDstrcmp(expected, NOT_FLUSHED) == 0) {
+        /* The object must NOT be present on disk */
+        if ((oid > 0) || (status > 0)) {
+            HDfprintf(stderr, "Error! %s not expected to be flushed, but it was found on disk!\n", obj_pathname);
+            PROCESS_ERROR;
+        } /* end if */
+    } else {
+        HDfprintf(stderr, "Error! Bad verification parameters. %s is an invalid expected outcome.\n", expected);
+        PROCESS_ERROR;
+    } /* end if */
+
+    /* Cleanup (the IDs may be invalid in the NOT_FLUSHED case, so suppress errors) */
+    H5E_BEGIN_TRY {
+        H5Oclose(oid);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+
+    return SUCCEED;
+
+error:
+    return FAIL;
+
+} /* flush_verification */
+
+
+/*-------------------------------------------------------------------------
+ * Function: start_refresh_verification_process
+ *
+ * Purpose: This function is used to communicate with the test script
+ * in order to spawn off a process which will test the
+ * H5*refresh routine.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 16, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t start_refresh_verification_process(const char * obj_pathname)
+{
+    /* Tell the test SCRIPT to spawn a refresh verification process
+       for the named object. */
+    send_signal(SIGNAL_TO_SCRIPT, obj_pathname, NULL);
+
+    /* Block until the VERIFICATION PROCESS has opened the target object
+       and signals that the MAIN PROCESS may modify it, then confirm that
+       no external process reported an error. */
+    if (wait_for_signal(SIGNAL_BETWEEN_PROCESSES_1) < 0)
+        TEST_ERROR;
+    if (check_for_errors() < 0)
+        TEST_ERROR;
+
+    return SUCCEED;
+
+error:
+    return FAIL;
+
+} /* start_refresh_verification_process */
+
+
+/*-------------------------------------------------------------------------
+ * Function: end_refresh_verification_process
+ *
+ * Purpose: This function is used to communicate with the verification
+ * process spawned by the start_refresh_verification_process
+ * function. It gives it the go-ahead to call H5*refresh
+ * on an object and conclude the refresh verification.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 16, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t end_refresh_verification_process(void)
+{
+    /* Notify the REFRESH VERIFICATION PROCESS that the object has been
+       modified, so it should now attempt to refresh its metadata and
+       verify the results. */
+    send_signal(SIGNAL_BETWEEN_PROCESSES_2, NULL, NULL);
+
+    /* Block until the SCRIPT reports that the refresh verification
+       process has completed, then make sure nothing failed externally. */
+    if (wait_for_signal(SIGNAL_FROM_SCRIPT) < 0)
+        TEST_ERROR;
+    if (check_for_errors() < 0)
+        TEST_ERROR;
+
+    return SUCCEED;
+
+error:
+    return FAIL;
+
+} /* end_refresh_verification_process */
+
+
+/*-------------------------------------------------------------------------
+ * Function: refresh_verification
+ *
+ * Purpose: This function opens the specified object, and checks to see
+ * that is does not have any attributes on it. It then sends
+ * a signal to the main process, which will flush the object
+ * (putting an attribute on the object on disk). This function
+ * will then refresh the object, and verify that it has picked
+ * up the new metadata reflective of the added attribute.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 16, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t refresh_verification(const char * obj_pathname)
+{
+    /* Variables */
+    /* BUG FIX: status was previously declared as hid_t (in a combined
+       "hid_t oid,fid,status = 0;" declaration that also left oid/fid
+       uninitialized); it holds herr_t return values, matching the
+       convention used by the sibling verification routines. */
+    hid_t oid = -1;             /* Target object ID */
+    hid_t fid = -1;             /* Test file ID */
+    herr_t status = 0;          /* Return status of metadata queries */
+    H5O_info_t flushed_oinfo;   /* Object info before MAIN PROCESS flushes */
+    H5O_info_t refreshed_oinfo; /* Object info after (attempted) refresh */
+
+    /* Open Object */
+    if ((fid = H5Fopen(FILENAME, H5F_ACC_SWMR_READ, H5P_DEFAULT)) < 0) PROCESS_ERROR;
+    if ((oid = H5Oopen(fid, obj_pathname, H5P_DEFAULT)) < 0) PROCESS_ERROR;
+
+    /* Get Object info */
+    if ((status = H5Oget_info(oid, &flushed_oinfo)) < 0) PROCESS_ERROR;
+
+    /* Make sure there are no attributes on the object. This is just a sanity
+       check to ensure we didn't erroneously flush the attribute before
+       starting the verification. */
+    if (flushed_oinfo.num_attrs != 0) PROCESS_ERROR;
+
+    /* Send Signal to MAIN PROCESS indicating that it can go ahead and modify the
+       object. */
+    send_signal(SIGNAL_BETWEEN_PROCESSES_1, NULL, NULL);
+
+    /* Wait for Signal from MAIN PROCESS indicating that it's modified the
+       object and we can run verification now. */
+    if (wait_for_signal(SIGNAL_BETWEEN_PROCESSES_2) < 0) PROCESS_ERROR;
+
+    /* Get object info again. This will NOT reflect what's on disk, only what's
+       in the cache. Thus, all values will be unchanged from above, despite
+       newer information being on disk. */
+    if ((status = H5Oget_info(oid, &refreshed_oinfo)) < 0) PROCESS_ERROR;
+
+    /* Verify that before doing a refresh, getting the object info returns stale
+       information. (i.e., unchanged from above, despite new info on disk). */
+    if (flushed_oinfo.addr != refreshed_oinfo.addr) PROCESS_ERROR;
+    if (flushed_oinfo.type != refreshed_oinfo.type) PROCESS_ERROR;
+    if (flushed_oinfo.hdr.version != refreshed_oinfo.hdr.version) PROCESS_ERROR;
+    if (flushed_oinfo.hdr.flags != refreshed_oinfo.hdr.flags) PROCESS_ERROR;
+    if (flushed_oinfo.num_attrs != refreshed_oinfo.num_attrs) PROCESS_ERROR;
+    if (flushed_oinfo.hdr.nmesgs != refreshed_oinfo.hdr.nmesgs) PROCESS_ERROR;
+    if (flushed_oinfo.hdr.nchunks != refreshed_oinfo.hdr.nchunks) PROCESS_ERROR;
+    if (flushed_oinfo.hdr.space.total != refreshed_oinfo.hdr.space.total) PROCESS_ERROR;
+
+    /* Refresh object */
+    /* The H5*refresh function called depends on which object we are trying
+     * to refresh. (MIKE: add desired refresh call as parameter so adding new
+     * test cases is easy). */
+    if ((HDstrcmp(obj_pathname, D1) == 0) ||
+        (HDstrcmp(obj_pathname, D2) == 0)) {
+        /* D1/D2 exercise the dataset-specific refresh call */
+        if (H5Drefresh(oid) < 0) PROCESS_ERROR;
+    } /* end if */
+    else if ((HDstrcmp(obj_pathname, G1) == 0) ||
+             (HDstrcmp(obj_pathname, G2) == 0)) {
+        /* G1/G2 exercise the group-specific refresh call */
+        if (H5Grefresh(oid) < 0) PROCESS_ERROR;
+    } /* end if */
+    else if ((HDstrcmp(obj_pathname, T1) == 0) ||
+             (HDstrcmp(obj_pathname, T2) == 0)) {
+        /* T1/T2 exercise the datatype-specific refresh call */
+        if (H5Trefresh(oid) < 0) PROCESS_ERROR;
+    } /* end if */
+    else if ((HDstrcmp(obj_pathname, D3) == 0) ||
+             (HDstrcmp(obj_pathname, G3) == 0) ||
+             (HDstrcmp(obj_pathname, T3) == 0)) {
+        /* D3/G3/T3 exercise the generic object refresh call */
+        if (H5Orefresh(oid) < 0) PROCESS_ERROR;
+    } /* end if */
+    else {
+        HDfprintf(stdout, "Error. %s is an unrecognized object.\n", obj_pathname);
+        PROCESS_ERROR;
+    } /* end else */
+
+    /* Get object info. This should now accurately reflect the refreshed object on disk. */
+    if ((status = H5Oget_info(oid, &refreshed_oinfo)) < 0) PROCESS_ERROR;
+
+    /* Confirm following attributes are the same: */
+    if (flushed_oinfo.addr != refreshed_oinfo.addr) PROCESS_ERROR;
+    if (flushed_oinfo.type != refreshed_oinfo.type) PROCESS_ERROR;
+    if (flushed_oinfo.hdr.version != refreshed_oinfo.hdr.version) PROCESS_ERROR;
+    if (flushed_oinfo.hdr.flags != refreshed_oinfo.hdr.flags) PROCESS_ERROR;
+
+    /* Confirm following attributes are different (the flush performed by the
+       MAIN PROCESS added an attribute, which the refresh must pick up) */
+    if (flushed_oinfo.num_attrs == refreshed_oinfo.num_attrs) PROCESS_ERROR;
+    if (flushed_oinfo.hdr.nmesgs == refreshed_oinfo.hdr.nmesgs) PROCESS_ERROR;
+    if (flushed_oinfo.hdr.nchunks == refreshed_oinfo.hdr.nchunks) PROCESS_ERROR;
+    if (flushed_oinfo.hdr.space.total == refreshed_oinfo.hdr.space.total) PROCESS_ERROR;
+
+    /* Close objects */
+    if (H5Oclose(oid) < 0) PROCESS_ERROR;
+    if (H5Fclose(fid) < 0) PROCESS_ERROR;
+
+    /* Return */
+    return SUCCEED;
+
+error:
+
+    return FAIL;
+
+} /* refresh_verification */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_for_errors()
+ *
+ * Purpose: This function checks the status of external verification
+ * processes to see if they've succeeded. It checks for the
+ * existence of the flushrefresh_ERROR file. If present, that indicates
+ * an external verification process has failed, and this function
+ * thus fails as well. If not present, then nothing else has
+ * failed, and this function succeeds.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 1, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t check_for_errors(void)
+{
+    FILE * file;    /* Error-file handle; the file only exists if a process failed */
+
+    /* The presence of ERRFILE means some external verification process hit
+       an error: consume (remove) the file and report failure.
+       Note: use HDfopen for consistency with the HDfclose/HDremove calls
+       below (the original mixed a bare fopen with HD-wrapped calls). */
+    if ((file = HDfopen(ERRFILE, "r")))
+    {
+        HDfclose(file);
+        HDremove(ERRFILE);
+        return FAIL;
+    }
+
+    return SUCCEED;
+
+} /* check_for_errors */
+
+
+/*-------------------------------------------------------------------------
+ * Function: end_verification
+ *
+ * Purpose: Tells test script that verification routines are completed and
+ * that the test can wrap up.
+ *
+ * Return:      0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 16, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t end_verification(void)
+{
+    /* Tell the SCRIPT that all verification routines have completed
+       and the test can wrap up. */
+    send_signal(SIGNAL_TO_SCRIPT, "VERIFICATION_DONE", "VERIFICATION_DONE");
+
+    /* Block until the SCRIPT acknowledges and we may continue. */
+    if (wait_for_signal(SIGNAL_FROM_SCRIPT) < 0)
+        TEST_ERROR;
+
+    return SUCCEED;
+
+error:
+    return FAIL;
+
+} /* end_verification */
+
+
+/*-------------------------------------------------------------------------
+ * Function: send_signal
+ *
+ * Purpose: Sends the specified signal.
+ *
+ * In terms of this test framework, a signal consists of a file
+ * on disk. Since there are multiple processes that need to
+ * communicate with each other, they do so by writing and
+ * reading signal files on disk, the names and contents of
+ * which are used to inform a process about when it can
+ * proceed and what it should do next.
+ *
+ * This function writes a signal file. The first argument is
+ * the name of the signal file, and the second and third
+ * arguments are the contents of the first two lines of the
+ * signal file. The last two arguments may be NULL.
+ *
+ * Return: void
+ *
+ * Programmer: Mike McGreevy
+ * August 18, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void send_signal(const char * send, const char * arg1, const char * arg2)
+{
+
+    FILE *signalfile = NULL;
+
+    /* Create signal file (which will send signal to some other process) */
+    /* BUG FIX: check the fopen result before writing -- the original passed
+       a potentially-NULL stream to HDfprintf/HDfflush/HDfclose, crashing if
+       the signal file could not be created. There is no error return from
+       this function, so just report the problem; the receiving process will
+       eventually time out in wait_for_signal(). */
+    if ((signalfile = HDfopen(send, "w+")) == NULL) {
+        HDfprintf(stderr, "Error: could not create signal file %s\n", send);
+        return;
+    } /* end if */
+
+    /* Write messages to signal file, if provided */
+    if (arg2 != NULL) {
+        HDassert(arg1);
+        HDfprintf(signalfile, "%s\n%s\n", arg1, arg2);
+    } /* end if */
+    else if (arg1 != NULL) {
+        HDassert(arg2 == NULL);
+        HDfprintf(signalfile, "%s\n", arg1);
+    } /* end if */
+    else {
+        HDassert(arg1 == NULL);
+        HDassert(arg2 == NULL);
+    }/* end else */
+
+    HDfflush(signalfile);
+    HDfclose(signalfile);
+
+} /* send_signal */
+
+
+/*-------------------------------------------------------------------------
+ * Function: wait_for_signal
+ *
+ * Purpose: Waits for the specified signal.
+ *
+ * In terms of this test framework, a signal consists of a file
+ * on disk. Since there are multiple processes that need to
+ * communicate with each other, they do so by writing and
+ * reading signal files on disk, the names and contents of
+ * which are used to inform a process about when it can
+ * proceed and what it should do next.
+ *
+ * This function continuously attempts to read the specified
+ * signal file from disk, and only continues once it has
+ * successfully done so (i.e., only after another process has
+ * called the "send_signal" function to write the signal file).
+ * This function will then immediately remove the file (i.e.,
+ * to indicate that it has been received and can be reused),
+ * and then exits, allowing the calling function to continue.
+ *
+ * Return: void
+ *
+ * Programmer: Mike McGreevy
+ * August 18, 2010
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t wait_for_signal(const char * waitfor)
+{
+    FILE *returnfile;           /* Signal file written by another process */
+    time_t t0,t1;               /* Timestamps for timeout bookkeeping */
+
+    /* Start timer. If this function runs for too long (i.e.,
+       expected signal is never received), it will
+       return failure.
+       Note: use the HD-wrapped fopen/time/difftime calls for consistency
+       with the rest of the file (and with h5_wait_message() in h5test.c). */
+    HDtime(&t0);
+
+    /* Wait for return signal from some other process */
+    while ((returnfile = HDfopen(waitfor, "r")) == NULL) {
+
+        /* make note of current time. */
+        HDtime(&t1);
+
+        /* If we've been waiting for a signal for too long, then
+           it was likely never sent and we should fail rather
+           than loop infinitely */
+        if (HDdifftime(t1,t0) > SIGNAL_TIMEOUT) {
+            HDfprintf(stdout, "Error communicating between processes. Make sure test script is running.\n");
+            TEST_ERROR;
+        } /* end if */
+
+    } /* end while */
+
+    /* Consume the signal: close and delete the file so it can be reused */
+    HDfclose(returnfile);
+    HDunlink(waitfor);
+
+    return SUCCEED;
+
+error:
+    return FAIL;
+
+} /* wait_for_signal */
diff --git a/test/gen_idx.c b/test/gen_idx.c
new file mode 100644
index 0000000..8c24198
--- /dev/null
+++ b/test/gen_idx.c
@@ -0,0 +1,126 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Purpose: This program is run to generate an HDF5 data file with datasets
+ * that use Fixed Array indexing method.
+ *
+ * To test compatibility, compile and run this program
+ * which will generate a file called "fixed_idx.h5".
+ * Move it to the test directory in the HDF5 v1.6/1.8 source tree.
+ * The test: test_idx_compatible() in dsets.c will read it.
+ */
+#include <assert.h>
+#include "hdf5.h"
+
+const char *FILENAME[1] = {
+ "fixed_idx.h5" /* file with datasets that use Fixed Array indexing method */
+};
+
+#define DSET "dset"
+#define DSET_FILTER "dset_filter"
+
+/*
+ * Function: gen_idx_file
+ *
+ * Purpose: Create a file with datasets that use Fixed Array indexing:
+ * one dataset: fixed dimension, chunked layout, w/o filters
+ * one dataset: fixed dimension, chunked layout, w/ filters
+ *
+ */
+static void gen_idx_file(void)
+{
+    hid_t fapl;                 /* file access property id */
+    hid_t fid;                  /* file id */
+    hid_t sid;                  /* space id */
+    hid_t dcpl;                 /* dataset creation property id */
+    hid_t did;                  /* dataset id */
+#if defined (H5_HAVE_FILTER_DEFLATE)
+    hid_t did2;                 /* dataset id (with deflate filter) */
+#endif
+    hsize_t dims[1] = {10};     /* dataset dimension */
+    hsize_t c_dims[1] = {2};    /* chunk dimension */
+    herr_t status;              /* return status */
+    int i;                      /* local index variable */
+    int buf[10];                /* data buffer */
+
+
+    /* Get a copy of the file access property list */
+    fapl = H5Pcreate(H5P_FILE_ACCESS);
+    assert(fapl >= 0);
+
+    /* Set the "use the latest format" bounds for creating objects in the file */
+    status = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+    assert(status >= 0);
+
+    /* Create the file */
+    fid = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+    assert(fid >= 0);
+
+    /* Create data */
+    for(i = 0; i < 10; i++)
+        buf[i] = i;
+
+    /* Set chunk */
+    dcpl = H5Pcreate(H5P_DATASET_CREATE);
+    assert(dcpl >= 0);
+    status = H5Pset_chunk(dcpl, 1, c_dims);
+    assert(status >= 0);
+
+    sid = H5Screate_simple(1, dims, NULL);
+    assert(sid >= 0);
+
+    /* Create a 1D chunked dataset (fixed dims + latest format => Fixed Array index) */
+    did = H5Dcreate2(fid, DSET, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+    assert(did >= 0);
+
+    /* Write to the dataset */
+    status = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
+    assert(status >= 0);
+
+#if defined (H5_HAVE_FILTER_DEFLATE)
+    /* set deflate data */
+    status = H5Pset_deflate(dcpl, 9);
+    assert(status >= 0);
+
+    /* Create and write the filtered dataset */
+    did2 = H5Dcreate2(fid, DSET_FILTER, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+    assert(did2 >= 0);
+
+    status = H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
+    assert(status >= 0);
+
+    /* Close the filtered dataset */
+    status = H5Dclose(did2);
+    assert(status >= 0);
+#endif
+
+    /* closing */
+    status = H5Dclose(did);
+    assert(status >= 0);
+    status = H5Sclose(sid);
+    assert(status >= 0);
+    status = H5Pclose(dcpl);
+    assert(status >= 0);
+    status = H5Pclose(fapl);
+    assert(status >= 0);
+    status = H5Fclose(fid);
+    assert(status >= 0);
+} /* gen_idx_file() */
+
+int main(void)
+{
+    /* Generate the "fixed_idx.h5" test file for index-compatibility testing
+       (read back by test_idx_compatible() in dsets.c of older branches). */
+    gen_idx_file();
+
+    return 0;
+}
+
diff --git a/test/gen_plist.c b/test/gen_plist.c
index f071733..ae49d81 100644
--- a/test/gen_plist.c
+++ b/test/gen_plist.c
@@ -34,7 +34,7 @@ main(void)
hid_t dapl1; /* dataset access prop. list */
hid_t dxpl1; /* dataset xfer prop. list */
hid_t gcpl1; /* group create prop. list */
- hid_t ocpypl1; /* object copy prop. list */
+ hid_t ocpypl1; /* object copy prop. list */
hid_t ocpl1; /* object create prop. list */
hid_t lcpl1; /* link create prop. list */
hid_t lapl1; /* link access prop. list */
@@ -89,7 +89,7 @@ main(void)
/* check endianess */
{
- short int word = 0x0001;
+ short int word = 0x0000;
char *byte = (char *) &word;
if(byte[0] == 1)
diff --git a/test/group_old.h5 b/test/group_old.h5
index 58f66f8..2440103 100644
--- a/test/group_old.h5
+++ b/test/group_old.h5
Binary files differ
diff --git a/test/h5test.c b/test/h5test.c
index 843ec35..cbe067f 100644
--- a/test/h5test.c
+++ b/test/h5test.c
@@ -1263,8 +1263,6 @@ getenv_all(MPI_Comm comm, int root, const char* name)
* Programmer: Larry Knox
* Monday, October 13, 2009
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
int
@@ -1276,20 +1274,22 @@ h5_make_local_copy(const char *origfilename, const char *local_copy_name)
const char *filename = H5_get_srcdir_filename(origfilename);; /* Get the test file name to copy */
/* Copy old file into temporary file */
- if((fd_old = HDopen(filename, O_RDONLY, 0666)) < 0) return -1;
- if((fd_new = HDopen(local_copy_name, O_RDWR|O_CREAT|O_TRUNC, 0666))
- < 0) return -1;
+ if((fd_old = HDopen(filename, O_RDONLY, 0666)) < 0)
+ return -1;
+ if((fd_new = HDopen(local_copy_name, O_RDWR|O_CREAT|O_TRUNC, 0666)) < 0)
+ return -1;
/* Copy data */
while((nread = HDread(fd_old, buf, (size_t)READ_BUF_SIZE)) > 0)
- HDwrite(fd_new, buf, (size_t)nread);
+ if(HDwrite(fd_new, buf, (size_t)nread) < 0)
+ return -1;
/* Close files */
if(HDclose(fd_old) < 0) return -1;
if(HDclose(fd_new) < 0) return -1;
return 0;
-}
+} /* end h5_make_local_copy() */
/*-------------------------------------------------------------------------
@@ -1379,3 +1379,56 @@ error:
return -1;
}
+/*
+ * To send a message by creating the file.
+ * This is a helper routine used in:
+ * 1) tfile.c: test_file_lock_concur() and test_file_lock_swmr_concur()
+ * 2) use_common.c
+ * 3) swmr_addrme_writer.c, swmr_remove_writer.c, swmr_sparse_writer.c, swmr_writer.c
+ */
+void
+h5_send_message(const char *file)
+{
+ FILE *id;
+
+ id = HDfopen(file, "w+");
+ HDfclose(id);
+} /* h5_send_message() */
+
+/*
+ * Repeatedly check for the message file.
+ * It will stop when the file exists or exceeds the timeout limit.
+ * This is a helper routine used in:
+ * 1) tfile.c: test_file_lock_concur() and test_file_lock_swmr_concur()
+ * 2) use_common.c
+ */
+int
+h5_wait_message(const char *file)
+{
+ FILE *id; /* File pointer */
+ time_t t0, t1; /* Time info */
+
+ /* Start timer */
+ HDtime(&t0);
+
+ /* Repeatedly check whether the file exists */
+ while((id = HDfopen(file, "r")) == NULL) {
+ /* Get current time */
+ HDtime(&t1);
+ /*
+ * Determine time difference--
+ * if waiting too long for the message, then it is
+ * unlikely the message will get sent, then fail rather
+ * than loop forever.
+ */
+ if(HDdifftime(t1, t0) > MESSAGE_TIMEOUT)
+ goto done;
+ }
+
+ if(id != NULL) HDfclose(id);
+ HDunlink(file);
+ return(1);
+
+done:
+ return(-1);
+} /* h5_wait_message() */
diff --git a/test/h5test.h b/test/h5test.h
index ca0eead..a91da5d 100644
--- a/test/h5test.h
+++ b/test/h5test.h
@@ -118,6 +118,9 @@ H5TEST_DLLVAR MPI_Info h5_io_info_g; /* MPI INFO object for IO */
#define ALARM_ON TestAlarmOn()
#define ALARM_OFF HDalarm(0)
+/* The # of seconds to wait for the message file--used by h5_wait_message() */
+#define MESSAGE_TIMEOUT 300 /* Timeout in seconds */
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -179,6 +182,9 @@ H5TEST_DLL char* getenv_all(MPI_Comm comm, int root, const char* name);
/* Extern global variables */
H5TEST_DLLVAR int TestVerbosity;
+H5TEST_DLL void h5_send_message(const char *file);
+H5TEST_DLL int h5_wait_message(const char *file);
+
#ifdef __cplusplus
}
#endif
diff --git a/test/links.c b/test/links.c
index db5922b..7e63bfa 100644
--- a/test/links.c
+++ b/test/links.c
@@ -27,11 +27,15 @@
#define H5G_FRIEND /*suppress error about including H5Gpkg */
#define H5G_TESTING
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_TESTING
+
#include "h5test.h"
#include "H5srcdir.h"
-#include "H5Gpkg.h" /* Groups */
-#include "H5Iprivate.h" /* IDs */
-#include "H5Lprivate.h" /* Links */
+#include "H5FDpkg.h" /* File drivers */
+#include "H5Gpkg.h" /* Groups */
+#include "H5Iprivate.h" /* IDs */
+#include "H5Lprivate.h" /* Links */
/* File for external link test. Created with gen_udlinks.c */
#define LINKED_FILE "be_extlink2.h5"
@@ -3848,7 +3852,6 @@ external_set_elink_fapl3(hbool_t new_format)
{
hid_t core_fapl = -1, stdio_fapl = -1;
hid_t lapl_id = -1, new_lapl_id = -1, l_fapl = -1, out_fapl;
- int ret;
if(new_format)
TESTING("H5Pset/get_fapl() (w/new group format)")
@@ -3907,11 +3910,11 @@ external_set_elink_fapl3(hbool_t new_format)
error:
H5E_BEGIN_TRY {
- H5Pclose(l_fapl);
- H5Pclose(lapl_id);
- H5Pclose(new_lapl_id);
- H5Pclose(core_fapl);
- H5Pclose(stdio_fapl);
+ H5Pclose(l_fapl);
+ H5Pclose(lapl_id);
+ H5Pclose(new_lapl_id);
+ H5Pclose(core_fapl);
+ H5Pclose(stdio_fapl);
} H5E_END_TRY;
return -1;
} /* end external_set_elink_fapl3() */
@@ -3933,13 +3936,15 @@ external_set_elink_fapl3(hbool_t new_format)
*-------------------------------------------------------------------------
*/
static int
-external_set_elink_acc_flags(hid_t fapl, hbool_t new_format)
+external_set_elink_acc_flags(const char H5_ATTR_UNUSED *env_h5_drvr,
+ hid_t fapl, hbool_t new_format)
{
hid_t file1 = -1, file2 = -1, group = -1, subgroup = -1, gapl = -1;
char filename1[NAME_BUF_SIZE],
filename2[NAME_BUF_SIZE];
herr_t ret;
unsigned flags;
+ char *driver = NULL; /* VFD string (from env variable) */
if(new_format)
TESTING("H5Pset/get_elink_acc_flags() (w/new group format)")
@@ -3992,6 +3997,15 @@ external_set_elink_acc_flags(hid_t fapl, hbool_t new_format)
} H5E_END_TRY;
if(subgroup != FAIL) TEST_ERROR
+ /* Attempt to set SWMR flags on gapl.
+ * This is just a smoke check of the flags. The actual external link
+ * functionality is tested in the SWMR tests.
+ */
+ /* Set SWMR reader flags on gapl */
+ if(H5Pset_elink_acc_flags(gapl, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ) < 0) TEST_ERROR
+ /* Set SWMR writer flags on gapl */
+ if(H5Pset_elink_acc_flags(gapl, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE) < 0) TEST_ERROR
+
/* Attempt to set invalid flags on gapl */
H5E_BEGIN_TRY {
ret = H5Pset_elink_acc_flags(gapl, H5F_ACC_TRUNC);
@@ -4005,10 +4019,95 @@ external_set_elink_acc_flags(hid_t fapl, hbool_t new_format)
ret = H5Pset_elink_acc_flags(gapl, H5F_ACC_CREAT);
} H5E_END_TRY;
if(ret != FAIL) TEST_ERROR
+ /* SWMR reader with write access */
+ H5E_BEGIN_TRY {
+ ret = H5Pset_elink_acc_flags(gapl, H5F_ACC_RDWR | H5F_ACC_SWMR_READ);
+ } H5E_END_TRY;
+ if(ret != FAIL) TEST_ERROR
+ /* SWMR writer with read-only access */
+ H5E_BEGIN_TRY {
+ ret = H5Pset_elink_acc_flags(gapl, H5F_ACC_RDONLY | H5F_ACC_SWMR_WRITE);
+ } H5E_END_TRY;
+ if(ret != FAIL) TEST_ERROR
/* Close file1 */
if(H5Fclose(file1) < 0) TEST_ERROR
+ /* Only run this part with VFDs that support SWMR */
+ driver = HDgetenv("HDF5_DRIVER");
+ if(H5FD_supports_swmr_test(driver)) {
+
+ /* Reopen file1, with read-write and SWMR-write access */
+ /* Only supported under the latest file format */
+ if(new_format) {
+ if((file1 = H5Fopen(filename1, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0) FAIL_STACK_ERROR
+
+ /* Open a group through the external link using default gapl */
+ if((group = H5Gopen2(file1, "/ext_link/group", H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+ /* Verify that the correct parameters have been set on file2 */
+ if((file2 = H5Iget_file_id(group)) < 0) FAIL_STACK_ERROR
+ if(H5Fget_intent(file2, &flags) < 0) FAIL_STACK_ERROR
+ if(flags != (H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE)) TEST_ERROR
+
+ /* Close file2 and group */
+ if(H5Gclose(group) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(file2) < 0) FAIL_STACK_ERROR
+
+ /* Set elink access flags on gapl to be H5F_ACC_RDWR (dropping SWMR_WRITE) */
+ if(H5Pset_elink_acc_flags(gapl, H5F_ACC_RDWR) < 0) FAIL_STACK_ERROR
+
+ /* Open a group through the external link using gapl */
+ if((group = H5Gopen2(file1, "/ext_link/group", gapl)) < 0) FAIL_STACK_ERROR
+
+ /* Verify that the correct parameters have been set on file2 */
+ if((file2 = H5Iget_file_id(group)) < 0) FAIL_STACK_ERROR
+ if(H5Fget_intent(file2, &flags) < 0) FAIL_STACK_ERROR
+ if(flags != H5F_ACC_RDWR) TEST_ERROR
+
+ /* Close file2 and group */
+ if(H5Gclose(group) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(file2) < 0) FAIL_STACK_ERROR
+
+ /* Close file1 */
+ if(H5Fclose(file1) < 0) TEST_ERROR
+ }
+
+ /* Reopen file1, with read-only and SWMR-read access */
+ if((file1 = H5Fopen(filename1, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0) FAIL_STACK_ERROR
+
+ /* Open a group through the external link using default gapl */
+ if((group = H5Gopen2(file1, "/ext_link/group", H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+
+ /* Verify that the correct parameters have been set on file2 */
+ if((file2 = H5Iget_file_id(group)) < 0) FAIL_STACK_ERROR
+ if(H5Fget_intent(file2, &flags) < 0) FAIL_STACK_ERROR
+ if(flags != (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ)) TEST_ERROR
+
+ /* Close file2 and group */
+ if(H5Gclose(group) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(file2) < 0) FAIL_STACK_ERROR
+
+ /* Set elink access flags on gapl to be H5F_ACC_RDWR (dropping SWMR_WRITE) */
+ if(H5Pset_elink_acc_flags(gapl, H5F_ACC_RDONLY) < 0) FAIL_STACK_ERROR
+
+ /* Open a group through the external link using gapl */
+ if((group = H5Gopen2(file1, "/ext_link/group", gapl)) < 0) FAIL_STACK_ERROR
+
+ /* Verify that the correct parameters have been set on file2 */
+ if((file2 = H5Iget_file_id(group)) < 0) FAIL_STACK_ERROR
+ if(H5Fget_intent(file2, &flags) < 0) FAIL_STACK_ERROR
+ if(flags != H5F_ACC_RDONLY) TEST_ERROR
+
+ /* Close file2 and group */
+ if(H5Gclose(group) < 0) FAIL_STACK_ERROR
+ if(H5Fclose(file2) < 0) FAIL_STACK_ERROR
+
+ /* Close file1 */
+ if(H5Fclose(file1) < 0) TEST_ERROR
+ } /* end if */
+
+
/* Verify that H5Fcreate and H5Fopen reject H5F_ACC_DEFAULT */
H5E_BEGIN_TRY {
file1 = H5Fcreate(filename1, H5F_ACC_DEFAULT, H5P_DEFAULT, fapl);
@@ -14545,7 +14644,7 @@ main(void)
/* This test cannot run with the EFC because the EFC cannot currently
* reopen a cached file with a different intent */
- nerrors += external_set_elink_acc_flags(my_fapl, new_format) < 0 ? 1 : 0;
+ nerrors += external_set_elink_acc_flags(env_h5_drvr, my_fapl, new_format) < 0 ? 1 : 0;
/* Try external link tests both with and without the external file cache
*/
diff --git a/test/mergemsg.h5 b/test/mergemsg.h5
index 3a9e352..55c3135 100644
--- a/test/mergemsg.h5
+++ b/test/mergemsg.h5
Binary files differ
diff --git a/test/multi_file_v16-s.h5 b/test/multi_file_v16-s.h5
index e990e95..2d4de48 100644
--- a/test/multi_file_v16-s.h5
+++ b/test/multi_file_v16-s.h5
Binary files differ
diff --git a/test/noencoder.h5 b/test/noencoder.h5
index 84f8752..6b973cd 100644
--- a/test/noencoder.h5
+++ b/test/noencoder.h5
Binary files differ
diff --git a/test/objcopy.c b/test/objcopy.c
index 82be446..4c10602 100644
--- a/test/objcopy.c
+++ b/test/objcopy.c
@@ -40,7 +40,13 @@
#define H5P_TESTING
#include "H5Ppkg.h" /* Property Lists */
-#include "H5Dprivate.h" /* Datasets (for EFL property name) */
+/*
+ * This file needs to access private information from the H5D package.
+ * This file also needs to access the dataset testing code.
+ */
+#define H5D_FRIEND /*suppress error about including H5Dpkg */
+#define H5D_TESTING
+#include "H5Dpkg.h" /* Datasets */
const char *FILENAME[] = {
@@ -77,7 +83,13 @@ const char *FILENAME[] = {
#define NAME_DATASET_SIMPLE3 "dataset_simple_another_copy"
#define NAME_DATASET_COMPOUND "dataset_compound"
#define NAME_DATASET_CHUNKED "dataset_chunked"
+#define NAME_DATASET_CHUNKED_SINGLE "dataset_chunked_single"
#define NAME_DATASET_CHUNKED2 "dataset_chunked2"
+#define NAME_DATASET_CHUNKED2_SINGLE "dataset_chunked2_single"
+#define NAME_DATASET_CHUNKED3 "dataset_chunked3"
+#define NAME_DATASET_CHUNKED3_SINGLE "dataset_chunked3_single"
+#define NAME_DATASET_CHUNKED4 "dataset_chunked4"
+#define NAME_DATASET_CHUNKED4_SINGLE "dataset_chunked4_single"
#define NAME_DATASET_COMPACT "dataset_compact"
#define NAME_DATASET_EXTERNAL "dataset_ext"
#define NAME_DATASET_NAMED_DTYPE "dataset_named_dtype"
@@ -85,7 +97,9 @@ const char *FILENAME[] = {
#define NAME_DATASET_MULTI_OHDR "dataset_multi_ohdr"
#define NAME_DATASET_MULTI_OHDR2 "dataset_multi_ohdr2"
#define NAME_DATASET_VL "dataset_vl"
+#define NAME_DATASET_VL2 "dataset_vl2"
#define NAME_DATASET_VL_VL "dataset_vl_vl"
+#define NAME_DATASET_VL_VL2 "dataset_vl_vl2"
#define NAME_DATASET_CMPD_VL "dataset_cmpd_vl"
#define NAME_DATASET_SUB_SUB "/g0/g00/g000/dataset_simple"
#define NAME_GROUP_UNCOPIED "/uncopied"
@@ -121,12 +135,17 @@ const char *FILENAME[] = {
#define ATTR_NAME_LEN 80
#define DIM_SIZE_1 12
#define DIM_SIZE_2 6
+#define MAX_DIM_SIZE_1 100
+#define MAX_DIM_SIZE_2 80
#define CHUNK_SIZE_1 5 /* Not an even fraction of dimension sizes, so we test copying partial chunks */
#define CHUNK_SIZE_2 5
#define NUM_SUB_GROUPS 20
#define NUM_WIDE_LOOP_GROUPS 10
#define NUM_DATASETS 10
+#define COPY_OPEN_OBJ_NAME "CopyOpenObj"
+#define COPY_OPEN_OBJ_SIZE sizeof(hbool_t)
+
char src_obj_full_name[215]; /* the full path + name of the object to be copied */
unsigned num_attributes_g; /* Number of attributes created */
@@ -147,6 +166,8 @@ static int
compare_datasets(hid_t did, hid_t did2, hid_t pid, const void *wbuf);
static int
compare_groups(hid_t gid, hid_t gid2, hid_t pid, int depth, unsigned copy_flags);
+static int
+compare_idx_type(hid_t fapl, hid_t did, H5D_chunk_index_t new_type, H5D_chunk_index_t old_type);
/*-------------------------------------------------------------------------
@@ -1171,6 +1192,18 @@ compare_datasets(hid_t did, hid_t did2, hid_t pid, const void *wbuf)
void *rbuf2 = NULL; /* Buffer for reading raw data */
H5D_space_status_t space_status; /* Dataset's raw data space status */
H5D_space_status_t space_status2; /* Dataset's raw data space status */
+ hbool_t copy_open_obj = FALSE; /* Indicate if we are copying open objects */
+
+ /* Retrieve the private "copy open object" property from the property list, if it's non-DEFAULT */
+ if(pid != H5P_DEFAULT) {
+ htri_t copy_open_obj_exists;
+
+ if((copy_open_obj_exists = H5Pexist(pid, COPY_OPEN_OBJ_NAME)) < 0) TEST_ERROR
+ if(copy_open_obj_exists) {
+ if(H5Pget(pid, COPY_OPEN_OBJ_NAME, &copy_open_obj) < 0)
+ TEST_ERROR
+ } /* end if */
+ } /* end if */
/* Check the datatypes are equal */
@@ -1243,16 +1276,22 @@ compare_datasets(hid_t did, hid_t did2, hid_t pid, const void *wbuf)
if(offset1 != offset2) TEST_ERROR
if(size1 != size2) TEST_ERROR
- if(strcmp(name1, name2) != 0) TEST_ERROR
+ if(HDstrcmp(name1, name2) != 0) TEST_ERROR
}
- /* Remove external file information from the dcpls */
-
- /* reset external file information from the dcpls */
+ /* Reset external file information from the dcpls */
+ /* (Directly removing default property causes memory leak) */
if (H5P_reset_external_file_test(dcpl) < 0) TEST_ERROR
if (H5P_reset_external_file_test(dcpl2) < 0) TEST_ERROR
}
+ /* Check for copying open objects */
+ if(copy_open_obj) {
+ /* Reset layout information from the dcpls */
+ if(H5P_reset_layout_test(dcpl) < 0) TEST_ERROR
+ if(H5P_reset_layout_test(dcpl2) < 0) TEST_ERROR
+ } /* end if */
+
/* Compare the rest of the dataset creation property lists */
if(H5Pequal(dcpl, dcpl2) != TRUE) TEST_ERROR
@@ -1512,6 +1551,47 @@ error:
/*-------------------------------------------------------------------------
+ * Function: compare_idx_type
+ *
+ * Purpose: If using new format, the index array type should be NEW_TYPE
+ * If not, the index array type should be OLD_TYPE
+ *
+ * Return: TRUE if the index type retrieved for the dataset DID is
+ * as expected
+ * FALSE if not
+ *
+ * Programmer: Vailin Choi; August 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+compare_idx_type(hid_t fapl, hid_t did, H5D_chunk_index_t new_type, H5D_chunk_index_t old_type)
+{
+ H5D_chunk_index_t idx_type; /* Dataset chunk index type */
+ H5F_libver_t low; /* File format low bound */
+
+ /* Get the chunk index type */
+ if(H5D__layout_idx_type_test(did, &idx_type) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if we are using the latest version of the format */
+ if(H5Pget_libver_bounds(fapl, &low, NULL) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify index type */
+ if(low == H5F_LIBVER_LATEST) {
+ if(idx_type != new_type)
+ TEST_ERROR
+ } else if(idx_type != old_type)
+ TEST_ERROR
+
+ return TRUE;
+error:
+ return FALSE;
+} /* compare_idx_type() */
+
+
+/*-------------------------------------------------------------------------
* Function: test_copy_named_datatype
*
* Purpose: Create name datatype in SRC file and copy it to DST file
@@ -2311,7 +2391,7 @@ test_copy_dataset_compound(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t
#endif /* H5_CLEAR_MEMORY */
for(i = 0; i < DIM_SIZE_1; i++) {
buf[i].a = i;
- buf[i].d = 1.0F / (i + 1);
+ buf[i].d = (double)1.0F / (double)(i + 1);
} /* end for */
/* Initialize the filenames */
@@ -2443,9 +2523,9 @@ test_copy_dataset_chunked(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t
/* set initial data values */
for(i = 0; i < DIM_SIZE_1; i++) {
- buf1d[i] = (float)(i / 2.0F);
+ buf1d[i] = (float)i / 2.0F;
for(j = 0; j < DIM_SIZE_2; j++)
- buf2d[i][j] = (float)(i + (j / 100.0F));
+ buf2d[i][j] = (float)i + ((float)j / 100.0F);
} /* end for */
/* Initialize the filenames */
@@ -2487,6 +2567,36 @@ test_copy_dataset_chunked(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t
/* close the dataset */
if(H5Dclose(did) < 0) TEST_ERROR
+ /*
+ * Create 1-D dataset: chunked, non-filtered, with data
+ * dims=max dims=chunk dims
+ * H5D_ALLOC_TIME_INC (default)
+ */
+ /* create 1-D dataspace */
+ if((sid = H5Screate_simple(1, dim1d, dim1d)) < 0) TEST_ERROR
+
+ /* create and set chunk plist */
+ if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(pid, 1, dim1d) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED_SINGLE, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* close chunk plist */
+ if(H5Pclose(pid) < 0) TEST_ERROR
+
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1d) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
/* Set 2-D dataspace dimensions */
dim2d[0] = DIM_SIZE_1;
dim2d[1] = DIM_SIZE_2;
@@ -2501,9 +2611,26 @@ test_copy_dataset_chunked(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t
/* create dataset */
if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED2, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2d) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* Set allocation time to early */
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED3, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
/* close chunk plist */
if(H5Pclose(pid) < 0) TEST_ERROR
+
/* write data into file */
if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2d) < 0) TEST_ERROR
@@ -2516,6 +2643,84 @@ test_copy_dataset_chunked(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t
/* close the dataset */
if(H5Dclose(did) < 0) TEST_ERROR
+ /*
+ * Create 2-D dataset: chunked, non-filtered, with data, dims=chunk dims,
+ * H5D_ALLOC_TIME_INC (default)
+ */
+
+ /* create 2-D dataspace */
+ if((sid = H5Screate_simple(2, dim2d, NULL)) < 0) TEST_ERROR
+
+ /* create and set chunk plist to be the same as dims2d */
+ if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(pid, 2, dim2d) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED2_SINGLE, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2d) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /*
+ * Create 2-D dataset: chunked, non-filtered, with data, dims=chunk dims,
+ * H5D_ALLOC_TIME_EARLY
+ */
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED3_SINGLE, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2d) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* close chunk plist */
+ if(H5Pclose(pid) < 0) TEST_ERROR
+
+ /*
+ * Create 2-D dataset: chunked, non-filtered, with data, dims=max dims=chunk dims,
+ * H5D_ALLOC_TIME_LATE
+ */
+ /* create 2-D dataspace */
+ if((sid = H5Screate_simple(2, dim2d, dim2d)) < 0) TEST_ERROR
+
+ /* create and set chunk plist to be the same as dims2d */
+ if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(pid, 2, dim2d) < 0) TEST_ERROR
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_LATE) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED4_SINGLE, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2d) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* close chunk plist */
+ if(H5Pclose(pid) < 0) TEST_ERROR
+
/* close the SRC file */
if(H5Fclose(fid_src) < 0) TEST_ERROR
@@ -2532,6 +2737,12 @@ test_copy_dataset_chunked(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t
/* copy the datasets from SRC to DST */
if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED, fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED2, fid_dst, NAME_DATASET_CHUNKED2, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED3, fid_dst, NAME_DATASET_CHUNKED3, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED_SINGLE, fid_dst, NAME_DATASET_CHUNKED_SINGLE, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED2_SINGLE, fid_dst, NAME_DATASET_CHUNKED2_SINGLE, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED3_SINGLE, fid_dst, NAME_DATASET_CHUNKED3_SINGLE, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED4_SINGLE, fid_dst, NAME_DATASET_CHUNKED4_SINGLE, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
/* open the dataset for copy */
if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
@@ -2539,6 +2750,30 @@ test_copy_dataset_chunked(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t
/* open the 1-D destination dataset */
if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_EARRAY, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, buf1d) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* open the dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the 1-D destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_SINGLE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
/* Check if the datasets are equal */
if(compare_datasets(did, did2, H5P_DEFAULT, buf1d) != TRUE) TEST_ERROR
@@ -2554,6 +2789,87 @@ test_copy_dataset_chunked(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t
/* open the destination dataset */
if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED2, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_FARRAY, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, buf2d) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* open the 2-D dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED3, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED3, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_NONE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, buf2d) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* open the 2-D dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED2_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED2_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_SINGLE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, buf2d) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* open the 2-D dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED3_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED3_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_SINGLE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, buf2d) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* open the 2-D dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED4_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED4_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_SINGLE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
/* Check if the datasets are equal */
if(compare_datasets(did, did2, H5P_DEFAULT, buf2d) != TRUE) TEST_ERROR
@@ -2652,6 +2968,36 @@ test_copy_dataset_chunked_empty(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* close the dataset */
if(H5Dclose(did) < 0) TEST_ERROR
+ /*
+ * create 1-D dataset: chunked, empty, non-filtered,
+ * dims=max dims=chunk dims, H5D_ALLOC_TIME_INC(default)
+ */
+
+ /* Set 1-D dataspace dimensions */
+ dim1d[0] = DIM_SIZE_1;
+
+ /* create 1-D dataspace */
+ if((sid = H5Screate_simple(1, dim1d, dim1d)) < 0) TEST_ERROR
+
+ /* create and set chunk plist */
+ if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(pid, 1, dim1d) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED_SINGLE, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* close chunk plist */
+ if(H5Pclose(pid) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
/* Set 2-D dataspace dimensions */
dim2d[0] = DIM_SIZE_1;
dim2d[1] = DIM_SIZE_2;
@@ -2666,6 +3012,67 @@ test_copy_dataset_chunked_empty(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* create dataset */
if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED2, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* Set allocation time to early */
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED3, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* close chunk plist */
+ if(H5Pclose(pid) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /*
+ * create 2-D dataset: chunked, empty, non-filtered,
+ * dims=chunk dims, H5D_ALLOC_TIME_INC (default)
+ */
+
+ /* Set 2-D dataspace dimensions */
+ dim2d[0] = DIM_SIZE_1;
+ dim2d[1] = DIM_SIZE_2;
+
+ /* create 2-D dataspace */
+ if((sid = H5Screate_simple(2, dim2d, NULL)) < 0) TEST_ERROR
+
+ /* create and set chunk plist */
+ if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(pid, 2, dim2d) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED2_SINGLE, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /*
+ * create 2-D dataset: chunked, empty, non-filtered, dims=chunk dims
+ * H5D_ALLOC_TIME_EARLY
+ */
+ /* Set allocation time to early */
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED3_SINGLE, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
/* close chunk plist */
if(H5Pclose(pid) < 0) TEST_ERROR
@@ -2678,6 +3085,40 @@ test_copy_dataset_chunked_empty(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* close the dataset */
if(H5Dclose(did) < 0) TEST_ERROR
+ /*
+ * create 2-D dataset: chunked, empty, non-filtered,
+ * dims=max dims=chunk dims, H5D_ALLOC_TIME_LATE
+ */
+
+ /* Set 2-D dataspace dimensions */
+ dim2d[0] = DIM_SIZE_1;
+ dim2d[1] = DIM_SIZE_2;
+
+ /* create 2-D dataspace */
+ if((sid = H5Screate_simple(2, dim2d, dim2d)) < 0) TEST_ERROR
+
+ /* create and set chunk plist */
+ if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(pid, 2, dim2d) < 0) TEST_ERROR
+
+ /* Set allocation time to late */
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_LATE) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED4_SINGLE, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close chunk plist */
+ if(H5Pclose(pid) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
/* close the SRC file */
if(H5Fclose(fid_src) < 0) TEST_ERROR
@@ -2693,14 +3134,23 @@ test_copy_dataset_chunked_empty(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* copy the datasets from SRC to DST */
if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED, fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED_SINGLE, fid_dst, NAME_DATASET_CHUNKED_SINGLE, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED2, fid_dst, NAME_DATASET_CHUNKED2, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED2_SINGLE, fid_dst, NAME_DATASET_CHUNKED2_SINGLE, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED3, fid_dst, NAME_DATASET_CHUNKED3, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED3_SINGLE, fid_dst, NAME_DATASET_CHUNKED3_SINGLE, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED4_SINGLE, fid_dst, NAME_DATASET_CHUNKED4_SINGLE, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
- /* open the dataset for copy */
+ /* open the dataset NAME_DATASET_CHUNKED in SRC file */
if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
- /* open the destination dataset */
+ /* open the copied dataset NAME_DATASET_CHUNKED at destination */
if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_EARRAY, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
/* Check if the datasets are equal */
if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
@@ -2710,12 +3160,113 @@ test_copy_dataset_chunked_empty(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* close the source dataset */
if(H5Dclose(did) < 0) TEST_ERROR
- /* open the dataset for copy */
+ /* open the dataset NAME_DATASET_CHUNKED_SINGLE in SRC file */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the copied dataset NAME_DATASET_CHUNKED_SINGLE at destination */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_SINGLE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* open the dataset NAME_DATASET_CHUNKED2 in SRC file */
if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED2, H5P_DEFAULT)) < 0) TEST_ERROR
- /* open the destination dataset */
+ /* open the copied dataset NAME_DATASET_CHUNKED2 at destination */
if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED2, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_FARRAY, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* open the dataset NAME_DATASET_CHUNKED2_SINGLE in SRC file */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED2_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the copied dataset NAME_DATASET_CHUNKED2_SINGLE at destination */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED2_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_SINGLE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* open the dataset NAME_DATASET_CHUNKED3 in SRC file */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED3, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the copied dataset NAME_DATASET_CHUNKED3 at destination */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED3, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_NONE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* open the dataset NAME_DATASET_CHUNKED3_SINGLE in SRC file */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED3_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the copied dataset NAME_DATASET_CHUNKED3_SINGLE at destination */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED3_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_SINGLE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* open the dataset NAME_DATASET_CHUNKED4_SINGLE in SRC file */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED4_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the copied dataset NAME_DATASET_CHUNKED4_SINGLE at destination */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED4_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_SINGLE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
/* Check if the datasets are equal */
if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
@@ -2786,9 +3337,9 @@ test_copy_dataset_chunked_sparse(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* set initial data values */
for(i = 0; i < DIM_SIZE_1; i++) {
- buf1d[i] = (float)(i / 10.0F);
+ buf1d[i] = (float)i / 10.0F;
for(j = 0; j < DIM_SIZE_2; j++)
- buf2d[i][j] = (float)(i + (j / 100.0F));
+ buf2d[i][j] = (float)i + ((float)j / 100.0F);
} /* end for */
/* Initialize the filenames */
@@ -2815,6 +3366,38 @@ test_copy_dataset_chunked_sparse(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* create dataset */
if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1d) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* Set extended dataset dimensions */
+ new_dim1d[0] = DIM_SIZE_1 * 2;
+
+ /* Extend dataset's dimensions */
+ if(H5Dset_extent(did, new_dim1d) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* Change 1-D dataspace dimensions */
+ dim1d[0] = DIM_SIZE_1;
+ max_dim1d[0] = MAX_DIM_SIZE_1;
+
+ /* create 1-D dataspace */
+ if((sid = H5Screate_simple(1, dim1d, max_dim1d)) < 0) TEST_ERROR
+
+ /* Set allocation time to early */
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED3, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
/* close chunk plist */
if(H5Pclose(pid) < 0) TEST_ERROR
@@ -2836,6 +3419,7 @@ test_copy_dataset_chunked_sparse(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* close the dataset */
if(H5Dclose(did) < 0) TEST_ERROR
+
/* Set 2-D dataspace dimensions */
dim2d[0]=DIM_SIZE_1;
dim2d[1]=DIM_SIZE_2;
@@ -2852,6 +3436,41 @@ test_copy_dataset_chunked_sparse(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* create dataset */
if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED2, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2d) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* Set extended dataset dimensions */
+ new_dim2d[0] = DIM_SIZE_1 * 2;
+ new_dim2d[1] = DIM_SIZE_2 * 2;
+
+ /* Extend dataset's dimensions */
+ if(H5Dset_extent(did, new_dim2d) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* Change 2-D dataspace dimensions */
+ dim2d[0] = DIM_SIZE_1;
+ dim2d[1] = DIM_SIZE_2;
+ max_dim2d[0] = MAX_DIM_SIZE_1;
+ max_dim2d[1] = MAX_DIM_SIZE_2;
+
+ /* create 2-D dataspace */
+ if((sid = H5Screate_simple(2, dim2d, max_dim2d)) < 0) TEST_ERROR
+
+ /* Set allocation time to early */
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED4, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
/* close chunk plist */
if(H5Pclose(pid) < 0) TEST_ERROR
@@ -2874,6 +3493,7 @@ test_copy_dataset_chunked_sparse(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* close the dataset */
if(H5Dclose(did) < 0) TEST_ERROR
+
/* close the SRC file */
if(H5Fclose(fid_src) < 0) TEST_ERROR
@@ -2890,6 +3510,8 @@ test_copy_dataset_chunked_sparse(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* copy the datasets from SRC to DST */
if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED, fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED2, fid_dst, NAME_DATASET_CHUNKED2, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED3, fid_dst, NAME_DATASET_CHUNKED3, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED4, fid_dst, NAME_DATASET_CHUNKED4, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
/* open the dataset for copy */
if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
@@ -2897,6 +3519,10 @@ test_copy_dataset_chunked_sparse(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* open the destination dataset */
if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_EARRAY, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
/* Check if the datasets are equal */
if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
@@ -2906,12 +3532,57 @@ test_copy_dataset_chunked_sparse(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* close the source dataset */
if(H5Dclose(did) < 0) TEST_ERROR
+
/* open the dataset for copy */
if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED2, H5P_DEFAULT)) < 0) TEST_ERROR
/* open the destination dataset */
if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED2, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_BT2, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* open the dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED3, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED3, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_NONE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* open the dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED4, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED4, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* Check if the array index type is correct */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_NONE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
/* Check if the datasets are equal */
if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
@@ -2921,6 +3592,7 @@ test_copy_dataset_chunked_sparse(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* close the source dataset */
if(H5Dclose(did) < 0) TEST_ERROR
+
/* close the SRC file */
if(H5Fclose(fid_src) < 0) TEST_ERROR
@@ -2994,8 +3666,8 @@ test_copy_dataset_compressed(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid
if((fid_src = H5Fcreate(src_filename, H5F_ACC_TRUNC, fcpl_src, src_fapl)) < 0) TEST_ERROR
/* Set dataspace dimensions */
- dim2d[0]=DIM_SIZE_1;
- dim2d[1]=DIM_SIZE_2;
+ dim2d[0] = DIM_SIZE_1;
+ dim2d[1] = DIM_SIZE_2;
/* create dataspace */
if((sid = H5Screate_simple(2, dim2d, NULL)) < 0) TEST_ERROR
@@ -3008,6 +3680,107 @@ test_copy_dataset_compressed(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid
/* create dataset */
if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* Set allocation time to early */
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED2, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* close chunk plist */
+ if(H5Pclose(pid) < 0) TEST_ERROR
+
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /*
+ * create 2-D dataset: chunked, filtered, with data
+ * dims=max dims=chunk dims, H5D_ALLOC_TIME_INC(default)
+ */
+ /* create dataspace */
+ if((sid = H5Screate_simple(2, dim2d, dim2d)) < 0) TEST_ERROR
+
+ /* create and set comp & chunk plist */
+ if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(pid, 2, dim2d) < 0) TEST_ERROR
+ if(H5Pset_deflate(pid, 9) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED2_SINGLE, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /*
+ * create 2-D dataset: chunked, filtered, with data
+ * dims=chunk dims, H5D_ALLOC_TIME_EARLY
+ */
+ /* create dataspace */
+ if((sid = H5Screate_simple(2, dim2d, NULL)) < 0) TEST_ERROR
+
+ /* Set allocation time to early */
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED3_SINGLE, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* close chunk plist */
+ if(H5Pclose(pid) < 0) TEST_ERROR
+
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /*
+ * create 2-D dataset: chunked, filtered, with data
+ * dims=chunk dims, H5D_ALLOC_TIME_LATE
+ */
+ /* create dataspace */
+ if((sid = H5Screate_simple(2, dim2d, NULL)) < 0) TEST_ERROR
+
+ /* create and set comp & chunk plist */
+ if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(pid, 2, dim2d) < 0) TEST_ERROR
+ if(H5Pset_deflate(pid, 9) < 0) TEST_ERROR
+
+ /* Set allocation time to late */
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_LATE) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED4_SINGLE, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
/* close chunk plist */
if(H5Pclose(pid) < 0) TEST_ERROR
@@ -3038,6 +3811,17 @@ test_copy_dataset_compressed(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid
/* copy the dataset from SRC to DST */
if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED, fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED2, fid_dst, NAME_DATASET_CHUNKED2, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED2_SINGLE, fid_dst, NAME_DATASET_CHUNKED2_SINGLE, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED3_SINGLE, fid_dst, NAME_DATASET_CHUNKED3_SINGLE, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED4_SINGLE, fid_dst, NAME_DATASET_CHUNKED4_SINGLE, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+
+ if(H5Fclose(fid_dst) < 0) TEST_ERROR
+ if(H5Fclose(fid_src) < 0) TEST_ERROR
+
+ /* Re-open the source and destination files for verification */
+ if((fid_src = H5Fopen(src_filename, H5F_ACC_RDONLY, src_fapl)) < 0) TEST_ERROR
+ if((fid_dst = H5Fopen(dst_filename, H5F_ACC_RDONLY, dst_fapl)) < 0) TEST_ERROR
/* open the dataset for copy */
if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
@@ -3045,6 +3829,82 @@ test_copy_dataset_compressed(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid
/* open the destination dataset */
if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_FARRAY, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* open the dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED2, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED2, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_FARRAY, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* open the dataset NAME_DATASET_CHUNKED2_SINGLE at source */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED2_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the copied dataset NAME_DATASET_CHUNKED2_SINGLE at destination */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED2_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_SINGLE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* open the dataset NAME_DATASET_CHUNKED3_SINGLE at source */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED3_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the copied dataset NAME_DATASET_CHUNKED3_SINGLE at destination */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED3_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_SINGLE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* open the dataset NAME_DATASET_CHUNKED4_SINGLE at source */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED4_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the copied dataset NAME_DATASET_CHUNKED4_SINGLE at destination */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED4_SINGLE, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_SINGLE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
/* Check if the datasets are equal */
if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
@@ -3080,6 +3940,150 @@ error:
/*-------------------------------------------------------------------------
+ * Function: test_copy_dataset_no_edge_filt
+ *
+ * Purpose: Create a compressed, chunked dataset in SRC file and copy it to DST file
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ * Programmer: Neil Fortner
+ * Tuesday, May 11, 2010
+ * Mostly copied from test_copy_dataset_compressed, by
+ * Quincey Koziol
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_copy_dataset_no_edge_filt(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
+ hid_t dst_fapl)
+{
+#ifdef H5_HAVE_FILTER_DEFLATE
+ hid_t fid_src = -1, fid_dst = -1; /* File IDs */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t pid = -1; /* Dataset creation property list ID */
+ hid_t did = -1, did2 = -1; /* Dataset IDs */
+ hsize_t dim2d[2]; /* Dataset dimensions */
+ hsize_t chunk_dim2d[2] ={CHUNK_SIZE_1, CHUNK_SIZE_2}; /* Chunk dimensions */
+ float buf[DIM_SIZE_1][DIM_SIZE_2]; /* Buffer for writing data */
+ int i, j; /* Local index variables */
+ char src_filename[NAME_BUF_SIZE];
+ char dst_filename[NAME_BUF_SIZE];
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ TESTING("H5Ocopy(): compressed dataset with no edge filters");
+
+#ifndef H5_HAVE_FILTER_DEFLATE
+ SKIPPED();
+ puts(" Deflation filter not available");
+#else /* H5_HAVE_FILTER_DEFLATE */
+ /* set initial data values */
+ for (i=0; i<DIM_SIZE_1; i++)
+ for (j=0; j<DIM_SIZE_2; j++)
+ buf[i][j] = 100.0F; /* Something easy to compress */
+
+ /* Initialize the filenames */
+ h5_fixname(FILENAME[0], src_fapl, src_filename, sizeof src_filename);
+ h5_fixname(FILENAME[1], dst_fapl, dst_filename, sizeof dst_filename);
+
+ /* Reset file address checking info */
+ addr_reset();
+
+ /* create source file */
+ if((fid_src = H5Fcreate(src_filename, H5F_ACC_TRUNC, fcpl_src, src_fapl)) < 0) TEST_ERROR
+
+ /* Set dataspace dimensions */
+ dim2d[0]=DIM_SIZE_1;
+ dim2d[1]=DIM_SIZE_2;
+
+ /* create dataspace */
+ if((sid = H5Screate_simple(2, dim2d, NULL)) < 0) TEST_ERROR
+
+ /* create and set comp & chunk plist, and disable partial chunk filters */
+ if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(pid, 2, chunk_dim2d) < 0) TEST_ERROR
+ if(H5Pset_deflate(pid, 9) < 0) TEST_ERROR
+ if(H5Pset_chunk_opts(pid, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0) TEST_ERROR
+
+ /* create dataset */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* close chunk plist */
+ if(H5Pclose(pid) < 0) TEST_ERROR
+
+ /* write data into file */
+ if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR
+
+ /* close dataspace */
+ if(H5Sclose(sid) < 0) TEST_ERROR
+
+ /* attach attributes to the dataset */
+ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* close the SRC file */
+ if(H5Fclose(fid_src) < 0) TEST_ERROR
+
+
+ /* open the source file with read-only */
+ if((fid_src = H5Fopen(src_filename, H5F_ACC_RDONLY, src_fapl)) < 0) TEST_ERROR
+
+ /* create destination file */
+ if((fid_dst = H5Fcreate(dst_filename, H5F_ACC_TRUNC, fcpl_dst, dst_fapl)) < 0) TEST_ERROR
+
+ /* Create an uncopied object in destination file so that addresses in source and destination files aren't the same */
+ if(H5Gclose(H5Gcreate2(fid_dst, NAME_GROUP_UNCOPIED, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* copy the dataset from SRC to DST */
+ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED, fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+
+ /* open the dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* H5Pset_chunk_opts() will set layout version to 4 which will use latest indexing available */
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_FARRAY, H5D_CHUNK_IDX_FARRAY) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* close the SRC file */
+ if(H5Fclose(fid_src) < 0) TEST_ERROR
+
+ /* close the DST file */
+ if(H5Fclose(fid_dst) < 0) TEST_ERROR
+
+ PASSED();
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ return 0;
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(did2);
+ H5Dclose(did);
+ H5Pclose(pid);
+ H5Sclose(sid);
+ H5Fclose(fid_dst);
+ H5Fclose(fid_src);
+ } H5E_END_TRY;
+ return 1;
+#endif /* H5_HAVE_FILTER_DEFLATE */
+} /* end test_copy_dataset_no_edge_filt */
+
+
+/*-------------------------------------------------------------------------
* Function: test_copy_dataset_compact
*
* Purpose: Create a compact dataset in SRC file and copy it to DST file
@@ -3110,7 +4114,7 @@ test_copy_dataset_compact(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t
/* set initial data values */
for (i=0; i<DIM_SIZE_1; i++)
for (j=0; j<DIM_SIZE_2; j++)
- buf[i][j] = (float)(i+j/100.0F);
+ buf[i][j] = (float)i + (float)j / 100.0F;
/* Initialize the filenames */
h5_fixname(FILENAME[0], src_fapl, src_filename, sizeof src_filename);
@@ -4199,6 +5203,19 @@ test_copy_dataset_chunked_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid
/* create dataset at SRC file */
if((did = H5Dcreate2(fid_src, NAME_DATASET_VL, tid, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+ /* write data into file */
+ if(H5Dwrite(did, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR
+
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* Set allocation time to early */
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR
+
+ /* create dataset at SRC file */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_VL2, tid, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
/* close chunk plist */
if(H5Pclose(pid) < 0) TEST_ERROR
@@ -4208,6 +5225,7 @@ test_copy_dataset_chunked_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid
/* close the dataset */
if(H5Dclose(did) < 0) TEST_ERROR
+
/* close the SRC file */
if(H5Fclose(fid_src) < 0) TEST_ERROR
@@ -4223,6 +5241,7 @@ test_copy_dataset_chunked_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid
/* copy the dataset from SRC to DST */
if(H5Ocopy(fid_src, NAME_DATASET_VL, fid_dst, NAME_DATASET_VL, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_VL2, fid_dst, NAME_DATASET_VL2, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
/* open the dataset for copy */
if((did = H5Dopen2(fid_src, NAME_DATASET_VL, H5P_DEFAULT)) < 0) TEST_ERROR
@@ -4230,6 +5249,28 @@ test_copy_dataset_chunked_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid
/* open the destination dataset */
if((did2 = H5Dopen2(fid_dst, NAME_DATASET_VL, H5P_DEFAULT)) < 0) TEST_ERROR
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_FARRAY, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, buf) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* open the dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_VL2, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_VL2, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_NONE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
/* Check if the datasets are equal */
if(compare_datasets(did, did2, H5P_DEFAULT, buf) != TRUE) TEST_ERROR
@@ -4239,6 +5280,7 @@ test_copy_dataset_chunked_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid
/* close the source dataset */
if(H5Dclose(did) < 0) TEST_ERROR
+
/* close the SRC file */
if(H5Fclose(fid_src) < 0) TEST_ERROR
@@ -7245,7 +8287,7 @@ static int
test_copy_dataset_chunked_vl_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst_fapl)
{
hid_t fid_src = -1, fid_dst = -1; /* File IDs */
- hid_t tid = -1, tid2=-1; /* Datatype ID */
+ hid_t tid = -1, tid2=-1; /* Datatype ID */
hid_t sid = -1; /* Dataspace ID */
hid_t pid = -1; /* Dataset creation property list ID */
hid_t did = -1, did2 = -1; /* Dataset IDs */
@@ -7301,7 +8343,7 @@ test_copy_dataset_chunked_vl_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* create nested VL datatype */
if((tid2 = H5Tvlen_create(tid)) < 0) TEST_ERROR
- /* create and set chunk plist */
+ /* create and set chunk plist */
if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
if(H5Pset_chunk(pid, 1, chunk_dim1d) < 0) TEST_ERROR
@@ -7311,12 +8353,26 @@ test_copy_dataset_chunked_vl_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* write data into file */
if(H5Dwrite(did, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR
- /* close compact plist */
+ /* close the dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
+ /* Set allocation time to early */
+ if(H5Pset_alloc_time(pid, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR
+
+ /* create dataset at SRC file */
+ if((did = H5Dcreate2(fid_src, NAME_DATASET_VL_VL2, tid2, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* write data into file */
+ if(H5Dwrite(did, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR
+
+ /* close plist */
if(H5Pclose(pid) < 0) TEST_ERROR
/* close the dataset */
if(H5Dclose(did) < 0) TEST_ERROR
+
/* close the SRC file */
if(H5Fclose(fid_src) < 0) TEST_ERROR
@@ -7331,6 +8387,7 @@ test_copy_dataset_chunked_vl_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* copy the dataset from SRC to DST */
if(H5Ocopy(fid_src, NAME_DATASET_VL_VL, fid_dst, NAME_DATASET_VL_VL, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
+ if(H5Ocopy(fid_src, NAME_DATASET_VL_VL2, fid_dst, NAME_DATASET_VL_VL2, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR
/* open the dataset for copy */
if((did = H5Dopen2(fid_src, NAME_DATASET_VL_VL, H5P_DEFAULT)) < 0) TEST_ERROR
@@ -7338,6 +8395,9 @@ test_copy_dataset_chunked_vl_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* open the destination dataset */
if((did2 = H5Dopen2(fid_dst, NAME_DATASET_VL_VL, H5P_DEFAULT)) < 0) TEST_ERROR
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_FARRAY, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
/* Check if the datasets are equal */
if(compare_datasets(did, did2, H5P_DEFAULT, buf) != TRUE) TEST_ERROR
@@ -7347,6 +8407,26 @@ test_copy_dataset_chunked_vl_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* close the source dataset */
if(H5Dclose(did) < 0) TEST_ERROR
+
+ /* open the dataset for copy */
+ if((did = H5Dopen2(fid_src, NAME_DATASET_VL_VL2, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ /* open the destination dataset */
+ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_VL_VL2, H5P_DEFAULT)) < 0) TEST_ERROR
+
+ if(compare_idx_type(src_fapl, did2, H5D_CHUNK_IDX_NONE, H5D_CHUNK_IDX_BTREE) != TRUE)
+ TEST_ERROR
+
+ /* Check if the datasets are equal */
+ if(compare_datasets(did, did2, H5P_DEFAULT, buf) != TRUE) TEST_ERROR
+
+ /* close the destination dataset */
+ if(H5Dclose(did2) < 0) TEST_ERROR
+
+ /* close the source dataset */
+ if(H5Dclose(did) < 0) TEST_ERROR
+
+
/* close the SRC file */
if(H5Fclose(fid_src) < 0) TEST_ERROR
@@ -7582,12 +8662,12 @@ test_copy_dataset_contig_cmpd_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl,
/* set initial data values */
for(i = 0; i < DIM_SIZE_1; i++) {
- buf[i].a = i * (i - 1);
+ buf[i].a = (int)(i * (i - 1));
buf[i].b.len = i+1;
buf[i].b.p = (int *)HDmalloc(buf[i].b.len * sizeof(int));
for(j = 0; j < buf[i].b.len; j++)
((int *)buf[i].b.p)[j] = (int)(i * 10 + j);
- buf[i].c = 1.0F / (i + 1.0F);
+ buf[i].c = 1.0F / ((float)i + 1.0F);
} /* end for */
/* Initialize the filenames */
@@ -7721,12 +8801,12 @@ test_copy_dataset_chunked_cmpd_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl
/* set initial data values */
for(i = 0; i < DIM_SIZE_1; i++) {
- buf[i].a = i * (i - 1);
+ buf[i].a = (int)(i * (i - 1));
buf[i].b.len = i+1;
buf[i].b.p = (int *)HDmalloc(buf[i].b.len * sizeof(int));
for(j = 0; j < buf[i].b.len; j++)
((int *)buf[i].b.p)[j] = (int)(i * 10 + j);
- buf[i].c = 1.0F / (i + 1.0F);
+ buf[i].c = 1.0F / ((float)i + 1.0F);
} /* end for */
/* Initialize the filenames */
@@ -7866,12 +8946,12 @@ test_copy_dataset_compact_cmpd_vl(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl
/* set initial data values */
for(i = 0; i < DIM_SIZE_1; i++) {
- buf[i].a = i * (i - 1);
+ buf[i].a = (int)(i * (i - 1));
buf[i].b.len = i+1;
buf[i].b.p = (int *)HDmalloc(buf[i].b.len * sizeof(int));
for(j = 0; j < buf[i].b.len; j++)
((int *)buf[i].b.p)[j] = (int)(i * 10 + j);
- buf[i].c = 1.0F / (i + 1.0F);
+ buf[i].c = 1.0F / ((float)i + 1.0F);
} /* end for */
/* Initialize the filenames */
@@ -11868,9 +12948,11 @@ test_copy_dataset_open(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst
hid_t tid = -1; /* Datatype ID */
hid_t did = -1, did2 = -1; /* Dataset IDs */
hid_t gid = -1, gid2 = -1; /* Group IDs */
+ hid_t ocpl = -1; /* Object copy property list ID */
int buf[DIM_SIZE_1][DIM_SIZE_2]; /* Buffer for writing data */
int newbuf[DIM_SIZE_1][DIM_SIZE_2]; /* Buffer for writing data */
hsize_t dim2d[2]; /* Dataset dimensions */
+ hbool_t copy_open_obj = TRUE; /* Property to indicate we are copying open objects */
int i, j; /* local index variables */
char src_filename[NAME_BUF_SIZE];
char dst_filename[NAME_BUF_SIZE];
@@ -11919,6 +13001,18 @@ test_copy_dataset_open(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst
/* attach attributes to the dataset */
if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR
+
+ /* Create object copy property list, for passing private property to
+ * dataset comparison routine
+ */
+ /* Create the object copy plist */
+ if((ocpl = H5Pcreate(H5P_OBJECT_COPY)) < 0) TEST_ERROR
+
+ /* Set the private property */
+ if(H5Pinsert2(ocpl, COPY_OPEN_OBJ_NAME, COPY_OPEN_OBJ_SIZE, &copy_open_obj, NULL, NULL, NULL, NULL, NULL, NULL) < 0) TEST_ERROR
+
+
+
/*
* Test case 1
*/
@@ -11933,7 +13027,7 @@ test_copy_dataset_open(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst
if((did2 = H5Dopen2(fid_src, NAME_DATASET_SIMPLE2, H5P_DEFAULT)) < 0) TEST_ERROR
/* Check if the datasets are equal */
- if(compare_datasets(did, did2, H5P_DEFAULT, buf) != TRUE) TEST_ERROR
+ if(compare_datasets(did, did2, ocpl, buf) != TRUE) TEST_ERROR
/* close the copied dataset */
if(H5Dclose(did2) < 0) TEST_ERROR
@@ -11948,7 +13042,7 @@ test_copy_dataset_open(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst
if((did2 = H5Dopen2(fid_dst, NAME_DATASET_SIMPLE, H5P_DEFAULT)) < 0) TEST_ERROR
/* Check if the datasets are equal */
- if(compare_datasets(did, did2, H5P_DEFAULT, buf) != TRUE) TEST_ERROR
+ if(compare_datasets(did, did2, ocpl, buf) != TRUE) TEST_ERROR
/* close the copied dataset in DST file */
if(H5Dclose(did2) < 0) TEST_ERROR
@@ -11975,7 +13069,7 @@ test_copy_dataset_open(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst
if((did2 = H5Dopen2(fid_src, "NEW_DATASET", H5P_DEFAULT)) < 0) TEST_ERROR
/* Check if the datasets are equal */
- if(compare_datasets(did, did2, H5P_DEFAULT, newbuf) != TRUE) TEST_ERROR
+ if(compare_datasets(did, did2, ocpl, newbuf) != TRUE) TEST_ERROR
/* close the copied dataset in SRC file */
if(H5Dclose(did2) < 0) TEST_ERROR
@@ -11989,7 +13083,7 @@ test_copy_dataset_open(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst
if((did2 = H5Dopen2(fid_dst, "NEW_DATASET", H5P_DEFAULT)) < 0) TEST_ERROR
/* Check if the datasets are equal */
- if(compare_datasets(did, did2, H5P_DEFAULT, newbuf) != TRUE) TEST_ERROR
+ if(compare_datasets(did, did2, ocpl, newbuf) != TRUE) TEST_ERROR
/* close the copied dataset in DST file */
if(H5Dclose(did2) < 0) TEST_ERROR
@@ -12029,7 +13123,7 @@ test_copy_dataset_open(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst
if((did2 = H5Dopen2(fid_src, NAME_DATASET_NAMED_DTYPE2, H5P_DEFAULT)) < 0) TEST_ERROR
/* Check if the datasets are equal */
- if(compare_datasets(did, did2, H5P_DEFAULT, buf) != TRUE) TEST_ERROR
+ if(compare_datasets(did, did2, ocpl, buf) != TRUE) TEST_ERROR
/* close the copied dataset in SRC file */
if(H5Dclose(did2) < 0) TEST_ERROR
@@ -12044,7 +13138,7 @@ test_copy_dataset_open(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst
if((did2 = H5Dopen2(fid_dst, NAME_DATASET_NAMED_DTYPE2, H5P_DEFAULT)) < 0) TEST_ERROR
/* Check if the datasets are equal */
- if(compare_datasets(did, did2, H5P_DEFAULT, buf) != TRUE) TEST_ERROR
+ if(compare_datasets(did, did2, ocpl, buf) != TRUE) TEST_ERROR
/* close the copied dataset in DST file */
if(H5Dclose(did2) < 0) TEST_ERROR
@@ -12079,7 +13173,7 @@ test_copy_dataset_open(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst
if((gid2 = H5Gopen2(fid_src, "COPIED_GROUP", H5P_DEFAULT)) < 0) TEST_ERROR
/* Check if the groups are equal */
- if(compare_groups(gid, gid2, H5P_DEFAULT, -1, 0) != TRUE) TEST_ERROR
+ if(compare_groups(gid, gid2, ocpl, -1, 0) != TRUE) TEST_ERROR
/* close the DST dataset */
if(H5Gclose(gid2) < 0) TEST_ERROR
@@ -12094,7 +13188,7 @@ test_copy_dataset_open(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst
if((gid2 = H5Gopen2(fid_dst, "COPIED_GROUP", H5P_DEFAULT)) < 0) TEST_ERROR
/* Check if the groups are equal */
- if(compare_groups(gid, gid2, H5P_DEFAULT, -1, 0) != TRUE) TEST_ERROR
+ if(compare_groups(gid, gid2, ocpl, -1, 0) != TRUE) TEST_ERROR
/* close the group in DST file */
if(H5Gclose(gid2) < 0) TEST_ERROR
@@ -12108,6 +13202,9 @@ test_copy_dataset_open(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst
/* close dataspace */
if(H5Sclose(sid) < 0) TEST_ERROR
+ /* close the object copy property list */
+ if(H5Pclose(ocpl) < 0) TEST_ERROR
+
/* close the SRC file */
if(H5Fclose(fid_src) < 0) TEST_ERROR
@@ -12124,6 +13221,7 @@ error:
H5Sclose(sid);
H5Gclose(gid);
H5Gclose(gid2);
+ H5Pclose(ocpl);
H5Fclose(fid_dst);
H5Fclose(fid_src);
} H5E_END_TRY;
@@ -12260,6 +13358,7 @@ main(void)
nerrors += test_copy_dataset_chunked_empty(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
nerrors += test_copy_dataset_chunked_sparse(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
nerrors += test_copy_dataset_compressed(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
+ nerrors += test_copy_dataset_no_edge_filt(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
nerrors += test_copy_dataset_compact(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
nerrors += test_copy_dataset_multi_ohdr_chunks(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
nerrors += test_copy_dataset_attr_named_dtype(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
@@ -12308,7 +13407,6 @@ main(void)
H5O_COPY_WITHOUT_ATTR_FLAG | H5O_COPY_PRESERVE_NULL_FLAG,
TRUE, "H5Ocopy(): preserve NULL messages");
nerrors += test_copy_dataset_open(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
-
/* Tests that do not use attributes and do not need to be tested
* multiple times for different attribute configurations */
if(configuration < CONFIG_DENSE) {
@@ -12364,8 +13462,7 @@ main(void)
nerrors += test_copy_old_layout(fcpl_dst, dst_fapl);
nerrors += test_copy_null_ref(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
nerrors += test_copy_iterate(fcpl_src, fcpl_dst, src_fapl, dst_fapl);
- }
-
+ } /* end if */
/* TODO: not implemented
nerrors += test_copy_mount(src_fapl);
*/
diff --git a/test/ohdr.c b/test/ohdr.c
index 53aa18f..6faced9 100644
--- a/test/ohdr.c
+++ b/test/ohdr.c
@@ -45,6 +45,11 @@ const char *FILENAME[] = {
*/
#define FILE_BOGUS "tbogus.h5"
+/* */
+#define FILE_OHDR_SWMR "ohdr_swmr.h5"
+#define DSET_NAME "COMPACT_DSET"
+#define OBJ_VERSION_LATEST 2
+
/*
* Verify that messages are moved forward into a "continuation message":
* Create an object header with several continuation chunks
@@ -295,6 +300,150 @@ error:
return -1;
} /* test_ohdr_cache() */
+/*
+ * To exercise the coding for the re-read of the object header for SWMR access.
+ * When the object header is read in H5O_load() of H5Ocache.c, the library initially reads
+ * 512 bytes for decoding, then reads the remaining bytes later if the object header is
+ * greater than 512 bytes. For SWMR access, the read should be done all at one time.
+ */
+static herr_t
+test_ohdr_swmr(hbool_t new_format)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t fapl = -1; /* File access property list */
+ hid_t did = -1; /* Dataset ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t plist = -1; /* Dataset creation property list */
+ size_t compact_size = 1024; /* The size of compact dataset */
+ int *wbuf = NULL; /* Buffer for writing */
+ hsize_t dims[1]; /* Dimension sizes */
+ size_t u; /* Iterator */
+ int n = 0; /* Data variable */
+ H5O_info_t obj_info; /* Information for the object */
+
+ if(new_format) {
+ TESTING("exercise the coding for the re-read of the object header for SWMR access: latest-format");
+ } else {
+ TESTING("exercise the coding for the re-read of the object header for SWMR access: non-latest-format");
+ } /* end if */
+
+ /* File access property list */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create the file with/without latest format: ensure version 2 object header for SWMR */
+ if(new_format) {
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ if((fid = H5Fcreate(FILE_OHDR_SWMR, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ TEST_ERROR
+ } else {
+ if((fid = H5Fcreate(FILE_OHDR_SWMR, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0)
+ TEST_ERROR
+ } /* end if */
+
+ /* Initialize data; fail early if the allocation did not succeed */
+ if(NULL == (wbuf = (int *)HDcalloc(compact_size, sizeof(int))))
+ TEST_ERROR
+ for(u = 0; u < compact_size; u++)
+ wbuf[u] = n++;
+
+ /* Create a small data space for compact dataset */
+ dims[0] = (hsize_t)compact_size;
+ if((sid = H5Screate_simple(1, dims, NULL)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create property list for compact dataset creation */
+ if((plist = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the layout for the compact dataset */
+ if(H5Pset_layout(plist, H5D_COMPACT) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a compact dataset */
+ if((did = H5Dcreate2(fid, DSET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, plist, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Write to the compact dataset */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file for SWMR write with/without latest format */
+ if((fid = H5Fopen(FILE_OHDR_SWMR, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the compact dataset */
+ if((did = H5Dopen2(fid, DSET_NAME, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the object information */
+ if(H5Oget_info(did, &obj_info) < 0)
+ FAIL_STACK_ERROR
+
+ if(obj_info.hdr.version != OBJ_VERSION_LATEST)
+ TEST_ERROR /* logical check: no HDF5 error is on the stack here */
+
+ /* The size of object header should be at least the speculative read size of 512 */
+ /* This will exercise the coding for the re-read of the object header for SWMR access */
+ if(obj_info.hdr.space.total < 512)
+ TEST_ERROR
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the dataspace */
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the dataset creation property list */
+ if(H5Pclose(plist) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file access property list */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Remove the test file */
+ if(HDremove(FILE_OHDR_SWMR) < 0)
+ FAIL_STACK_ERROR
+
+ /* Free the buffer */
+ HDfree(wbuf);
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(did); /* close the dataset before its file */
+ H5Fclose(fid);
+ H5Sclose(sid);
+ H5Pclose(plist);
+ H5Pclose(fapl);
+ HDremove(FILE_OHDR_SWMR);
+ HDfree(wbuf);
+ } H5E_END_TRY;
+
+ return -1;
+} /* test_ohdr_swmr() */
+
/*-------------------------------------------------------------------------
* Function: main
@@ -796,6 +945,8 @@ main(void)
PASSED();
+ TESTING("object with unknown header message & 'fail if unknown and open for write' flag set");
+
/* Open the file with objects that have unknown header messages (generated with gen_bogus.c) with RW intent this time */
if((file2 = H5Fopen(testfile, H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
TEST_ERROR
@@ -831,21 +982,26 @@ main(void)
PASSED();
}
+
/* Close the file we created */
if(H5Fclose(file) < 0)
TEST_ERROR
- /* Test object header creation metadata cache issues */
- if(test_ohdr_cache(filename, fapl) < 0)
+ /* Test object header creation metadata cache issues */
+ if(test_ohdr_cache(filename, fapl) < 0)
TEST_ERROR
} /* end for */
/* Verify symbol table messages are cached */
if(h5_verify_cached_stabs(FILENAME, fapl) < 0) TEST_ERROR
+ /* A test to exercise the re-read of the object header for SWMR access */
+ if(test_ohdr_swmr(TRUE) < 0) TEST_ERROR
+ if(test_ohdr_swmr(FALSE) < 0) TEST_ERROR
+
puts("All object header tests passed.");
h5_cleanup(FILENAME, fapl);
- return(0);
+ return 0;
error:
puts("*** TESTS FAILED ***");
@@ -853,6 +1009,6 @@ error:
H5Fclose(file);
} H5E_END_TRY;
- return(1);
+ return 1;
} /* end main() */
diff --git a/test/set_extent.c b/test/set_extent.c
index ae83ba2..8942f3f 100644
--- a/test/set_extent.c
+++ b/test/set_extent.c
@@ -49,8 +49,9 @@ const char *FILENAME[] = {
#define CONFIG_COMPRESS 0x01u
#define CONFIG_FILL 0x02u
#define CONFIG_EARLY_ALLOC 0x04u
+#define CONFIG_UNFILT_EDGE 0x08u
#define CONFIG_ALL (CONFIG_COMPRESS + CONFIG_FILL \
- + CONFIG_EARLY_ALLOC)
+ + CONFIG_EARLY_ALLOC + CONFIG_UNFILT_EDGE)
#define FILL_VALUE -1
#define DO_RANKS_PRINT_CONFIG(TEST) { \
printf(" Config:\n"); \
@@ -59,6 +60,8 @@ const char *FILENAME[] = {
printf(" Fill value: %s\n", (do_fillvalue ? "yes" : "no")); \
printf(" Early allocation: %s\n", (config & CONFIG_EARLY_ALLOC ? "yes" \
: "no")); \
+ printf(" Edge chunk filters: %s\n", (config & CONFIG_UNFILT_EDGE \
+ ? "disabled" : "enabled")); \
} /* end DO_RANKS_PRINT_CONFIG */
#define RANK1 1
@@ -81,26 +84,46 @@ const char *FILENAME[] = {
test_random_rank4_dump(NDIM_SETS, dim_log, cdims, J, K, L, M); \
goto error; \
} /* end RAND4_FAIL_DUMP */
+#define RAND4_VL_NITER 40
+#define RAND4_VL_SPARSE_SWITCH 5
-static int do_ranks( hid_t fapl );
+typedef enum rank4_index_t {
+ RANK4_INDEX_BTREE = 0, /* Use b-tree (1/2) as chunk index */
+ RANK4_INDEX_FARRAY, /* Use fixed array as chunk index */
+ RANK4_INDEX_EARRAY, /* Use extensible array as chunk index */
+ RANK4_NINDICES, /* Must be last */
+} rank4_index_t;
+
+static int do_ranks( hid_t fapl, hbool_t new_format );
static int do_layouts( hid_t fapl );
static int test_rank1( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k);
static int test_rank2( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k);
static int test_rank3( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k);
static int test_random_rank4( hid_t fapl,
hid_t dcpl,
hbool_t do_fillvalue,
- hbool_t do_sparse);
+ hbool_t disable_edge_filters,
+ hbool_t do_sparse,
+ rank4_index_t index_type);
+static int test_random_rank4_vl( hid_t fapl,
+ hid_t dcpl,
+ hbool_t do_fillvalue,
+ hbool_t disable_edge_filters,
+ hbool_t do_sparse,
+ rank4_index_t index_type);
static int test_external( hid_t fapl );
static int test_layouts( H5D_layout_t layout, hid_t fapl );
@@ -174,7 +197,7 @@ int main( void )
H5F_LIBVER_LATEST) < 0) TEST_ERROR
/* Tests which use chunked datasets */
- nerrors += do_ranks( my_fapl ) < 0 ? 1 : 0;
+ nerrors += do_ranks( my_fapl, new_format ) < 0 ? 1 : 0;
} /* end for */
/* Tests which do not use chunked datasets */
@@ -211,10 +234,12 @@ error:
* test with several ranks
*-------------------------------------------------------------------------
*/
-static int do_ranks( hid_t fapl )
+static int do_ranks( hid_t fapl, hbool_t new_format )
{
- hbool_t do_fillvalue = 0;
+ hbool_t do_fillvalue = FALSE;
+ hbool_t disable_edge_filters = FALSE;
+ rank4_index_t index_type;
hid_t dcpl = -1;
int fillvalue = FILL_VALUE;
unsigned config;
@@ -250,6 +275,11 @@ static int do_ranks( hid_t fapl )
if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0)
TEST_ERROR
+ if(config & CONFIG_UNFILT_EDGE)
+ disable_edge_filters = TRUE;
+ else
+ disable_edge_filters = FALSE;
+
/* Run tests */
if(do_fillvalue) {
unsigned ifset;
@@ -264,25 +294,25 @@ static int do_ranks( hid_t fapl )
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0)
TEST_ERROR
- if(test_rank1(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_rank1(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 1")
printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET"
: "H5D_FILL_TIME_ALLOC"));
goto error;
} /* end if */
- if(test_rank2(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 2")
printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET"
: "H5D_FILL_TIME_ALLOC"));
goto error;
} /* end if */
- if(test_rank3(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_rank3(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 3")
printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET"
: "H5D_FILL_TIME_ALLOC"));
goto error;
} /* end if */
- if(test_rank2(fapl, dcpl, do_fillvalue, TRUE) < 0) {
+ if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, TRUE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 2 with non-default indexed storage B-tree")
printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET"
: "H5D_FILL_TIME_ALLOC"));
@@ -296,19 +326,19 @@ static int do_ranks( hid_t fapl )
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0)
TEST_ERROR
- if(test_rank1(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_rank1(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 1")
goto error;
} /* end if */
- if(test_rank2(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 2")
goto error;
} /* end if */
- if(test_rank3(fapl, dcpl, do_fillvalue, FALSE) < 0) {
+ if(test_rank3(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 3")
goto error;
} /* end if */
- if(test_rank2(fapl, dcpl, do_fillvalue, TRUE) < 0) {
+ if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, TRUE) < 0) {
DO_RANKS_PRINT_CONFIG("Rank 2 with non-default indexed storage B-tree")
goto error;
} /* end if */
@@ -319,17 +349,55 @@ static int do_ranks( hid_t fapl )
if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_IFSET) < 0)
TEST_ERROR
- if(test_random_rank4(fapl, dcpl, do_fillvalue, FALSE) < 0) {
- DO_RANKS_PRINT_CONFIG("Randomized rank 4")
- goto error;
- } /* end if */
+ /* Iterate over different index types; with the old format only the
+ * first (b-tree) iteration runs -- see the break at the end of the loop */
+ for(index_type = RANK4_INDEX_BTREE; index_type < RANK4_NINDICES;
+ index_type++) {
+ /* Standard test */
+ if(test_random_rank4(fapl, dcpl, do_fillvalue, disable_edge_filters,
+ FALSE, index_type) < 0) {
+ DO_RANKS_PRINT_CONFIG("Randomized rank 4")
+ printf(" Index: %s\n", index_type == RANK4_INDEX_BTREE
+ ? "btree" : (index_type == RANK4_INDEX_FARRAY ? "farray"
+ : "earray"));
+ goto error;
+ } /* end if */
- if(!(config & CONFIG_EARLY_ALLOC))
- if(test_random_rank4(fapl, dcpl, do_fillvalue, TRUE) < 0) {
- DO_RANKS_PRINT_CONFIG("Randomized rank 4 with sparse allocation")
+ /* VL test */
+ if(test_random_rank4_vl(fapl, dcpl, do_fillvalue,
+ disable_edge_filters, FALSE, index_type) < 0) {
+ DO_RANKS_PRINT_CONFIG("Randomized rank 4 variable length")
+ printf(" Index: %s\n", index_type == RANK4_INDEX_BTREE
+ ? "btree" : (index_type == RANK4_INDEX_FARRAY ? "farray"
+ : "earray"));
goto error;
} /* end if */
+ /* Sparse allocation test (regular and VL) */
+ if(!(config & CONFIG_EARLY_ALLOC)) {
+ if(test_random_rank4(fapl, dcpl, do_fillvalue,
+ disable_edge_filters, TRUE, index_type) < 0) {
+ DO_RANKS_PRINT_CONFIG("Randomized rank 4 with sparse allocation")
+ printf(" Index: %s\n", index_type == RANK4_INDEX_BTREE
+ ? "btree" : (index_type == RANK4_INDEX_FARRAY
+ ? "farray" : "earray"));
+ goto error;
+ } /* end if */
+ if(test_random_rank4_vl(fapl, dcpl, do_fillvalue,
+ disable_edge_filters, TRUE, index_type) < 0) {
+ DO_RANKS_PRINT_CONFIG("Randomized rank 4 variable length with sparse allocation")
+ printf(" Index: %s\n", index_type == RANK4_INDEX_BTREE
+ ? "btree" : (index_type == RANK4_INDEX_FARRAY
+ ? "farray" : "earray"));
+ goto error;
+ } /* end if */
+ } /* end if */
+
+ /* Break out if using the old format */
+ if(!new_format)
+ break;
+ } /* end for */
+
/* Close dcpl */
if(H5Pclose(dcpl) < 0)
TEST_ERROR
@@ -379,6 +447,7 @@ error:
static int test_rank1( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k)
{
@@ -460,6 +529,9 @@ static int test_rank1( hid_t fapl,
{
TEST_ERROR
}
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
/*-------------------------------------------------------------------------
* create, write dataset
@@ -885,6 +957,7 @@ error:
static int test_rank2( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k)
{
@@ -965,6 +1038,9 @@ static int test_rank2( hid_t fapl,
{
TEST_ERROR
}
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
/*-------------------------------------------------------------------------
* Procedure 1
@@ -1500,6 +1576,7 @@ error:
static int test_rank3( hid_t fapl,
hid_t dcpl,
hbool_t do_fill_value,
+ hbool_t disable_edge_filters,
hbool_t set_istore_k)
{
@@ -1586,6 +1663,9 @@ static int test_rank3( hid_t fapl,
{
TEST_ERROR
}
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
/*-------------------------------------------------------------------------
* create, write array
@@ -2660,14 +2740,16 @@ error:
*-------------------------------------------------------------------------
*/
static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
- hbool_t do_sparse )
+ hbool_t disable_edge_filters, hbool_t do_sparse,
+ rank4_index_t index_type )
{
hid_t file = -1;
hid_t dset = -1;
hid_t fspace = -1;
hid_t mspace = -1;
hid_t my_dcpl = -1;
- hsize_t dims[4]; /* Dataset's dimensions */
+ hsize_t dims[4] = {10, 10, 10, 10}; /* Dataset's dimensions */
+ hsize_t max_dims[4] = {10, 10, 10, 10}; /* Maximum dimensions */
hsize_t old_dims[4]; /* Old dataset dimensions */
hsize_t min_unwritten_dims[4]; /* Minimum dimensions since last write */
hsize_t *valid_dims = old_dims; /* Dimensions of region still containing written data */
@@ -2679,31 +2761,54 @@ static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
static hsize_t dim_log[RAND4_NITER+1][4]; /* Log of dataset dimensions */
hbool_t zero_dim = FALSE; /* Whether a dimension is 0 */
hbool_t writing = TRUE; /* Whether we're writing to the dset */
+ unsigned scalar_iter; /* Iteration to shrink dset to 1x1x1x1 */
volatile unsigned i, j, k, l, m; /* Local indices */
char filename[NAME_BUF_SIZE];
+ /*!FIXME Skip the test if an extensible array index is requested, as
+ * resizing extensible arrays is broken now. Remove these lines as
+ * appropriate when this problem is fixed. */
+ /* (Fixed array resizing has been fixed, so that index type is now tested) */
+ if(index_type == RANK4_INDEX_EARRAY)
+ return 0;
+
/* create a new file */
h5_fixname(FILENAME[4], fapl, filename, sizeof filename);
if ((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
TEST_ERROR
+ /* Set maximum dimensions as appropriate for index type */
+ if(index_type == RANK4_INDEX_BTREE)
+ for(i=0; i<4; i++)
+ max_dims[i] = H5S_UNLIMITED;
+ else if(index_type == RANK4_INDEX_EARRAY)
+ max_dims[1] = H5S_UNLIMITED;
+
/* Generate random chunk dimensions, 2-4 */
for(i=0; i<4; i++)
cdims[i] = (hsize_t)((HDrandom() % 3) + 2);
- /* Generate initial dataset size, 1-10 */
+ /* Pick iteration to shrink dataset to 1x1x1x1 */
+ scalar_iter = (unsigned)(HDrandom() % RAND4_NITER);
+
+ /* Generate initial dataset size, 1-10, unless using fixed array index or
+ * scalar_iter is 0 */
for(i=0; i<4; i++) {
- dims[i] = (hsize_t)((HDrandom() % 10) + 1);
+ dims[i] = (hsize_t)(index_type != RANK4_INDEX_FARRAY
+ ? (0 == scalar_iter ? 1 : ((HDrandom() % 10) + 1)) : 10);
dim_log[0][i] = dims[i];
} /* end for */
/* Create dataset */
- if((fspace = H5Screate_simple(4, dims, mdims)) < 0)
+ if((fspace = H5Screate_simple(4, dims, max_dims)) < 0)
TEST_ERROR
if((my_dcpl = H5Pcopy(dcpl)) < 0)
TEST_ERROR
if(H5Pset_chunk(my_dcpl, 4, cdims) < 0)
TEST_ERROR
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
if((dset = H5Dcreate2(file, "dset", H5T_NATIVE_INT, fspace, H5P_DEFAULT,
my_dcpl, H5P_DEFAULT)) < 0)
TEST_ERROR
@@ -2734,11 +2839,13 @@ static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
RAND4_FAIL_DUMP(i+1, -1, -1, -1, -1)
} /* end if */
- /* Generate new dataset size, 0-10 (0 much less likely) */
+ /* Generate new dataset size, 0-10 (0 much less likely). If i is
+ * scalar_iter, set all dims to 1. */
zero_dim = FALSE;
for(j=0; j<4; j++) {
old_dims[j] = dims[j];
- if((dims[j] = (hsize_t)(HDrandom() % 11)) == 0)
+ if((dims[j] = (hsize_t)(i == scalar_iter ? 1 : (HDrandom() % 11)))
+ == 0)
if((dims[j] = (hsize_t)(HDrandom() % 11)) == 0)
zero_dim = TRUE;
dim_log[i+1][j] = dims[j];
@@ -2825,6 +2932,289 @@ error:
return -1;
} /* end test_random_rank4 */
+/*-------------------------------------------------------------------------
+ * Function: test_random_rank4_vl
+ *
+ * Purpose: Test expanding and shrinking a rank 4 dataset with
+ * variable length data in a randomized fashion. Verifies
+ * that data is preserved (and filled, if do_fillvalue is
+ * true) as expected.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Neil Fortner
+ * Tuesday, June 29, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static int test_random_rank4_vl( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
+ hbool_t disable_edge_filters, hbool_t do_sparse,
+ rank4_index_t index_type )
+{
+ hid_t file = -1;
+ hid_t dset = -1;
+ hid_t type = -1;
+ hid_t fspace = -1;
+ hid_t mspace = -1;
+ hid_t my_dcpl = -1;
+ hsize_t dims[4] = {10, 10, 10, 10}; /* Dataset's dimensions */
+ hsize_t max_dims[4] = {10, 10, 10, 10}; /* Maximum dimensions */
+ hsize_t old_dims[4]; /* Old dataset dimensions */
+ hsize_t min_unwritten_dims[4]; /* Minimum dimensions since last write */
+ hsize_t *valid_dims = old_dims; /* Dimensions of region still containing written data */
+ hsize_t cdims[4]; /* Chunk dimensions */
+ const hsize_t mdims[4] = {10, 10, 10, 10}; /* Memory buffer dimensions */
+ const hsize_t start[4] = {0, 0, 0, 0}; /* Start for hyperslab operations on memory */
+ static hvl_t rbuf[10][10][10][10]; /* Read buffer */
+ static hvl_t wbuf[10][10][10][10]; /* Write buffer */
+ static hsize_t dim_log[RAND4_NITER+1][4]; /* Log of dataset dimensions */
+ hbool_t zero_dim = FALSE; /* Whether a dimension is 0 */
+ hbool_t writing = TRUE; /* Whether we're writing to the dset */
+ hvl_t fill_value; /* Fill value */
+ unsigned scalar_iter; /* Iteration to shrink dset to 1x1x1x1 */
+ volatile unsigned i, j, k, l, m; /* Local indices */
+ char filename[NAME_BUF_SIZE];
+
+ /*!FIXME Skip the test if a fixed array index is requested, as resizing
+ * fixed arrays is broken now. Extensible arrays are also broken. Remove
+ * these lines as appropriate when these problems are fixed. */
+ if(index_type == RANK4_INDEX_FARRAY || index_type == RANK4_INDEX_EARRAY)
+ return 0;
+
+ /* Initialize fill value buffers so they aren't freed in case of an error */
+ fill_value.len = 0;
+ fill_value.p = NULL;
+ for(i=0; i<dims[0]; i++)
+ for(j=0; j<dims[1]; j++)
+ for(k=0; k<dims[2]; k++)
+ for(l=0; l<dims[3]; l++) {
+ rbuf[i][j][k][l].len = 0;
+ rbuf[i][j][k][l].p = NULL;
+ wbuf[i][j][k][l].len = 0;
+ wbuf[i][j][k][l].p = NULL;
+ } /* end for */
+
+ /* Allocate space for VL write buffers, since these never need to be
+ * reallocated */
+ for(i=0; i<dims[0]; i++)
+ for(j=0; j<dims[1]; j++)
+ for(k=0; k<dims[2]; k++)
+ for(l=0; l<dims[3]; l++) {
+ wbuf[i][j][k][l].len = 2;
+ if(NULL == (wbuf[i][j][k][l].p = HDmalloc(2 * sizeof(int))))
+ TEST_ERROR;
+ } /* end for */
+
+ /* create a new file */
+ h5_fixname(FILENAME[4], fapl, filename, sizeof filename);
+ if ((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ TEST_ERROR
+
+ /* Create VL type */
+ if((type = H5Tvlen_create(H5T_NATIVE_INT)) < 0)
+ TEST_ERROR
+
+ /* Set maximum dimensions as appropriate for index type */
+ if(index_type == RANK4_INDEX_BTREE)
+ for(i=0; i<4; i++)
+ max_dims[i] = H5S_UNLIMITED;
+ else if(index_type == RANK4_INDEX_EARRAY)
+ max_dims[1] = H5S_UNLIMITED;
+
+ /* Generate random chunk dimensions, 2-4 */
+ for(i=0; i<4; i++)
+ cdims[i] = (hsize_t)((HDrandom() % 3) + 2);
+
+ /* Pick iteration to shrink dataset to 1x1x1x1 */
+ scalar_iter = (unsigned)(HDrandom() % RAND4_NITER);
+
+ /* Generate initial dataset size, 1-10, unless using fixed array index or
+ * scalar_iter is 0 */
+ for(i=0; i<4; i++) {
+ dims[i] = (hsize_t)(index_type != RANK4_INDEX_FARRAY
+ ? (0 == scalar_iter ? 1 : ((HDrandom() % 10) + 1)) : 10);
+ dim_log[0][i] = dims[i];
+ } /* end for */
+
+ /* Make a copy of the dcpl */
+ if((my_dcpl = H5Pcopy(dcpl)) < 0)
+ TEST_ERROR
+
+ /* Create VL fill value, if requested */
+ if(do_fillvalue) {
+ fill_value.len = 2;
+ if(NULL == (fill_value.p = HDmalloc(2 * sizeof(int))))
+ TEST_ERROR
+ ((int *)fill_value.p)[0] = 1;
+ ((int *)fill_value.p)[1] = 2;
+ if(H5Pset_fill_value(my_dcpl, type, &fill_value) < 0)
+ TEST_ERROR
+ } /* end if */
+
+ /* Create dataset */
+ if((fspace = H5Screate_simple(4, dims, max_dims)) < 0)
+ TEST_ERROR
+ if(H5Pset_chunk(my_dcpl, 4, cdims) < 0)
+ TEST_ERROR
+ if(disable_edge_filters)
+ if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+ TEST_ERROR
+ if((dset = H5Dcreate2(file, "dset", type, fspace, H5P_DEFAULT, my_dcpl,
+ H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if(H5Sclose(fspace) < 0)
+ TEST_ERROR
+
+ /* Create memory space, and set initial selection */
+ if((mspace = H5Screate_simple(4, mdims, NULL)) < 0)
+ TEST_ERROR
+ if(H5Sselect_hyperslab(mspace, H5S_SELECT_SET, start, NULL, dims, NULL)
+ < 0)
+ TEST_ERROR
+
+ /* Main loop */
+ for(i=0; i<RAND4_VL_NITER; i++) {
+
+ /* Generate random write buffer */
+ if(writing && !zero_dim) {
+ for(j=0; j<dims[0]; j++)
+ for(k=0; k<dims[1]; k++)
+ for(l=0; l<dims[2]; l++)
+ for(m=0; m<dims[3]; m++) {
+ ((int *)wbuf[j][k][l][m].p)[0] = HDrandom();
+ ((int *)wbuf[j][k][l][m].p)[1] = HDrandom();
+ } /* end for */
+
+ /* Write data */
+ if(H5Dwrite(dset, type, mspace, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
+ RAND4_FAIL_DUMP(i+1, -1, -1, -1, -1)
+ } /* end if */
+
+ /* Generate new dataset size, 0-10 (0 much less likely). If i is
+ * scalar_iter, set all dims to 1. */
+ zero_dim = FALSE;
+ for(j=0; j<4; j++) {
+ old_dims[j] = dims[j];
+ if((dims[j] = (hsize_t)(i == scalar_iter ? 1 : (HDrandom() % 11)))
+ == 0)
+ if((dims[j] = (hsize_t)(HDrandom() % 11)) == 0)
+ zero_dim = TRUE;
+ dim_log[i+1][j] = dims[j];
+ } /* end for */
+
+ /* If writing is disabled, update min_unwritten_dims */
+ if(!writing)
+ for(j=0; j<4; j++)
+ if(old_dims[j] < min_unwritten_dims[j])
+ min_unwritten_dims[j] = old_dims[j];
+
+ /* Resize dataset */
+ if(H5Dset_extent(dset, dims) < 0)
+ RAND4_FAIL_DUMP(i+2, -1, -1, -1, -1)
+
+ if(!zero_dim) {
+ /* Read data from resized dataset */
+ if(H5Sselect_hyperslab(mspace, H5S_SELECT_SET, start, NULL, dims,
+ NULL) < 0)
+ RAND4_FAIL_DUMP(i+2, -1, -1, -1, -1)
+ if(H5Dread(dset, type, mspace, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ RAND4_FAIL_DUMP(i+2, -1, -1, -1, -1)
+
+ /* Verify correctness of read data */
+ if(do_fillvalue) {
+ for(j=0; j<dims[0]; j++)
+ for(k=0; k<dims[1]; k++)
+ for(l=0; l<dims[2]; l++)
+ for(m=0; m<dims[3]; m++)
+ if(j >= valid_dims[0] || k >= valid_dims[1]
+ || l >= valid_dims[2]
+ || m >= valid_dims[3]) {
+ if(((int *)fill_value.p)[0]
+ != ((int *)rbuf[j][k][l][m].p)[0]
+ || ((int *)fill_value.p)[1]
+ != ((int *)rbuf[j][k][l][m].p)[1])
+ RAND4_FAIL_DUMP(i+2, (int)j, (int)k, (int)l, (int)m)
+ } /* end if */
+ else
+ if(((int *)wbuf[j][k][l][m].p)[0]
+ != ((int *)rbuf[j][k][l][m].p)[0]
+ || ((int *)wbuf[j][k][l][m].p)[1]
+ != ((int *)rbuf[j][k][l][m].p)[1])
+ RAND4_FAIL_DUMP(i+2, (int)j, (int)k, (int)l, (int)m)
+ } /* end if */
+ else {
+ for(j=0; j<MIN(dims[0],valid_dims[0]); j++)
+ for(k=0; k<MIN(dims[1],valid_dims[1]); k++)
+ for(l=0; l<MIN(dims[2],valid_dims[2]); l++)
+ for(m=0; m<MIN(dims[3],valid_dims[3]); m++)
+ if(((int *)wbuf[j][k][l][m].p)[0]
+ != ((int *)rbuf[j][k][l][m].p)[0]
+ || ((int *)wbuf[j][k][l][m].p)[1]
+ != ((int *)rbuf[j][k][l][m].p)[1])
+ RAND4_FAIL_DUMP(i+2, (int)j, (int)k, (int)l, (int)m)
+ } /* end else */
+
+ /* Free read buffer */
+ if(H5Dvlen_reclaim(type, mspace, H5P_DEFAULT, rbuf) < 0)
+ TEST_ERROR
+ } /* end if */
+
+ /* Handle the switch between writing and not writing */
+ if(do_sparse && !(i % RAND4_VL_SPARSE_SWITCH)) {
+ writing = !writing;
+ if(!writing) {
+ for(j=0; j<4; j++)
+ min_unwritten_dims[j] = old_dims[j];
+ valid_dims = min_unwritten_dims;
+ } /* end if */
+ else
+ valid_dims = old_dims;
+ } /* end if */
+ } /* end for */
+
+ /* Close */
+ if(H5Sselect_all(mspace) < 0)
+ TEST_ERROR
+ if(H5Dvlen_reclaim(type, mspace, H5P_DEFAULT, wbuf) < 0)
+ TEST_ERROR
+ HDfree(fill_value.p);
+ if(H5Sclose(mspace) < 0)
+ TEST_ERROR
+ if(H5Pclose(my_dcpl) < 0)
+ TEST_ERROR
+ if(H5Dclose(dset) < 0)
+ TEST_ERROR
+ if(H5Tclose(type) < 0)
+ TEST_ERROR
+ if(H5Fclose(file) < 0)
+ TEST_ERROR
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ for(i=0; i<mdims[0]; i++) /* sweep the full buffers; dims may have shrunk */
+ for(j=0; j<mdims[1]; j++)
+ for(k=0; k<mdims[2]; k++)
+ for(l=0; l<mdims[3]; l++) {
+ if(rbuf[i][j][k][l].p) /* NOTE(review): may dangle after H5Dvlen_reclaim -- confirm */
+ HDfree(rbuf[i][j][k][l].p);
+ if(wbuf[i][j][k][l].p)
+ HDfree(wbuf[i][j][k][l].p);
+ } /* end for */
+ if(fill_value.p)
+ HDfree(fill_value.p);
+ H5Sclose(fspace);
+ H5Sclose(mspace);
+ H5Pclose(dcpl);
+ H5Dclose(dset);
+ H5Tclose(type);
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return -1;
+} /* end test_random_rank4_vl */
+
/*
* test_random_rank4_dump: Dump debugging info from test_random_rank4 to screen
* after failure.
diff --git a/test/swmr.c b/test/swmr.c
new file mode 100644
index 0000000..cb7aa81
--- /dev/null
+++ b/test/swmr.c
@@ -0,0 +1,5465 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+*
+* Test program: swmr
+*
+* To test new public routines from SWMR project:
+* H5Pget/set_metadata_read_attempts()
+* H5Fget_metadata_read_retry_info()
+* H5Fstart_swmr_write()
+* H5Pget/set_object_flush_cb()
+* H5Pget/set_append_flush()
+*
+*************************************************************/
+
+#include "hdf5.h"
+#include "h5test.h"
+#include "H5Iprivate.h"
+
+/*
+ * This file needs to access private information from the H5F package.
+ * This file also needs to access the file testing code.
+ */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_TESTING
+#include "H5Fpkg.h" /* File access */
+
+/* This file needs to access the file driver testing code */
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_TESTING
+#include "H5FDpkg.h" /* File drivers */
+
+#define H5D_FRIEND /*suppress error about including H5Dpkg */
+#define H5D_TESTING
+#include "H5Dpkg.h"
+
+
+const char *FILENAME[] = {
+ "swmr0", /* 0 */
+ "swmr1", /* 1 */
+ NULL
+};
+
+
+#define NAME_BUF_SIZE 1024 /* Length of file name */
+
+/* Name of message file that is used by test_start_swmr_write_concur() */
+#define DONE_MESSAGE "DONE_MESSAGE" /* The message file to create */
+
+/* Tests for H5Pget/set_metadata_read_attempts(), H5Fget_metadata_read_retry_info */
+static int test_metadata_read_attempts(hid_t in_fapl);
+static int test_metadata_read_retry_info(hid_t in_fapl);
+
+/* Tests for H5Fstart_swmr_write() */
+static int test_start_swmr_write(hid_t in_fapl, hbool_t new_format);
+static int test_err_start_swmr_write(hid_t in_fapl, hbool_t new_format);
+static int test_start_swmr_write_concur(hid_t in_fapl, hbool_t new_format);
+
+/* Tests for H5Pget/set_object_flush_cb() */
+static herr_t flush_cb(hid_t obj_id, void *_udata);
+static int test_object_flush_cb(hid_t in_fapl);
+
+/* Tests for H5Pget/set_append_flush() */
+static herr_t append_cb(hid_t dset_id, hsize_t *cur_dims, void *_udata);
+static herr_t append_cb2(hid_t dset_id, hsize_t *cur_dims, void *_udata);
+static int test_append_flush_generic(void);
+static int test_append_flush_dataset_chunked(hid_t in_fapl);
+static int test_append_flush_dataset_fixed(hid_t in_fapl);
+static int test_append_flush_dataset_multiple(hid_t in_fapl);
+
+/* Tests for file open flags/SWMR flags: single process access */
+static int test_file_lock_same(hid_t fapl);
+static int test_file_lock_swmr_same(hid_t fapl);
+
+/* Tests for file open flags/SWMR flags: concurrent process access */
+static int test_file_lock_concur(hid_t fapl);
+static int test_file_lock_swmr_concur(hid_t fapl);
+
+/* Tests for SWMR VFD flag */
+static int test_swmr_vfd_flag(void);
+
+/*
+ * Tests for H5Pget/set_metadata_read_attempts(), H5Fget_metadata_read_retry_info()
+ */
+
+/*
+ * test_metadata_read_attempts():
+ *
+ * Checks the following two public routines work as specified:
+ * H5Pset_metadata_read_attempts()
+ * H5Pget_metadata_read_attempts()
+ */
+static int
+test_metadata_read_attempts(hid_t in_fapl)
+{
+ hid_t fapl; /* File access property list */
+ hid_t file_fapl; /* The file's access property list */
+ hid_t fid, fid1, fid2; /* File IDs */
+ unsigned attempts; /* The # of read attempts */
+ char filename[NAME_BUF_SIZE]; /* File name */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ TESTING("H5Pget/set_metadata_read_attempts()");
+
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /*
+ * Set A:
+ * Tests on verifying the # of read attempts when:
+ * --setting/getting read attempts from a
+ * file access property list.
+ */
+ /* Get # of read attempts -- should be the default: 1 */
+ if(H5Pget_metadata_read_attempts(fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != 1)
+ TEST_ERROR
+
+ /* Set the # of read attempts to 0--should fail */
+ H5E_BEGIN_TRY {
+ ret = H5Pset_metadata_read_attempts(fapl, 0);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Set the # of read attempts to a # > 0--should succeed */
+ if(H5Pset_metadata_read_attempts(fapl, 9) < 0)
+ TEST_ERROR
+
+ /* Retrieve the # of read attempts -- should be 9 */
+ if(H5Pget_metadata_read_attempts(fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != 9)
+ TEST_ERROR
+
+ /* Set the # of read attempts to the default for non-SWMR access: H5F_METADATA_READ_ATTEMPTS --should succeed */
+ if(H5Pset_metadata_read_attempts(fapl, H5F_METADATA_READ_ATTEMPTS) < 0)
+ TEST_ERROR
+
+ /* Retrieve the # of read attempts -- should be H5F_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Set the # of read attempts to the default for SWMR access: H5F_SWMR_METADATA_READ_ATTEMPTS --should succeed */
+ if(H5Pset_metadata_read_attempts(fapl, H5F_SWMR_METADATA_READ_ATTEMPTS) < 0)
+ TEST_ERROR
+
+ /* Retrieve the # of read attempts -- should be H5F_SWMR_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the property list */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Set B:
+ * Tests on verifying read attempts when:
+ * --create a file with non-SWMR access
+ * --opening files with SWMR access
+ * --using default or non-default file access property list
+ */
+ /* Test 1 */
+ /* Create a file with non-SWMR access and default fapl */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file's fapl -- should be H5F_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Test 2 */
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with SWMR access and default read attempts */
+ if((fid = H5Fopen(filename, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file's fapl -- should be H5F_SWMR_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Test 3 */
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the # of read attempts */
+ if(H5Pset_metadata_read_attempts(fapl, 9) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with SWMR access and fapl (non-default & set to 9) */
+ if((fid = H5Fopen(filename, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file's fapl -- should be 9 */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != 9)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Test 4 */
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the # of read attempts */
+ if(H5Pset_metadata_read_attempts(fapl, 1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with SWMR access and fapl (non-default & set to 1) */
+ if((fid = H5Fopen(filename, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file fapl -- should be 1 */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != 1)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Test 5 */
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with SWMR_READ and fapl (non-default read attempts but unset) */
+ if((fid = H5Fopen(filename, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file's fapl -- should be H5F_SWMR_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Set C:
+ * Tests on verifying read attempts when:
+ * --create a file with SWMR access
+ * --opening files with non-SWMR access
+ * --using default or non-default file access property list
+ */
+ /* Test 1 */
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a file with SWMR access and default read attempts */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file's fapl -- should be H5F_SWMR_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Test 2 */
+ /* Open the file with non-SWMR access and default fapl */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file's fapl -- should be H5F_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Test 3 */
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the # of read attempts */
+ if(H5Pset_metadata_read_attempts(fapl, 9) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with non-SWMR access and fapl (non-default & set to 9) */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file's fapl -- should be H5F_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Test 4 */
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the # of read attempts */
+ if(H5Pset_metadata_read_attempts(fapl, 1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with non-SWMR access and fapl (non-default & set to 1) */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file fapl -- should be 1 */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != 1)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Test 5 */
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with non-SWMR access and fapl (non-default but unset) */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file's fapl -- should be H5F_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the # of read attempts */
+ if(H5Pset_metadata_read_attempts(fapl, 9) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open file again with non-SWMR access and default fapl */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file fapl -- should be H5F_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open file again with SWMR access and default read attempts */
+ if((fid = H5Fopen(filename, H5F_ACC_SWMR_READ, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file fapl -- should be H5F_SWMR_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Set D:
+ * Tests on verifying read attempts when:
+ * --create with non-SWMR access
+ * --opening files with SWMR access
+ * --H5Freopen the files
+ */
+
+ /* Create a file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open file again with SWMR access and default read attempts */
+ if((fid1 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the # of read attempts */
+ if(H5Pset_metadata_read_attempts(fapl, 9) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open file again with SWMR access and fapl (non-default & set to 9) */
+ if((fid2 = H5Fopen(filename, (H5F_ACC_RDONLY | H5F_ACC_SWMR_READ), fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open fid1 */
+ if((fid = H5Freopen(fid1)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file fapl -- should be H5F_SWMR_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open fid2 */
+ if((fid = H5Freopen(fid2)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file fapl -- should be H5F_SWMR_METADATA_READ_ATTEMPTS, not 9 */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close all the files */
+ if(H5Fclose(fid1) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid2) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Set E:
+ * Tests on verifying read attempts when:
+ * --create with SWMR access
+ * --opening files with non-SWMR access
+ * --H5Freopen the files
+ */
+
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open file again with non-SWMR access and default fapl */
+ if((fid1 = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a copy of the parameter fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the # of read attempts */
+ if(H5Pset_metadata_read_attempts(fapl, 9) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open file again with non-SWMR access and fapl (non-default & set to 9) */
+ if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close fapl */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open fid1 */
+ if((fid = H5Freopen(fid1)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file fapl -- should be H5F_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open fid2 */
+ if((fid = H5Freopen(fid2)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get file's fapl */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from file fapl -- should be H5F_METADATA_READ_ATTEMPTS */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+ if(attempts != H5F_METADATA_READ_ATTEMPTS)
+ TEST_ERROR
+
+ /* Close the file's fapl */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close all the files */
+ if(H5Fclose(fid1) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid2) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Pclose(file_fapl);
+ H5Fclose(fid);
+ H5Fclose(fid1);
+ H5Fclose(fid2);
+ } H5E_END_TRY;
+
+ return -1;
+
+
+} /* test_metadata_read_attempts() */
+
+/*
+ * test_metadata_read_retry_info():
+ *
+ * Checks whether the public routine H5Fget_metadata_read_retry_info
+ * works as specified.
+ *
+ */
+static int
+test_metadata_read_retry_info(hid_t in_fapl)
+{
+ hid_t fapl, new_fapl; /* File access property list */
+ hid_t fid, fid1; /* File IDs */
+ H5F_retry_info_t info, info1; /* The collection of metadata retries */
+ H5F_t *f = NULL, *f1 = NULL; /* Internal file object pointers */
+ unsigned i, j, n; /* Local index variables */
+ hid_t did1, did2; /* Dataset IDs */
+ hid_t sid; /* Dataspace ID */
+ hid_t dcpl; /* Dataset creation property list */
+ hsize_t dims[2] = {6, 10}; /* Dataset dimensions */
+ char filename[NAME_BUF_SIZE]; /* File name */
+ int buf[6][10], chkbuf1[6][10], chkbuf2[6][10]; /* Buffers for data */
+ hsize_t max_dims_1un[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Dataset maximum dimensions */
+ hsize_t max_dims_2un[2] = {500, H5S_UNLIMITED}; /* Dataset maximum dimensions */
+ hsize_t chunk_dims[2] = {2, 2}; /* Chunk dimensions */
+
+ /* Output message about test being performed */
+ TESTING("H5Fset_metadata_read_retry_info()");
+
+ /* Get a copy of the parameter in_fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a file without SWMR access */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a chunked dataset with 1 unlimited dimension: extensible array indexing will be used */
+ if((sid = H5Screate_simple(2, dims, max_dims_1un)) < 0)
+ FAIL_STACK_ERROR
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ FAIL_STACK_ERROR
+ if((did1 = H5Dcreate2(fid, "DSET_1UNLIM", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a chunked dataset with 2 unlimited dimension: v2 Btree indexing will be used */
+ if((sid = H5Screate_simple(2, dims, max_dims_2un)) < 0)
+ FAIL_STACK_ERROR
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ FAIL_STACK_ERROR
+ if((did2 = H5Dcreate2(fid, "DSET_2UNLIM", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Initialize buffer data */
+ for(i = n = 0; i < 6; i++)
+ for(j = 0; j < 10; j++)
+ buf[i][j] = (int)n++;
+
+ /* Write to the 2 datasets */
+ if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ FAIL_STACK_ERROR
+ if(H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ FAIL_STACK_ERROR
+
+ /* Closing */
+ if(H5Dclose(did1) < 0)
+ FAIL_STACK_ERROR
+ if(H5Dclose(did2) < 0)
+ FAIL_STACK_ERROR
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 1: tests on nbins
+ */
+ /*
+ * Open a file without SWMR access, default # of read attempts--
+ * info.nbins should be 0
+ * info.retries should all be NULL
+ */
+ /* Open the file without SWMR access */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the dataset */
+ if((did1 = H5Dopen2(fid, "DSET_1UNLIM", H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5Dread(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chkbuf1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the dataset */
+ if((did2 = H5Dopen2(fid, "DSET_2UNLIM", H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Dread(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chkbuf2) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve retries information */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 0 */
+ if(info.nbins != 0)
+ TEST_ERROR
+
+ /* Should be all NULL */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++)
+ if(info.retries[i] != NULL)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Dclose(did1) < 0)
+ FAIL_STACK_ERROR
+ if(H5Dclose(did2) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+
+ /*
+ * Open a file with SWMR access, default # of read attempts--
+ * info.nbins should be 2
+ * info.retries should all be NULL
+ */
+ /* Open the file with SWMR access */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve retries information */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 2 */
+ if(info.nbins != 2)
+ TEST_ERROR
+
+ /* Should be all NULL */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++)
+ if(info.retries[i] != NULL)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Open a file with SWMR access, # of read_attempts is 10:
+ * info.nbins should be 1
+ * info.retries should all be NULL
+ */
+ if((new_fapl = H5Pcopy(fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5Pset_metadata_read_attempts(new_fapl, 10) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with SWMR access */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, new_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve retry information */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 1 */
+ if(info.nbins != 1)
+ TEST_ERROR
+
+ /* Should be all NULL */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++)
+ if(info.retries[i] != NULL)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Pclose(new_fapl) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Open a file with SWMR access, # of read attempts is 101:
+ * info.nbins should be 3
+ * info.retries should all be NULL
+ */
+ if((new_fapl = H5Pcopy(fapl)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_metadata_read_attempts(new_fapl, 101) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with SWMR access */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, new_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve retry information */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 3 */
+ if(info.nbins != 3)
+ TEST_ERROR
+
+ /* Should be all NULL */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++)
+ if(info.retries[i] != NULL)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Pclose(new_fapl) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Open a file with SWMR access, # of read_attempts is 10000:
+ * info.nbins should be 4
+ * info.retries should all be NULL
+ */
+ if((new_fapl = H5Pcopy(fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5Pset_metadata_read_attempts(new_fapl, 10000) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with SWMR access */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, new_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve retry information */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 4 */
+ if(info.nbins != 4)
+ TEST_ERROR
+
+ /* Should be all NULL */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++)
+ if(info.retries[i] != NULL)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Pclose(new_fapl) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Open a file with SWMR access, # of read_attempts is 1:
+ * info.nbins should be 0
+ * info.retries should all be NULL
+ */
+ if((new_fapl = H5Pcopy(fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5Pset_metadata_read_attempts(new_fapl, 1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with SWMR access */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, new_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve retry information */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 0 */
+ if(info.nbins != 0)
+ TEST_ERROR
+
+ /* Should be all NULL */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++)
+ if(info.retries[i] != NULL)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Pclose(new_fapl) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+
+ /*
+ * Case 2: tests on retries info
+ */
+
+ /* Open the file with SWMR access */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the dataset */
+ if((did1 = H5Dopen2(fid, "DSET_1UNLIM", H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Read data from the dataset */
+ if(H5Dread(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chkbuf1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the dataset */
+ if((did2 = H5Dopen2(fid, "DSET_2UNLIM", H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Read data from the dataset */
+ if(H5Dread(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chkbuf2) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve retry information */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 2 */
+ if(info.nbins != 2)
+ TEST_ERROR
+
+ /* Should be all NULL */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++)
+ if(info.retries[i] != NULL)
+ TEST_ERROR
+
+ /* Get a pointer to the internal file object */
+ if((f = (H5F_t *)H5I_object(fid)) == NULL)
+ FAIL_STACK_ERROR
+
+ /*
+ * Increment 1st set of retries for metadata items:
+ * a) v2 B-tree leaf node--retries[4][1]
+ * b) Extensible array data block--retries[15][1]
+ * c) File's superblock--retries[20][0]
+ */
+
+ /* v2 B-tree leaf node: log retry 99 for 500 times */
+ for(i = 0; i < 500; i++) {
+ if(H5F_track_metadata_read_retries(f, H5AC_BT2_LEAF_ID, 99) < 0)
+ FAIL_STACK_ERROR
+ }
+
+ /* Extensible array data block: log retry 10 for 1000 times */
+ for(i = 0; i < 1000; i++)
+ if(H5F_track_metadata_read_retries(f, H5AC_EARRAY_DBLOCK_ID, 10) < 0)
+ FAIL_STACK_ERROR
+
+ /* File's superblock: log retry 1 for 1 time */
+ if(H5F_track_metadata_read_retries(f, H5AC_SUPERBLOCK_ID, 1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the collection of metadata read retries */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify retries for v2 B-tree leaf node */
+ if(info.retries[4][0] != 0)
+ TEST_ERROR
+ if(info.retries[4][1] != 500)
+ TEST_ERROR
+
+ /* Verify retries for extensible array data block */
+ if(info.retries[15][0] != 0)
+ TEST_ERROR
+ if(info.retries[15][1] != 1000)
+ TEST_ERROR
+
+ /* Verify retries for file's superblock */
+ if(info.retries[20][0] != 1)
+ TEST_ERROR
+ if(info.retries[20][1] != 0)
+ TEST_ERROR
+
+ /* Free memory for info.retries */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) {
+ if(info.retries[i] != NULL)
+ HDfree(info.retries[i]);
+ }
+
+ /*
+ * Increment 2nd set of retries for metadata items:
+ * a) Object header--retries[0][0]
+ * b) Extensible array data block--retries[15][0]
+ * c) Fixed array header--retries[17][1]
+ * d) File's superblock--retries[20][0]
+ */
+
+ /* Object header: log retry 5 for 5 times */
+ for(i = 0; i < 5; i++) {
+ if(H5F_track_metadata_read_retries(f, H5AC_OHDR_ID, 5) < 0)
+ TEST_ERROR
+ }
+
+ /* Extensible array data block: log retry 4 for 1 time */
+ if(H5F_track_metadata_read_retries(f, H5AC_EARRAY_DBLOCK_ID, 4) < 0)
+ TEST_ERROR
+
+ /* Fixed array header : log retry 50 for 10000 times */
+ for(i = 0; i < 10000; i++) {
+ if(H5F_track_metadata_read_retries(f, H5AC_FARRAY_HDR_ID, 50) < 0)
+ TEST_ERROR
+ }
+
+ /* File's superblock: log retry 1 for 1 more time */
+ if(H5F_track_metadata_read_retries(f, H5AC_SUPERBLOCK_ID, 1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the collection of metadata read retries */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Verify info has both previous + current retries information:
+ */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) {
+ switch(i) {
+ case 0: /* Object header */
+ if(info.retries[i][0] != 5)
+ TEST_ERROR
+ if(info.retries[i][1] != 0)
+ TEST_ERROR
+ break;
+
+ case 4: /* v2 B-tree leaf node */
+ if(info.retries[i][0] != 0)
+ TEST_ERROR
+ if(info.retries[i][1] != 500)
+ TEST_ERROR
+ break;
+
+ case 15: /* Extensible array data block */
+ if(info.retries[i][0] != 1)
+ TEST_ERROR
+ if(info.retries[i][1] != 1000)
+ TEST_ERROR
+ break;
+
+ case 17: /* Fixed array header */
+ if(info.retries[i][0] != 0)
+ TEST_ERROR
+ if(info.retries[i][1] != 10000)
+ TEST_ERROR
+ break;
+
+ case 20: /* File's superblock */
+ if(info.retries[i][0] != 2)
+ TEST_ERROR
+ if(info.retries[i][1] != 0)
+ TEST_ERROR
+ break;
+
+ default:
+ if(info.retries[i] != NULL)
+ TEST_ERROR
+ break;
+ }
+ }
+
+ /* Free memory for info.retries */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++)
+ if(info.retries[i] != NULL)
+ HDfree(info.retries[i]);
+
+ /* Closing */
+ if(H5Dclose(did1) < 0)
+ FAIL_STACK_ERROR
+ if(H5Dclose(did2) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a copy of the file access property list */
+ if((new_fapl = H5Pcopy(fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the number of metadata read attempts to 101 */
+ if(H5Pset_metadata_read_attempts(new_fapl, 101) < 0)
+ FAIL_STACK_ERROR
+
+ /* Open the file with SWMR access */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, new_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if((f = (H5F_t *)H5I_object(fid)) == NULL)
+ FAIL_STACK_ERROR
+
+ /* File's superblock: log retry 1 for 1 time */
+ if(H5F_track_metadata_read_retries(f, H5AC_SUPERBLOCK_ID, 1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the collection of metadata read retries */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 3 */
+ if(info.nbins != 3)
+ TEST_ERROR
+
+ /* Verify retries info */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) {
+ switch(i) {
+ case 20: /* File's superblock */
+ if(info.retries[i][0] != 1)
+ TEST_ERROR
+ if(info.retries[i][1] != 0)
+ TEST_ERROR
+ if(info.retries[i][2] != 0)
+ TEST_ERROR
+ break;
+
+ default:
+ if(info.retries[i] != NULL)
+ TEST_ERROR
+ break;
+ }
+ }
+
+ /* Free memory */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++)
+ if(info.retries[i] != NULL)
+ HDfree(info.retries[i]);
+
+ /* Closing */
+ if(H5Pclose(new_fapl) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 3: Tests on retrieving the collection of retries
+ * when H5Fopen and H5Freopen the same file.
+ */
+
+ /*
+ * Open a file without SWMR access, default # of read attempts--
+ * H5Freopen the same file--
+ * Both files should:
+ * nbins should be 0
+ * retries should all be NULL
+ */
+ /* Open the file without SWMR access */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open fid */
+ if((fid1 = H5Freopen(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve retries information for fid */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve retries information for fid1*/
+ if(H5Fget_metadata_read_retry_info(fid1, &info1)< 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 0 */
+ if(info.nbins != 0)
+ TEST_ERROR
+ if(info1.nbins != 0)
+ TEST_ERROR
+
+ /* Should be all NULL */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) {
+ if(info.retries[i] != NULL)
+ TEST_ERROR
+ if(info1.retries[i] != NULL)
+ TEST_ERROR
+ }
+
+ /* Closing */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid1) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Open a file with SWMR access, default # of read attempts:
+ * --increment retries for metadata item: fixed array data block page (retries[19][0])
+ * H5Freopen the same file:
+ * --increment retries for metadata item: free-space sections (retries[9][1])--
+ */
+ /* Open the file with SWMR access */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object for fid */
+ if((f = (H5F_t *)H5I_object(fid)) == NULL)
+ FAIL_STACK_ERROR
+
+ /* Re-open fid */
+ if((fid1 = H5Freopen(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object for fid1 */
+ if((f1 = (H5F_t *)H5I_object(fid1)) == NULL)
+ FAIL_STACK_ERROR
+
+ /* For fid: fixed array data block page--log retry 9 for 500 times */
+ for(i = 0; i < 500; i++) {
+ if(H5F_track_metadata_read_retries(f, H5AC_FARRAY_DBLK_PAGE_ID, 9) < 0)
+ FAIL_STACK_ERROR
+ }
+
+ /* For fid1: free-space sections--log retry 99 for 1000 times */
+ for(i = 0; i < 1000; i++) {
+ if(H5F_track_metadata_read_retries(f1, H5AC_FSPACE_SINFO_ID, 99) < 0)
+ FAIL_STACK_ERROR
+ }
+
+ /* Retrieve the collection of metadata read retries for fid */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the collection of metadata read retries for fid1 */
+ if(H5Fget_metadata_read_retry_info(fid1, &info1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify nbins for fid & fid1: should be 2 */
+ if(info.nbins != 2)
+ TEST_ERROR
+ if(info1.nbins != 2)
+ TEST_ERROR
+
+ /* Verify retries for fid: fixed array data block page */
+ if(info.retries[19][0] != 500)
+ TEST_ERROR
+ if(info.retries[19][1] != 0)
+ TEST_ERROR
+
+ /* Verify retries for fid: free-space sections */
+ /* (Since file was re-opened) */
+ if(info.retries[9][0] != 0)
+ TEST_ERROR
+ if(info.retries[9][1] != 1000)
+ TEST_ERROR
+
+ /* Verify retries for fid1: free-space sections */
+ if(info1.retries[9][0] != 0)
+ TEST_ERROR
+ if(info1.retries[9][1] != 1000)
+ TEST_ERROR
+
+ /* Verify retries for fid1: fixed array data block page */
+ /* (Since file was re-opened) */
+ if(info1.retries[19][0] != 500)
+ TEST_ERROR
+ if(info1.retries[19][1] != 0)
+ TEST_ERROR
+
+ /* Free memory for info.retries and info1.retries */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) {
+ if(info.retries[i] != NULL)
+ HDfree(info.retries[i]);
+ if(info1.retries[i] != NULL)
+ HDfree(info1.retries[i]);
+ } /* end for */
+
+ /* Closing */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+ if(H5Fclose(fid1) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Pclose(new_fapl);
+ H5Dclose(did1);
+ H5Dclose(did2);
+ H5Sclose(sid);
+ H5Pclose(dcpl);
+ H5Fclose(fid);
+ H5Fclose(fid1);
+ } H5E_END_TRY;
+
+ return -1;
+
+} /* test_metadata_read_retry_info() */
+
+
+
+/*
+ * Tests for H5Fstart_swmr_write()
+ */
+
+/*
+ * test_start_swmr_write():
+ *
+ * Verify SWMR writing is enabled via H5Fstart_swmr_write():
+ * Mainly test for file created with SWMR_WRITE + with/without latest format:
+ * --file will have v3 superblock and all latest version support enabled
+ *
+ * (a) Creating a file
+ * Create a file with SWMR_WRITE + non-latest-format
+ * Create a chunked dataset "dataset1" in the file -- should be using latest chunk indexing
+ * Should fail to enable SWMR as the file is already in SWMR writing mode
+ * Close the file
+ *
+ * (b) Opening a file
+ * Open the file with write + non-latest-format
+ * --file has v3 superblock and all latest version support enabled
+ * Open dataset "dataset1"--keep it open
+ * Create a chunked dataset "dataset2" in the file -- should be using latest chunk indexing -- keep it open
+ * Should succeed in enabling SWMR
+ * Should succeed in writing/reading from "dataset1"
+ * Close "dataset1" and "dataset2"
+ * Create "dataset3" -- should be using latest chunk indexing
+ * Close "dataset3"
+ * Close the file
+ *
+ */
+static int
+test_start_swmr_write(hid_t in_fapl, hbool_t new_format)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t fapl = -1; /* File access property */
+ hid_t gid = -1; /* Group ID */
+ hid_t dcpl = -1; /* Dataset creation property */
+ hid_t file_fapl = -1; /* File access property for the file */
+ hid_t did1 = -1, did2 = -1, did3 = -1; /* Dataset IDs */
+ hid_t sid1 = -1, sid2 = -1, sid3 = -1; /* Dataspace IDs */
+ hsize_t dim[1] = {1}; /* Dimension sizes */
+ hsize_t max_dim[1] = {H5S_UNLIMITED}; /* Maximum dimension sizes */
+ hsize_t chunk_dim[1] = {2}; /* Chunk dimension sizes */
+ hsize_t dim2[2] = {5, 10}; /* Dimension sizes */
+ hsize_t max_dim2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes */
+ hsize_t chunk_dim2[2] = {2, 7}; /* Chunk dimension sizes */
+ H5D_chunk_index_t idx_type; /* Dataset chunk index type */
+ int wdata = 99; /* Data to write */
+ int rdata; /* Data read */
+ unsigned attempts; /* The retrieved # of read attempts */
+ char filename[NAME_BUF_SIZE]; /* File name */
+ herr_t ret; /* Return value */
+
+
+ /* Get a copy of the parameter fapl (non-latest-format) */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ if(new_format) {
+ TESTING("H5Fstart_swmr_write() when creating/opening a file with latest format");
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+ } else {
+ TESTING("H5Fstart_swmr_write() when creating/opening a file without latest format");
+ } /* end if */
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /*
+ * Case A: when creating a file
+ */
+
+ /* Create the file: latest format alone, or SWMR write + non-latest-format */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC | (new_format ? 0 : H5F_ACC_SWMR_WRITE), H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the file's access_property list */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts from the file's fapl */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 1 for latest format (SWMR not enabled yet) or 100 (file created in SWMR writing mode) */
+ if(attempts != (new_format ? H5F_METADATA_READ_ATTEMPTS : H5F_SWMR_METADATA_READ_ATTEMPTS))
+ TEST_ERROR;
+
+ /* Close the property list */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create "dataset1" */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_chunk(dcpl, 1, chunk_dim) < 0)
+ FAIL_STACK_ERROR
+ if((sid1 = H5Screate_simple(1, dim, max_dim)) < 0)
+ FAIL_STACK_ERROR;
+ if((did1 = H5Dcreate2(fid, "dataset1", H5T_NATIVE_INT, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get the chunk index type */
+ if(H5D__layout_idx_type_test(did1, &idx_type) < 0)
+ FAIL_STACK_ERROR;
+ if(idx_type != H5D_CHUNK_IDX_EARRAY)
+ FAIL_PUTS_ERROR("should be using extensible array as index");
+
+ /* Write to the dataset */
+ if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should fail for non-latest-format (file already in SWMR writing mode) */
+ /* Should succeed in enabling SWMR for latest format */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid);
+ } H5E_END_TRY;
+ if(new_format) {
+ if(ret < 0) TEST_ERROR
+ } else if(ret >= 0)
+ TEST_ERROR
+
+ /* Read from the dataset */
+ if(H5Dread(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Verify the data is correct */
+ /* (a data mismatch is a test failure, not a library error--no HDF5 error stack to dump) */
+ if(wdata != rdata)
+ TEST_ERROR
+
+ /* Close "dataset1", dataspace, dataset creation property list */
+ if(H5Dclose(did1) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid1) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get the file's access_property list */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 100: the file is now in SWMR writing mode in both cases */
+ if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS)
+ TEST_ERROR;
+
+ /* Close the file access property list */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /*
+ * Case B: when opening a file
+ */
+
+ /* Open the file again with write access (no SWMR flags) */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get the file's access_property list */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 1 (not opened with SWMR access) */
+ if(attempts != H5F_METADATA_READ_ATTEMPTS)
+ TEST_ERROR;
+
+ /* Close the property list */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ /* open "dataset1", keep it open */
+ if((did1 = H5Dopen2(fid, "dataset1", H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Write to "dataset1" */
+ wdata = 88;
+ if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create a group */
+ if((gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create "dataset2" in the group, keep it open */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_chunk(dcpl, 2, chunk_dim2) < 0)
+ FAIL_STACK_ERROR
+ if((sid2 = H5Screate_simple(2, dim2, max_dim2)) < 0)
+ FAIL_STACK_ERROR;
+ if((did2 = H5Dcreate2(gid, "dataset2", H5T_NATIVE_INT, sid2, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get the chunk index type for "dataset2" */
+ if(H5D__layout_idx_type_test(did2, &idx_type) < 0)
+ FAIL_STACK_ERROR;
+ if(idx_type != H5D_CHUNK_IDX_BT2)
+ FAIL_PUTS_ERROR("should be using v2 B-tree chunk indexing");
+
+ /* Should succeed in enabling SWMR writing */
+ if(H5Fstart_swmr_write(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get the file's access_property list */
+ if((file_fapl = H5Fget_access_plist(fid)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the # of read attempts */
+ if(H5Pget_metadata_read_attempts(file_fapl, &attempts) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should be 100 (SWMR writing is now enabled) */
+ if(attempts != H5F_SWMR_METADATA_READ_ATTEMPTS)
+ TEST_ERROR;
+
+ /* Close the property list */
+ if(H5Pclose(file_fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Read from "dataset1" */
+ if(H5Dread(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata) < 0)
+ FAIL_STACK_ERROR;
+ /* (a data mismatch is a test failure, not a library error--no HDF5 error stack to dump) */
+ if(wdata != rdata)
+ TEST_ERROR
+
+ /* Close "dataset1" */
+ if(H5Dclose(did1) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Close "dataset2", dataspace, dataset creation property list */
+ if(H5Dclose(did2) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid2) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create "dataset3" */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_chunk(dcpl, 2, chunk_dim2) < 0)
+ FAIL_STACK_ERROR
+ if((sid3 = H5Screate_simple(2, dim2, max_dim2)) < 0)
+ FAIL_STACK_ERROR;
+ if((did3 = H5Dcreate2(fid, "dataset3", H5T_NATIVE_INT, sid3, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get the chunk index type for "dataset3" */
+ if(H5D__layout_idx_type_test(did3, &idx_type) < 0)
+ FAIL_STACK_ERROR;
+ if(idx_type != H5D_CHUNK_IDX_BT2)
+ FAIL_PUTS_ERROR("should be using v2 B-tree as index");
+
+ /* Close "dataset3", dataspace, dataset creation property list */
+ if(H5Dclose(did3) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid3) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Close the group */
+ if(H5Gclose(gid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Close the file access property list */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Fclose(fid);
+ H5Pclose(fapl);
+ H5Pclose(file_fapl);
+ H5Gclose(gid);
+ H5Pclose(dcpl);
+ H5Dclose(did1);
+ H5Dclose(did2);
+ H5Dclose(did3);
+ H5Sclose(sid1);
+ H5Sclose(sid2);
+ H5Sclose(sid3);
+ } H5E_END_TRY;
+
+ return -1;
+} /* test_start_swmr_write() */
+
+/*
+ * test_err_start_swmr_write():
+ *
+ * Verify failure conditions in enabling SWMR writing mode via H5Fstart_swmr_write():
+ * (A) When creating a file:
+ * (1) Create a file with SWMR write + with/without latest format
+ * --fail to enable SWMR because the file is already in SWMR writing mode
+ * If (latest-format):
+ * (2a) Create a file with write + latest format and with opened named datatype
+ * --fail to enable SWMR because there are opened datatype
+ * If !(latest-format):
+ * (2b) Create a file with write + non-latest-format
+ * --fail to enable SWMR because superblock version is not at least 3
+ *
+ * (B) When opening a file which is created with write + latest format:
+ * (1) Open the file with SWMR write + with/without latest format
+ * --fail to enable SWMR because the file is already in SWMR writing mode
+ * (2) Open the file with read only + with/without latest format
+ * --fail to enable SWMR because the file is not opened with write
+ * (3) Open the file with SWMR read only + with/without latest format
+ * --fail to enable SWMR because the file is not opened with write
+ * (4) Open the file with write + with/without latest format and with opened named datatype/attribute
+ * --fail to enable SWMR because there are opened datatype/attribute
+ *
+ * (C) When doing multiple opens for a file:
+ * Create a file with (a) write + latest format or (b) SWMR write + non-latest-format
+ * Close the file
+ * (1) --Open the file with write + with/without latest format
+ * --Enable SWMR writing mode twice
+ * --First time succeed, second time fail
+ * --Close the file
+ * (2) --Open the file with write + with/without latest format
+ * --succeed to enable SWMR writing mode
+ * --reopen the same file
+ * --fail to enable SWMR writing mode for the reopened file
+ * --Close the file
+ * (3) --Open the file with write + with/without latest format
+ * --open the same file again
+ * --succeed to enable SWMR for the first opened file
+ * --fail to enable SWMR for the second opened file
+ * --Close the file
+ *
+ * (D) (!new_format): When opening a file which is created with write + non-latest-format:
+ * (1) Open the file with SWMR write+latest format
+ * --fail to open due to superblock version not 3
+ * (2) Open the file with SWMR write+non-latest-format
+ * --fail to open due to superblock version not 3
+ *
+ * (3) Open the file with write+latest format
+ * --fail to enable SWMR due to superblock version not 3
+ * (4) Open the file with write+non-latest-format
+ * --fail to enable SWMR due to superblock version not 3
+ */
+static int
+test_err_start_swmr_write(hid_t in_fapl, hbool_t new_format)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t fid2 = -1; /* File ID */
+ hid_t fapl = -1; /* A copy of file access property */
+ hid_t new_fapl = -1; /* A copy of file access property */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t aid = -1; /* Attribute ID */
+ hid_t tid = -1; /* Datatype ID */
+ hid_t bad_fid = -1; /* Test fid (should never represent a real ID) */
+ herr_t ret; /* Return value */
+ char filename[NAME_BUF_SIZE]; /* File name */
+
+ /* Create a copy of the input parameter in_fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ if((new_fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ if(new_format) {
+ TESTING("H5Fstart_swmr_write() on failure conditions for latest format");
+
+ /* Release the first copy before overwriting fapl with the latest-format
+ * copy; otherwise the original property list is leaked.
+ */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+ if((fapl = H5Pcopy(new_fapl)) < 0)
+ FAIL_STACK_ERROR
+ } else {
+ TESTING("H5Fstart_swmr_write() on failure conditions for without latest format");
+ }
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+
+ /*
+ * (A) When creating a file:
+ */
+
+ /* Case 1 */
+
+ /* Create the file with SWMR_WRITE + with/without latest format */
+ /* (check the creation succeeded before using fid) */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should fail to enable SWMR writing when the file is already in SWMR writing mode */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Case 2 */
+
+ if(new_format) {
+
+ /* Create the file with write + latest format */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create and commit a named datatype */
+ if((tid = H5Tcopy(H5T_NATIVE_INT)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Tcommit2(fid, "TID", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should fail to enable SWMR writing when there is an opened named datatype */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Close the datatype */
+ if(H5Tclose(tid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should succeed in enabling SWMR writing */
+ if(H5Fstart_swmr_write(fid) < 0)
+ TEST_ERROR;
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ } else {
+
+ /* Create a file with write + non-latest-format */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ TEST_ERROR
+
+ /* Should fail to enable SWMR writing because the file's superblock version is not at least 3 */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+ } /* end if */
+
+ /*
+ * (B) When opening a file which is created with the latest format
+ */
+
+ /* Create a file with write + latest format */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, new_fapl)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Case 1 */
+
+ /* Open the file with SWMR write + with/without latest format */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should fail to enable SWMR writing when the file is already in SWMR writing mode */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Case 2 */
+
+ /* Open the file with read only access + with/without latest format */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should fail to enable SWMR writing when the file is opened with read only access */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Case 3 */
+
+ /* Open the file file with SWMR read access + with/without latest format */
+ if((fid = H5Fopen(filename, H5F_ACC_SWMR_READ, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should fail to enable SWMR writing when the file is opened with SWMR read access only */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Case 4 */
+
+ /* Open the file with write + with/without latest format */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create and commit a named datatype */
+ if((tid = H5Tcopy(H5T_NATIVE_INT)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Tcommit2(fid, "TID", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Create dataspace */
+ if((sid = H5Screate(H5S_SCALAR)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Attach an attribute to the named datatype */
+ if((aid = H5Acreate2(tid, "attr", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should fail to enable SWMR writing when there are opened named datatype and attribute */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Close the datatype */
+ if(H5Tclose(tid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Still fail to enable SWMR writing when the attribute is still opened */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Close the attribute */
+ if(H5Aclose(aid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should succeed in enabling SWMR writing */
+ if(H5Fstart_swmr_write(fid) < 0)
+ TEST_ERROR;
+
+ /* Close the dataspace */
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+
+ /*
+ * (C) Failure cases for multiple opens
+ */
+
+ /* Case 1 */
+
+ /* Create a file with write + with/without latest format */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC | (new_format ? 0 : H5F_ACC_SWMR_WRITE), H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Open the file with write + with/without latest format */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should succeed in enabling SWMR writing mode */
+ if(H5Fstart_swmr_write(fid) < 0)
+ TEST_ERROR
+
+ /* Should fail for a second call to enable SWMR writing mode */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+
+ /* Case 2 */
+
+ /* Open the file with write + with/without latest format */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should succeed in enabling SWMR writing mode */
+ if(H5Fstart_swmr_write(fid) < 0)
+ TEST_ERROR
+
+ /* Re-open the same file */
+ if((fid2 = H5Freopen(fid)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should fail to enable SWMR writing mode for fid2 */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid2);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Close the files */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid2) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Case 3 */
+
+ /* Open the file with write + with/without latest format */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Open the same file */
+ if((fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should succeed in enabling SWMR writing for fid */
+ if(H5Fstart_swmr_write(fid) < 0)
+ TEST_ERROR
+
+ /* Should fail to enable SWMR writing for fid2 */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid2);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ /* Close the files */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid2) < 0)
+ FAIL_STACK_ERROR;
+
+ if(!new_format) {
+
+ /*
+ * (D) When opening a file which is created without the latest format:
+ */
+
+ /* Create a file with write + without latest format */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Case 1 */
+
+ /* Should fail to open the file with SWMR write + latest format due to superblock version not at least 3 */
+ H5E_BEGIN_TRY {
+ bad_fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, new_fapl);
+ } H5E_END_TRY;
+ if(bad_fid >= 0)
+ TEST_ERROR
+
+ /* Case 2 */
+
+ /* Should fail to open the file with SWMR write + non-latest-format due to superblock version not at least 3 */
+ H5E_BEGIN_TRY {
+ bad_fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl);
+ } H5E_END_TRY;
+ if(bad_fid >= 0)
+ TEST_ERROR
+
+ /* Case 3 */
+
+ /* Open the file with write + latest format */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, new_fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should fail to enable SWMR writing due to superblock version not at least 3 */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+
+ /* Case 4 */
+
+ /* Open the file with write + non-latest-format */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should fail to enable SWMR writing because the file's superblock version is not at least 3 */
+ H5E_BEGIN_TRY {
+ ret = H5Fstart_swmr_write(fid);
+ } H5E_END_TRY;
+ if(ret >= 0)
+ TEST_ERROR
+
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ } /* not new */
+
+ /* Close the file access property list */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(new_fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Aclose(aid);
+ H5Tclose(tid);
+ H5Sclose(sid);
+ H5Fclose(fid);
+ H5Fclose(fid2);
+ H5Pclose(fapl);
+ H5Pclose(new_fapl);
+ /* bad_fid should only represent a real ID in the error case.
+ * It never needs to be closed in the normal case.
+ */
+ H5Fclose(bad_fid);
+ } H5E_END_TRY;
+
+ return -1;
+} /* test_err_start_swmr_write() */
+
+/*
+ * test_start_swmr_write_concur():
+ *
+ * The "new_format" parameter indicates whether to create the file with latest format or not.
+ * To have SWMR support, can use either one of the following in creating a file:
+ * (a) Create the file with write + latest format:
+ * --result in v3 superblock with latest chunk indexing types
+ * (b) Create the file with SWMR write + non-latest-format:
+ * --result in v3 superblock with latest chunk indexing types
+ *
+ * Verify concurrent access for H5Fstart_swmr_write()--
+ * (1) Parent: open a file with write access
+ * Child: concurrent open of the file with read & SWMR read (fail)
+ * (2) Parent: open a file with write access; enable SWMR writing mode
+ * Child: concurrent open of the file with read & SWMR read (succeed)
+ * (3) Parent: open a file with write access; enable SWMR writing mode
+ * Child: Concurrent open of the file with read only (fail)
+ * (4) Parent: open a file with write access; enable SWMR writing mode
+ * Child: concurrent open of the file with write access (fail)
+ * (5) Parent: open a file with write access; enable SWMR writing mode
+ * Child: concurrent open of the file with write and SWMR write access (fail)
+ */
+#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID))
+
+/* Stub used when the platform cannot run the concurrent-access test:
+ * fork()/waitpid() are required to spawn the separate reader/writer
+ * processes, so without them the test is reported as skipped, not failed.
+ */
+static int
+test_start_swmr_write_concur(hid_t H5_ATTR_UNUSED in_fapl, hbool_t H5_ATTR_UNUSED new_format)
+{
+ SKIPPED();
+ HDputs(" Test skipped due to fork or waitpid not defined.");
+ return 0;
+} /* test_start_swmr_write_concur() */
+
+#else /* defined(H5_HAVE_FORK && defined(H5_HAVE_WAITPID) */
+
+static int
+test_start_swmr_write_concur(hid_t in_fapl, hbool_t new_format)
+{
+ hid_t fid; /* File ID */
+ hid_t fapl; /* File access property list */
+ pid_t childpid=0; /* Child process ID */
+ pid_t tmppid; /* Child process ID returned by waitpid */
+ int child_status; /* Status passed to waitpid */
+ int child_wait_option=0; /* Options passed to waitpid */
+ int child_exit_val; /* Exit status of the child */
+ char filename[NAME_BUF_SIZE]; /* File name */
+
+ /* Output message about test being performed */
+ if(new_format) {
+ TESTING("Testing H5Fstart_swmr_write()--concurrent access for latest format");
+ } else {
+ TESTING("Testing H5Fstart_swmr_write()--concurrent access for non-latest-format");
+ } /* end if */
+
+ /* NOTE: each of the five cases below forks a child process.  Parent and
+ * child synchronize through a message file (DONE_MESSAGE) using
+ * h5_send_message()/h5_wait_message(); the child inherits copies of
+ * fapl and filename across HDfork().
+ */
+
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ if(new_format) {
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create the test file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+ } else {
+ /* Create the test file without latest format but with SWMR write */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+ } /* end if */
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Remove the message file to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /*
+ * Case (1):
+ * Verify concurrent file open with H5F_ACC_RDONLY|H5F_ACC_SWMR_READ
+ * will fail without H5Fstart_swmr_write()
+ */
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Should fail */
+ H5E_BEGIN_TRY {
+ /* Open the test file */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl);
+ } H5E_END_TRY;
+ if(fid >= 0)
+ HDexit(EXIT_FAILURE);
+
+ HDexit(EXIT_SUCCESS);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check exit status of child process */
+ if(WIFEXITED(child_status)) {
+ if((child_exit_val = WEXITSTATUS(child_status)) != 0)
+ TEST_ERROR
+ } else /* child process terminated abnormally */
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case (2):
+ * Verify concurrent file open with H5F_ACC_RDONLY|H5F_ACC_SWMR_READ
+ * will succeed with H5Fstart_swmr_write()
+ */
+
+ /* Remove the message file to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Should succeed in opening the test file */
+ if((child_fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+ HDexit(EXIT_FAILURE);
+ if(H5Fclose(child_fid) < 0)
+ HDexit(EXIT_FAILURE);
+ HDexit(EXIT_SUCCESS);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Enable SWMR writing mode */
+ if(H5Fstart_swmr_write(fid) < 0)
+ TEST_ERROR
+
+ /* Send the message that H5Fstart_swmr_write() completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check exit status of child process */
+ if(WIFEXITED(child_status)) {
+ if((child_exit_val = WEXITSTATUS(child_status)) != 0)
+ TEST_ERROR
+ } else /* Child process terminated abnormally */
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case (3):
+ * Verify concurrent file open with H5F_ACC_RDONLY
+ * will fail with H5Fstart_swmr_write()
+ */
+
+ /* Remove the message file to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Should fail in opening the test file */
+ H5E_BEGIN_TRY {
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
+ } H5E_END_TRY;
+ if(fid >= 0)
+ HDexit(EXIT_FAILURE);
+
+ HDexit(EXIT_SUCCESS);
+ } /* end if */
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Enable SWMR writing mode */
+ if(H5Fstart_swmr_write(fid) < 0)
+ TEST_ERROR
+
+ /* Send the message that H5Fstart_swmr_write() completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check exit status of child process */
+ if(WIFEXITED(child_status)) {
+ if((child_exit_val = WEXITSTATUS(child_status)) != 0)
+ TEST_ERROR
+ } else /* Child process terminated abnormally */
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case (4):
+ * Verify concurrent file open with H5F_ACC_RDWR
+ * will fail with H5Fstart_swmr_write()
+ */
+
+ /* Remove the message file to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Should fail in opening the test file */
+ H5E_BEGIN_TRY {
+ fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ } H5E_END_TRY;
+ if(fid >= 0)
+ HDexit(EXIT_FAILURE);
+
+ HDexit(EXIT_SUCCESS);
+ } /* end if */
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Enable SWMR writing mode */
+ if(H5Fstart_swmr_write(fid) < 0)
+ TEST_ERROR
+
+ /* Send the message that H5Fstart_swmr_write() completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check exit status of child process */
+ if(WIFEXITED(child_status)) {
+ if((child_exit_val = WEXITSTATUS(child_status)) != 0)
+ TEST_ERROR
+ } else /* Child process terminated abnormally */
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case (5):
+ * Verify concurrent file open with H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE
+ * will fail with H5Fstart_swmr_write()
+ */
+
+ /* Remove the message file to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Should fail in opening the test file */
+ H5E_BEGIN_TRY {
+ fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl);
+ } H5E_END_TRY;
+ if(fid >= 0)
+ HDexit(EXIT_FAILURE);
+
+ HDexit(EXIT_SUCCESS);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Enable SWMR writing mode */
+ if(H5Fstart_swmr_write(fid) < 0)
+ TEST_ERROR
+
+ /* Send the message that H5Fstart_swmr_write() completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check exit status of child process */
+ if(WIFEXITED(child_status)) {
+ if((child_exit_val = WEXITSTATUS(child_status)) != 0)
+ TEST_ERROR
+ } else /* Child process terminated abnormally */
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the property list */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ /* NOTE(review): if the parent fails before h5_send_message(), the forked
+ * child may be left blocked in h5_wait_message() -- confirm the test
+ * harness's timeout/cleanup covers this. */
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+
+ return -1;
+
+} /* test_start_swmr_write_concur() */
+#endif /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) */
+
+/*
+ * Tests for H5Pset/get_object_flush_cb()
+ */
+
+/* Object-flush property callback: bumps the unsigned counter supplied
+ * through the user-data pointer and reports success. */
+static herr_t
+flush_cb(hid_t H5_ATTR_UNUSED obj_id, void *_udata)
+{
+    unsigned *counter = (unsigned *)_udata;
+
+    *counter += 1;
+    return 0;
+}
+
+/*
+ * test_object_flush_cb()
+ *
+ * Verify the public routines H5Pget/set_object_flush_cb() work as specified:
+ * 1) To verify the failure condition in setting object flush property
+ * 2) To verify the object flush property values retrieved from a default
+ * file access property list.
+ * 3) To verify the object flush property values retrieved from a non-default
+ * file access property list.
+ * 4) To verify the object flush property values retrieved from a default
+ * file access property list of a file
+ * 5) To verify the object flush property values retrieved from a non-default
+ * file access property list of a file
+ * To verify the object flush callback is invoked when doing H5Oflush(),
+ * H5Dflush(), H5Gflush() and H5Tflush().
+ */
+static int
+test_object_flush_cb(hid_t in_fapl)
+{
+    hid_t fapl = -1;		/* A copy of file access property list */
+    hid_t ffapl = -1;		/* A file's file access property list */
+    hid_t fid = -1;		/* File ID */
+    hid_t gid = -1;		/* Group ID */
+    hid_t did1 = -1, did2 = -1;	/* Dataset IDs */
+    hid_t sid = -1;		/* Dataspace ID */
+    hsize_t dims[2] = {5, 10};	/* Dataset dimension sizes */
+    int buf[50];		/* Data buffer */
+    H5F_flush_cb_t ret_cb;	/* The callback function set in object flush property */
+    void *ret_ct;		/* The user data set in object flush property */
+    unsigned flush_ct = 0;	/* The user data for object flush property */
+    char filename[NAME_BUF_SIZE];	/* File name */
+    int i;			/* Local index variable */
+    herr_t ret;			/* Generic return value */
+
+    TESTING("H5Pget/set_obj_flush_cb()");
+
+    /*
+     * Case (1)
+     * To verify the failure condition in setting object flush property
+     */
+
+    /* Create a copy of file access property list.  This must happen
+     * BEFORE the failure check below: previously fapl was still -1 at
+     * this point, so H5Pset_object_flush_cb() failed because of the
+     * invalid plist ID instead of the (NULL callback, non-NULL user
+     * data) combination that this case is meant to exercise. */
+    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Should fail if the callback function is not defined but user data is defined */
+    H5E_BEGIN_TRY {
+        ret = H5Pset_object_flush_cb(fapl, NULL, &flush_ct);
+    } H5E_END_TRY;
+    if(ret >= 0)
+        TEST_ERROR
+
+    /*
+     * Case (2)
+     * To verify the object flush property values retrieved from a
+     * default file access property list.
+     */
+
+    /* Retrieve object flush property values for the default file access property list */
+    if(H5Pget_object_flush_cb(fapl, &ret_cb, &ret_ct) < 0)
+        TEST_ERROR
+    /* Should be null */
+    if(ret_cb != NULL || ret_ct != NULL)
+        TEST_ERROR
+
+    /*
+     * Case (3)
+     * To verify the object flush property values retrieved from a
+     * non-default file access property list.
+     */
+    /* Set the object flush property */
+    if(H5Pset_object_flush_cb(fapl, flush_cb, &flush_ct) < 0)
+        TEST_ERROR
+
+    /* Increment the counter */
+    ++flush_ct;
+
+    /* Retrieve object flush property values for the non-default file access property list */
+    if(H5Pget_object_flush_cb(fapl, &ret_cb, &ret_ct) < 0)
+        TEST_ERROR
+
+    /* Verify expected values */
+    if(ret_cb != flush_cb || *(unsigned *)ret_ct != 1)
+        TEST_ERROR
+
+    /* Close the property list */
+    if(H5Pclose(fapl) < 0)
+        FAIL_STACK_ERROR;
+
+    /*
+     * Case (4)
+     * To verify the object flush property values retrieved from a
+     * default file access property list of a file
+     */
+
+    /* Reset values */
+    flush_ct = 0;
+    ret_cb = NULL;
+    ret_ct = NULL;
+
+    /* Make a copy of the input parameter in_fapl */
+    if((fapl = H5Pcopy(in_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Set to use the latest library format */
+    if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+        FAIL_STACK_ERROR
+
+    /* Set the filename to use for this test (dependent on fapl) */
+    h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+    /* Create the test file: without setting object flush property in fapl */
+    if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Get the file's file access property list */
+    if((ffapl = H5Fget_access_plist(fid)) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Retrieve the object flush property values */
+    if(H5Pget_object_flush_cb(ffapl, &ret_cb, &ret_ct) < 0)
+        TEST_ERROR
+
+    /* Verify expected values */
+    if(ret_cb != NULL || ret_ct != NULL)
+        TEST_ERROR
+
+    /* Closing */
+    if(H5Pclose(ffapl) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR;
+
+    /*
+     * Cases (5)
+     * To verify the object flush property values retrieved from a non-default
+     * file access property list of a file.
+     * To verify the object flush callback is invoked when doing H5Oflush(),
+     * H5Dflush(), H5Gflush() and H5Tflush().
+     */
+    /* Reset values */
+    flush_ct = 0;
+    ret_cb = NULL;
+    ret_ct = NULL;
+
+    /* Set the object flush property */
+    if(H5Pset_object_flush_cb(fapl, flush_cb, &flush_ct) < 0)
+        FAIL_STACK_ERROR
+
+    /* Open the test file: with object flush property setting in fapl */
+    if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Create the dataspace shared by both datasets below */
+    if((sid = H5Screate_simple(2, dims, dims)) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Create a dataset */
+    if((did1 = H5Dcreate2(fid, "dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Initialize data buffer */
+    for(i = 0; i < 50; i++)
+        buf[i] = i + 1;
+
+    /* Write to the dataset */
+    if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Flush the dataset object: should invoke the callback (count -> 1) */
+    if(H5Oflush(did1) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Get the file's file access property list */
+    if((ffapl = H5Fget_access_plist(fid)) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Retrieve the object flush property values */
+    if(H5Pget_object_flush_cb(ffapl, &ret_cb, &ret_ct) < 0)
+        TEST_ERROR
+
+    /* Verify expected values */
+    if(ret_cb != flush_cb || *(unsigned *)ret_ct != 1)
+        TEST_ERROR
+
+    /* Create a group */
+    if((gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Flush the group: should invoke the callback (count -> 2) */
+    if(H5Gflush(gid) < 0)
+        TEST_ERROR
+
+    /* Retrieve the object flush property values */
+    if(H5Pget_object_flush_cb(ffapl, &ret_cb, &ret_ct) < 0)
+        TEST_ERROR
+
+    /* Verify expected values */
+    if(ret_cb != flush_cb || *(unsigned *)ret_ct != 2)
+        TEST_ERROR
+
+    /* Create a dataset */
+    if((did2 = H5Dcreate2(gid, "dataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Flush the dataset: should invoke the callback (count -> 3) */
+    if(H5Dflush(did2) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Retrieve the object flush property values */
+    if(H5Pget_object_flush_cb(ffapl, &ret_cb, &ret_ct) < 0)
+        TEST_ERROR
+
+    /* Verify expected values */
+    if(ret_cb != flush_cb || *(unsigned *)ret_ct != 3)
+        TEST_ERROR
+
+    /* Closing */
+    if(H5Sclose(sid) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Dclose(did1) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Dclose(did2) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Gclose(gid) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Pclose(fapl) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Pclose(ffapl) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(fapl);
+        H5Pclose(ffapl);
+        H5Sclose(sid);
+        H5Dclose(did1);
+        H5Dclose(did2);
+        H5Gclose(gid);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+
+    return -1;
+} /* test_object_flush_cb() */
+
+/*
+ * Tests for H5Pset/get_append_flush()
+ */
+
+
+/* Append-flush property callback: increments the unsigned counter passed
+ * as user data and reports success.
+ *
+ * Previously written as "++(*count++)": the postfix ++ advanced only a
+ * local copy of the pointer (dead, misleading code) while the prefix ++
+ * did the real work.  Write only the intended value increment. */
+static herr_t
+append_cb(hid_t H5_ATTR_UNUSED dset_id, hsize_t H5_ATTR_UNUSED *cur_dims, void *_udata)
+{
+    unsigned *count = (unsigned *)_udata;
+
+    ++(*count);    /* increment the counter value, not the pointer */
+    return 0;
+} /* append_cb() */
+
+
+/* Second append-flush property callback (distinct address from append_cb
+ * so tests can tell which callback was registered): increments the
+ * unsigned counter passed as user data.
+ *
+ * Previously written as "++(*count++)"; the postfix pointer increment was
+ * dead code -- keep only the intended value increment. */
+static herr_t
+append_cb2(hid_t H5_ATTR_UNUSED dset_id, hsize_t H5_ATTR_UNUSED *cur_dims, void *_udata)
+{
+    unsigned *count = (unsigned *)_udata;
+
+    ++(*count);    /* increment the counter value, not the pointer */
+    return 0;
+} /* append_cb2() */
+
+
+
+/*
+ * test_append_flush_generic()
+ *
+ * Verify H5Pget/set_append_flush() work as specified for a generic dataset
+ * access property list:
+ * 1) To verify the append flush property values retrieved from a default
+ * access property list.
+ * -- zero boundary, null callback function, null user data
+ * 2) To verify the failure conditions in setting append flush property:
+ * -- an invalid dataset rank: <= 0, > H5S_MAX_RANK
+ * -- undefined callback but defined user data
+ * -- no boundary specified
+ * -- invalid boundary size: H5S_UNLIMITED, negative value
+ * 3) To verify the append flush property values retrieved from a non-default
+ * access property list.
+ * -- the set callback function, the set user data
+ * -- the # of boundary sizes retrieved does not exceed MIN(input ndims, the ndims set)
+ */
+static int
+test_append_flush_generic(void)
+{
+    hid_t dapl = -1;		/* A copy of dataset access property */
+    hsize_t boundary[3];	/* The boundary for append flush property */
+    unsigned count = 0;		/* The user data for append flush property */
+    hsize_t ret_boundary[3];	/* The boundary set in append flush property */
+    H5D_append_cb_t ret_cb;	/* The callback function set in append flush property */
+    unsigned *ret_count;	/* The user data set in append flush property */
+    herr_t ret;			/* The return value */
+
+    TESTING("H5Fget/set_append_flush() for a generic dataset access property list");
+
+    /*
+     * Case (1)
+     * To verify the retrieved append flush property values:
+     *		-- zero boundary, null callback function, null user data
+     */
+
+    /* Create a copy of dataset access property list */
+    if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Retrieve the append flush property values */
+    if(H5Pget_append_flush(dapl, 2, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+        FAIL_STACK_ERROR
+
+    /* Verify expected values */
+    if(ret_boundary[0] != 0 || ret_boundary[1] != 0)
+        TEST_ERROR;
+    if(ret_cb != NULL || ret_count != NULL)
+        TEST_ERROR
+
+    /* Close the property list */
+    if(H5Pclose(dapl) < 0)
+        FAIL_STACK_ERROR;
+
+    /*
+     * Case (2)
+     * To verify the failure conditions in setting append flush property:
+     *		-- an invalid dataset rank: <= 0, > H5S_MAX_RANK
+     *		-- no boundary specified
+     *		-- undefined callback but defined user data
+     *		-- invalid boundary size: H5S_UNLIMITED, negative value
+     */
+
+    /* Create a copy of dataset access property list */
+    if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Invalid dataset rank: zero value */
+    H5E_BEGIN_TRY {
+        ret = H5Pset_append_flush(dapl, 0, NULL, NULL, &count);
+    } H5E_END_TRY;
+    if(ret >= 0)
+        TEST_ERROR
+
+    /* Invalid dataset rank: > H5S_MAX_RANK */
+    H5E_BEGIN_TRY {
+        ret = H5Pset_append_flush(dapl, H5S_MAX_RANK+1, NULL, NULL, &count);
+    } H5E_END_TRY;
+    if(ret >= 0)
+        TEST_ERROR
+
+    /* No boundary specified */
+    H5E_BEGIN_TRY {
+        ret = H5Pset_append_flush(dapl, 2, NULL, NULL, &count);
+    } H5E_END_TRY;
+    if(ret >= 0)
+        TEST_ERROR
+
+    /* Set up a valid boundary */
+    boundary[0] = 1;
+    boundary[1] = 1;
+
+    /* Undefined callback function but defined user data */
+    H5E_BEGIN_TRY {
+        ret = H5Pset_append_flush(dapl, 2, boundary, NULL, &count);
+    } H5E_END_TRY;
+    if(ret >= 0)
+        TEST_ERROR
+
+    /* Invalid boundary size: negative value (wraps to a huge hsize_t) */
+    boundary[0] = (hsize_t)-1;
+    boundary[1] = 1;
+    H5E_BEGIN_TRY {
+        ret = H5Pset_append_flush(dapl, 2, boundary, append_cb, &count);
+    } H5E_END_TRY;
+    if(ret >= 0)
+        TEST_ERROR
+
+    /* Invalid boundary size: H5S_UNLIMITED */
+    boundary[0] = 1;
+    boundary[1] = H5S_UNLIMITED;
+    H5E_BEGIN_TRY {
+        ret = H5Pset_append_flush(dapl, 2, boundary, append_cb, &count);
+    } H5E_END_TRY;
+    if(ret >= 0)
+        TEST_ERROR
+
+    /*
+     * Case (3)
+     * To verify the append flush property values retrieved from a non-default
+     * access property list:
+     *		-- the set callback function, the set user data
+     *		-- the # of boundary sizes retrieved does not exceed MIN(input ndims, the ndims set)
+     */
+    boundary[0] = boundary[1] = 1;
+    boundary[2] = 0;
+    count = 1;
+    if(H5Pset_append_flush(dapl, 2, boundary, append_cb, &count) < 0)
+        FAIL_STACK_ERROR;
+    ++count;
+
+    /* Verify expected values: with boundary rank > set boundary rank.
+     * (Fixed: these checks previously tested the input array boundary[2]
+     * instead of the retrieved ret_boundary values.) */
+    if(H5Pget_append_flush(dapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+        TEST_ERROR
+    if(ret_boundary[0] != 1 || ret_boundary[1] != 1 || ret_boundary[2] != 0)
+        TEST_ERROR;
+    if(ret_cb == NULL || ret_count == NULL || *ret_count != 2)
+        TEST_ERROR
+
+    /* Verify expected values: with boundary rank < set boundary rank */
+    HDmemset(ret_boundary, 0, sizeof(ret_boundary));
+    if(H5Pget_append_flush(dapl, 1, ret_boundary, NULL, NULL) < 0)
+        TEST_ERROR
+    if(ret_boundary[0] != 1 || ret_boundary[1] != 0 || ret_boundary[2] != 0)
+        TEST_ERROR;
+
+    /* Closing */
+    if(H5Pclose(dapl) < 0)
+        FAIL_STACK_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(dapl);
+    } H5E_END_TRY;
+
+    return -1;
+} /* test_append_flush_generic() */
+
+/*
+ * test_append_flush_dataset_chunked()
+ *
+ * Verify H5Pget/set_append_flush() work as specified for a chunked dataset's
+ * access property list:
+ * 1) To verify the append flush property values retrieved from a default
+ * access property list:
+ * -- zero boundary, null callback function, null user data
+ * 2) To verify failure in creating dataset when:
+ * -- the rank set in append flush property is not the same as the dataset's rank
+ * -- boundary (non-zero) is set for a non-extendible dimension
+ * 3) To verify the append flush property values retrieved from a non-default
+ * access property list:
+ * -- the set callback function, the set user data
+ * -- the # of boundary sizes retrieved does not exceed MIN(input ndims, the ndims set)
+ */
+static int
+test_append_flush_dataset_chunked(hid_t in_fapl)
+{
+ hid_t fid = -1; /* file ID */
+ hid_t fapl = -1; /* A copy of file access property */
+ hid_t did1 = -1, did2 = -1; /* The datset ID */
+ hid_t sid = -1; /* The dataspace ID */
+ hid_t dcpl = -1; /* A copy of dataset creation property */
+ hid_t dapl = -1; /* A copy of dataset access property */
+ hid_t ddapl = -1; /* The dataset access property of the opened dataset */
+
+ hsize_t boundary[3]; /* Boundary size */
+ unsigned count = 0; /* User data */
+
+ hsize_t ret_boundary[3]; /* Boundary size set in the append flush property */
+ H5D_append_cb_t ret_cb; /* The callback function set in the append flush property */
+ unsigned *ret_count; /* The user data set in the append flush property */
+
+ char filename[NAME_BUF_SIZE]; /* file name */
+
+ hsize_t dims[2] = {100, 0}; /* The dataset dimension sizes */
+ hsize_t maxdims[2] = {100, H5S_UNLIMITED}; /* The dataset maximum dimension sizes */
+ hsize_t chunk_dims[2] = {5,2}; /* The chunk dimension sizes */
+
+ TESTING("H5Fget/set_append_flush() for a chunked dataset's access property list");
+
+ /*
+ * Case (1)--
+ * For a chunked dataset's access property list:
+ * --to verify the append flush property values retrieved from a default access
+ * a default access property list is:
+ * zero rank, zero boundary, null callback function, null user data
+ */
+
+ /* Get a copy of the input parameter in_fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /* Create the test file to work on */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a chunked dataset with 1 extendible dimension (dimension 1) */
+ if((sid = H5Screate_simple(2, dims, maxdims)) < 0)
+ FAIL_STACK_ERROR;
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ FAIL_STACK_ERROR;
+ if((did1 = H5Dcreate2(fid, "dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Get the dataset's access property list */
+ if((ddapl = H5Dget_access_plist(did1)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the append flush property values */
+ if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+ TEST_ERROR
+
+ /* Verify expected values */
+ if(ret_boundary[0] != 0 || ret_boundary[1] != 0 || ret_boundary[2] != 0)
+ TEST_ERROR;
+ if(ret_cb != NULL || ret_count != NULL)
+ TEST_ERROR
+
+ /* Close the dataset's access property list */
+ if(H5Pclose(ddapl) < 0)
+ FAIL_STACK_ERROR;
+
+ /*
+ * Case (2)--
+ * For a chunked dataset's access property list:
+ * --to verify failure in creating the dataset when:
+ * --the rank set in append flush property is not the same as the dataset's rank
+ * -- boundary (non-zero) is set for a non-extendible dimension
+ * --to verify failure in opening the dataset
+ * -- boundary (non-zero) is set for a non-extendible dimension
+ */
+ /* Create a copy of dataset access property list */
+ if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set boundary dimension rank (3) > the rank (2) of dataset to be created */
+ HDmemset(boundary, 0, sizeof(boundary));
+ if(H5Pset_append_flush(dapl, 3, boundary, NULL, NULL) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should fail to create the dataset: rank mismatch */
+ H5E_BEGIN_TRY {
+ did2 = H5Dcreate2(fid, "dataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl);
+ } H5E_END_TRY;
+ if(did2 >= 0)
+ TEST_ERROR
+
+ /* Set boundary for a non-extendible dimension (dimension 0 is fixed at 100) */
+ boundary[0] = boundary[1] = 1;
+ if(H5Pset_append_flush(dapl, 2, boundary, NULL, NULL) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should fail to create the dataset */
+ H5E_BEGIN_TRY {
+ did2 = H5Dcreate2(fid, "dataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl);
+ } H5E_END_TRY;
+ if(did2 >= 0)
+ TEST_ERROR
+
+ /* Create and close the dataset (with a default dapl, so creation succeeds) */
+ if((did2 = H5Dcreate2(fid, "dataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(did2) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should fail to open the dataset: dapl still has a non-zero boundary on the fixed dimension */
+ H5E_BEGIN_TRY {
+ did2 = H5Dopen2(fid, "dataset2", dapl);
+ } H5E_END_TRY;
+ if(did2 >= 0)
+ TEST_ERROR
+
+ /*
+ * Case (3)--
+ * For a chunked dataset's access property list:
+ * --To verify the append flush property values retrieved from a non-default
+ * access property list:
+ * -- the set callback function, the set user data
+ * -- the # of boundary sizes retrieved does not exceed MIN(input ndims, the ndims set)
+ */
+
+ /* Zero boundary on the fixed dimension is allowed; only the extendible
+ * dimension 1 gets a non-zero boundary, so the open below succeeds */
+ boundary[0] = 0;
+ boundary[1] = 1;
+ if(H5Pset_append_flush(dapl, 2, boundary, append_cb, &count) < 0)
+ FAIL_STACK_ERROR
+ if((did2 = H5Dopen2(fid, "dataset2", dapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the dataset's access property list */
+ if((ddapl = H5Dget_access_plist(did2)) < 0)
+ FAIL_STACK_ERROR
+
+ HDmemset(ret_boundary, 0, sizeof(ret_boundary));
+ ret_cb = NULL;
+ ret_count = NULL;
+ /* Retrieve the append flush property values */
+ if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+ TEST_ERROR
+
+ /* Verify expected values */
+ if(ret_cb != append_cb || ret_count != &count)
+ TEST_ERROR
+ if(ret_boundary[0] != 0 || ret_boundary[1] != 1 || ret_boundary[2] != 0)
+ TEST_ERROR
+
+ HDmemset(ret_boundary, 0, sizeof(ret_boundary));
+ /* Retrieve the append flush property values: only 1 boundary value requested */
+ if(H5Pget_append_flush(ddapl, 1, ret_boundary, NULL, NULL) < 0)
+ TEST_ERROR
+ if(ret_boundary[0] != 0 || ret_boundary[1] != 0 || ret_boundary[2] != 0)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Pclose(ddapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(did1) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(did2) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Pclose(dapl);
+ H5Pclose(ddapl);
+ H5Dclose(did1);
+ H5Dclose(did2);
+ H5Pclose(fapl);
+ H5Sclose(sid);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+
+ return -1;
+} /* test_append_flush_dataset_chunked() */
+
+/*
+ * test_append_flush_dataset_fixed():
+ *
+ * Verify H5Pget/set_append_flush() work as specified for a
+ * non-chunked (fixed size) dataset's access property list:
+ * (1) To verify success in creating the dataset--whatever is set for the append flush property setting
+ * (2) To verify that default append flush property values are retrieved for both
+ * default or non-default access property list:
+ * -- zero boundary, null callback function, null user data
+ */
+static int
+test_append_flush_dataset_fixed(hid_t in_fapl)
+{
+ hid_t fid = -1; /* file ID */
+ hid_t fapl = -1; /* A copy of file access property */
+ hid_t did1 = -1, did2 = -1; /* The datset ID */
+ hid_t sid = -1; /* The dataspace ID */
+ hid_t dapl = -1; /* A copy of dataset access property */
+ hid_t ddapl = -1; /* The dataset access property of the opened dataset */
+
+ hsize_t boundary[3]; /* Boundary size */
+ unsigned count = 0; /* User data */
+
+ hsize_t ret_boundary[3]; /* Boundary size set in the append flush property */
+ H5D_append_cb_t ret_cb; /* The callback function set in the append flush property */
+ unsigned *ret_count; /* The user data set in the append flush property */
+
+ char filename[NAME_BUF_SIZE]; /* file name */
+
+ hsize_t dims[1] = {100};
+
+ TESTING("H5Fget/set_append_flush() for a non-chunked dataset's access property list");
+
+ /*
+ * Case (1)--
+ * For a non-chunked dataset's access property list:
+ * --to verify the append flush property values retrieved from
+ * a default access property list is:
+ * zero boundary, null callback function, null user data
+ */
+
+ /* Get a copy of the input parameter in_fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /* Create the test file to work on */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create a dataset */
+ if((sid = H5Screate_simple(1, dims, dims)) < 0)
+ FAIL_STACK_ERROR;
+ if((did1 = H5Dcreate2(fid, "dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Get the dataset's access property list */
+ if((ddapl = H5Dget_access_plist(did1)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the append flush property values */
+ if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+ TEST_ERROR
+
+ /* Verify expected values */
+ if(ret_boundary[0] != 0 || ret_boundary[1] != 0 || ret_boundary[2] != 0)
+ TEST_ERROR;
+ if(ret_cb != NULL || ret_count != NULL)
+ TEST_ERROR
+
+ /* Close the dataset's access property list */
+ if(H5Pclose(ddapl) < 0)
+ FAIL_STACK_ERROR;
+
+ /*
+ * Case (2)--
+ * For a non-chunked dataset's access property list:
+ * --to verify success in creating and opening the dataset even when append flush property
+ * is setup with error conditions:
+ * --the rank set in append flush property is not the same as the dataset's rank
+ * --boundary is set
+ * --to verify the append flush property values are:
+ * zero boundary, null callback function, null user data
+ */
+ /* Create a copy of dataset access property list */
+ if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ FAIL_STACK_ERROR
+
+ boundary[0] = 1;
+ boundary[1] = boundary[2] = 0;
+ if(H5Pset_append_flush(dapl, 3, boundary, append_cb, &count) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should succeed to create the dataset: append flush property has no effect */
+ if((did2 = H5Dcreate2(fid, "dataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, dapl)) < 0)
+ TEST_ERROR
+
+ /* Get the dataset's access property list */
+ if((ddapl = H5Dget_access_plist(did2)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the append flush property values */
+ if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+ TEST_ERROR
+
+ /* Verify expected values */
+ if(ret_cb != NULL || ret_count != NULL)
+ TEST_ERROR
+ if(ret_boundary[0] != 0 || ret_boundary[1] != 0 || ret_boundary[2] != 0)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Pclose(ddapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(did2) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Should succeed in opening the dataset: append flush property has no effect */
+ if((did2 = H5Dopen2(fid, "dataset2", dapl)) < 0)
+ TEST_ERROR
+
+ /* Get the dataset's access property list */
+ if((ddapl = H5Dget_access_plist(did2)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the append flush property values */
+ if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+ TEST_ERROR
+
+ /* Verify expected values */
+ if(ret_cb != NULL || ret_count != NULL)
+ TEST_ERROR
+ if(ret_boundary[0] != 0 || ret_boundary[1] != 0 || ret_boundary[2] != 0)
+ TEST_ERROR
+
+ if(H5Dclose(did2) < 0)
+ FAIL_STACK_ERROR
+ /*
+ * Case (3)--
+ * For a non-chunked dataset's access property list:
+ * --To verify the append flush property values retrieved from a non-default
+ * access property list:
+ * zero boundary, null callback function, null user data
+ */
+
+ HDmemset(boundary, 0, sizeof(boundary));
+ if(H5Pset_append_flush(dapl, 1, boundary, append_cb, &count) < 0)
+ FAIL_STACK_ERROR
+ if((did2 = H5Dopen2(fid, "dataset2", dapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the dataset's access property list */
+ if((ddapl = H5Dget_access_plist(did2)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve the append flush property values */
+ if(H5Pget_append_flush(ddapl, 1, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+ TEST_ERROR
+
+ /* Verify expected values */
+ if(ret_cb != NULL || ret_count != NULL)
+ TEST_ERROR
+ if(ret_boundary[0] != 0 || ret_boundary[1] != 0 || ret_boundary[2] != 0)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Pclose(ddapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(did1) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(did2) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dapl);
+ H5Pclose(ddapl);
+ H5Dclose(did1);
+ H5Dclose(did2);
+ H5Pclose(fapl);
+ H5Sclose(sid);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+
+ return -1;
+} /* test_append_flush_dataset_fixed() */
+
+/*
+ * test_append_flush_multiple()
+ *
+ * Verify H5Pget/set_append_flush() work as specified for multiple opens
+ * of a dataset:
+ *  (1) did1 = H5Dcreate(...dapl1...)
+ *      did2 = H5Dopen2(...dapl2)
+ *      H5Pget_append_flush(did1...)
+ *      H5Pget_append_flush(did2...)
+ *      -- should return append flush property values set in dapl1
+ *  (2) H5Dcreate(...H5P_DEFAULT...)
+ *      H5Dclose()
+ *      did1 = H5Dopen2(...dapl2)
+ *      did2 = H5Dopen2(..dapl1)
+ *      H5Pget_append_flush(did1, ...)
+ *      H5Pget_append_flush(did2, ...)
+ *      -- should return append flush property values set in dapl2,
+ *         the access property list of the very first open
+ * NOTE:
+ *  FOR NOW: return the append flush property values of the create or the very first open
+ *  LATER ON: should REJECT subsequent dataset open if append flush property values differ
+ */
+static int
+test_append_flush_dataset_multiple(hid_t in_fapl)
+{
+    hid_t fid = -1;         /* file ID */
+    hid_t fapl = -1;        /* A copy of file access property */
+    hid_t did1 = -1, did2 = -1;     /* The dataset IDs */
+    hid_t sid = -1;         /* The dataspace ID */
+    hid_t dcpl = -1;        /* A copy of dataset creation property */
+    hid_t dapl1 = -1;       /* A copy of dataset access property */
+    hid_t dapl2 = -1;       /* A copy of dataset access property */
+    hid_t ddapl = -1;       /* The dataset access property of the opened dataset */
+
+    hsize_t boundary1[3];   /* Boundary size */
+    hsize_t boundary2[3];   /* Boundary size */
+    unsigned count1 = 0;    /* User data */
+    unsigned count2 = 0;    /* User data */
+
+    hsize_t ret_boundary[3];    /* Boundary size set in the append flush property */
+    H5D_append_cb_t ret_cb;     /* The callback function set in the append flush property */
+    unsigned *ret_count;        /* The user data set in the append flush property */
+
+    char filename[NAME_BUF_SIZE];   /* file name */
+
+    hsize_t dims[2] = {0, 0};       /* The dataset dimension sizes */
+    hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};    /* The dataset maximum dimension sizes */
+    hsize_t chunk_dims[2] = {5,2};  /* The chunk dimension sizes */
+
+    TESTING("H5Fget/set_append_flush() for multiple opens of a chunked dataset");
+
+    /*
+     * Case (1)
+     * For a chunked dataset's access property list:
+     *  did1 = H5Dcreate(...dapl1...)
+     *  did2 = H5Dopen2(...dapl2)
+     *  H5Pget_append_flush(did1...)
+     *  H5Pget_append_flush(did2...)
+     *  -- should return append flush property values set in dapl1
+     */
+
+    /* Create a copy of dataset access property list */
+    if((dapl1 = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+        FAIL_STACK_ERROR
+    if((dapl2 = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+        FAIL_STACK_ERROR
+
+    boundary1[0] = 0;
+    boundary1[1] = 1;
+    count1 = 0;
+    if(H5Pset_append_flush(dapl1, 2, boundary1, append_cb, &count1) < 0)
+        FAIL_STACK_ERROR
+    boundary2[0] = 1;
+    boundary2[1] = 0;
+    count2 = 0;
+    if(H5Pset_append_flush(dapl2, 2, boundary2, append_cb2, &count2) < 0)
+        FAIL_STACK_ERROR
+
+    /* Get a copy of the input parameter in_fapl */
+    if((fapl = H5Pcopy(in_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Set to use the latest library format */
+    if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+        FAIL_STACK_ERROR
+
+    /* Set the filename to use for this test (dependent on fapl) */
+    h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+    /* Create the test file to work on */
+    if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Create a chunked dataset with 2 extendible dimensions */
+    if((sid = H5Screate_simple(2, dims, maxdims)) < 0)
+        FAIL_STACK_ERROR;
+    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+        FAIL_STACK_ERROR
+    if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+        FAIL_STACK_ERROR;
+    if((did1 = H5Dcreate2(fid, "dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl1)) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Open the dataset */
+    if((did2 = H5Dopen2(fid, "dataset1", dapl2)) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Get the dataset's access property list for did1 */
+    if((ddapl = H5Dget_access_plist(did1)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Retrieve the append flush property values */
+    if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+        TEST_ERROR
+
+    /* Verify expected values: should be the setting in dapl1 */
+    if(ret_boundary[0] != 0 || ret_boundary[1] != 1 || ret_boundary[2] != 0)
+        TEST_ERROR;
+    if(ret_cb != append_cb || ret_count != &count1)
+        TEST_ERROR
+
+    /* Close the dataset's access property list */
+    if(H5Pclose(ddapl) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Get the dataset's access property list for did2 */
+    if((ddapl = H5Dget_access_plist(did2)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Retrieve the append flush property values */
+    if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+        TEST_ERROR
+
+    /* Verify expected values: should be the setting in dapl1 */
+    if(ret_boundary[0] != 0 || ret_boundary[1] != 1 || ret_boundary[2] != 0)
+        TEST_ERROR;
+    if(ret_cb != append_cb || ret_count != &count1)
+        TEST_ERROR
+
+    /* Close the dataset's access property list */
+    if(H5Pclose(ddapl) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Close both handles to the dataset, checking the return values
+     * (these were previously unchecked, silently ignoring errors) */
+    if(H5Dclose(did1) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Dclose(did2) < 0)
+        FAIL_STACK_ERROR;
+
+    /*
+     * Case (2)
+     * For a chunked dataset's access property list:
+     *  H5Dcreate(...H5P_DEFAULT...)
+     *  H5Dclose()
+     *  did1 = H5Dopen2(...dapl2)
+     *  did2 = H5Dopen2(..dapl1)
+     *  H5Pget_append_flush(did1, ...)
+     *  H5Pget_append_flush(did2, ...)
+     *  -- should return append flush property values set in dapl2,
+     *     the access property list of the first open
+     */
+    if((did1 = H5Dcreate2(fid, "dataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Dclose(did1) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Open the dataset with append flush setting in dapl2 */
+    if((did1 = H5Dopen2(fid, "dataset2", dapl2)) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Open the dataset with append flush setting in dapl1 */
+    if((did2 = H5Dopen2(fid, "dataset2", dapl1)) < 0)
+        FAIL_STACK_ERROR;
+
+    /* Get the dataset's access property list for did1 */
+    if((ddapl = H5Dget_access_plist(did1)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Retrieve the append flush property values */
+    if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+        TEST_ERROR
+
+    /* Verify expected values: should be the setting in dapl2 */
+    if(ret_boundary[0] != 1 || ret_boundary[1] != 0 || ret_boundary[2] != 0)
+        TEST_ERROR;
+    if(ret_cb != append_cb2 || ret_count != &count2)
+        TEST_ERROR
+
+    /* Close the access property list */
+    if(H5Pclose(ddapl) < 0)
+        FAIL_STACK_ERROR;
+
+
+    /* Get the dataset's access property list for did2 */
+    if((ddapl = H5Dget_access_plist(did2)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Retrieve the append flush property values */
+    if(H5Pget_append_flush(ddapl, 3, ret_boundary, &ret_cb, (void **)&ret_count) < 0)
+        TEST_ERROR
+
+    /* Verify expected values: should be the setting in dapl2 */
+    if(ret_boundary[0] != 1 || ret_boundary[1] != 0 || ret_boundary[2] != 0)
+        TEST_ERROR;
+    if(ret_cb != append_cb2 || ret_count != &count2)
+        TEST_ERROR
+
+    /* Closing */
+    if(H5Pclose(ddapl) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Pclose(dapl2) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Pclose(dapl1) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Pclose(dcpl) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Pclose(fapl) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Dclose(did1) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Dclose(did2) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Sclose(sid) < 0)
+        FAIL_STACK_ERROR;
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR;
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(dcpl);
+        H5Pclose(dapl1);
+        H5Pclose(dapl2);
+        H5Pclose(ddapl);
+        H5Dclose(did1);
+        H5Dclose(did2);
+        H5Pclose(fapl);
+        H5Sclose(sid);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+
+    return -1;
+} /* test_append_flush_dataset_multiple() */
+
+
+
+/****************************************************************
+**
+**  test_file_lock_same():
+**    Single-process checks of the file locking behavior: open the
+**    same file twice with each combination of RDWR/RDONLY flags,
+**    verify which second opens succeed, and confirm the access
+**    intent reported by H5Fget_intent() for every handle.
+**
+*****************************************************************/
+static int
+test_file_lock_same(hid_t in_fapl)
+{
+    hid_t file_id = -1;             /* ID from the first open of the file */
+    hid_t file_id2 = -1;            /* ID from the second open of the file */
+    hid_t my_fapl = -1;             /* Local copy of the file access property list */
+    unsigned intent_flags;          /* Access flags reported by H5Fget_intent() */
+    char fname[NAME_BUF_SIZE];      /* Name of the test file */
+
+    /* Output message about test being performed */
+    TESTING("File open with different combinations of flags--single process access");
+
+    if((my_fapl = H5Pcopy(in_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Fix up the test file name for this fapl */
+    h5_fixname(FILENAME[1], my_fapl, fname, sizeof(fname));
+
+    /*
+     * Case 1: 1) RDWR 2) RDWR : should succeed
+     */
+    if((file_id = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, my_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* The newly-created file must report RDWR intent */
+    if(H5Fget_intent(file_id, &intent_flags) < 0)
+        FAIL_STACK_ERROR
+
+    if(intent_flags != H5F_ACC_RDWR)
+        TEST_ERROR
+
+    /* A second RDWR open of the same file must succeed ... */
+    if((file_id2 = H5Fopen(fname, H5F_ACC_RDWR, my_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* ... and also report RDWR intent */
+    if(H5Fget_intent(file_id2, &intent_flags) < 0)
+        FAIL_STACK_ERROR
+    if(intent_flags != H5F_ACC_RDWR)
+        TEST_ERROR
+
+    /* Release the first handle */
+    if(H5Fclose(file_id) < 0)
+        FAIL_STACK_ERROR
+
+    /* Release the second handle */
+    if(H5Fclose(file_id2) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 2: 1) RDWR 2) RDONLY : should succeed
+     */
+    if((file_id = H5Fopen(fname, H5F_ACC_RDWR, my_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* First handle reports RDWR */
+    if(H5Fget_intent(file_id, &intent_flags) < 0)
+        FAIL_STACK_ERROR
+    if(intent_flags != H5F_ACC_RDWR)
+        TEST_ERROR
+
+    /* RDONLY re-open while the RDWR handle is live must succeed */
+    if((file_id2 = H5Fopen(fname, H5F_ACC_RDONLY, my_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* The second handle inherits the intent of the 1st open (RDWR) */
+    if(H5Fget_intent(file_id2, &intent_flags) < 0)
+        FAIL_STACK_ERROR
+    if(intent_flags != H5F_ACC_RDWR)
+        TEST_ERROR
+
+    /* Release the first handle */
+    if(H5Fclose(file_id) < 0)
+        FAIL_STACK_ERROR
+
+    /* Release the second handle */
+    if(H5Fclose(file_id2) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 3: 1) RDONLY 2) RDWR : should fail
+     */
+    if((file_id = H5Fopen(fname, H5F_ACC_RDONLY, my_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* First handle reports RDONLY */
+    if(H5Fget_intent(file_id, &intent_flags) < 0)
+        FAIL_STACK_ERROR
+    if(intent_flags != H5F_ACC_RDONLY)
+        TEST_ERROR
+
+    /* Attempting an RDWR open while an RDONLY handle is live must fail */
+    H5E_BEGIN_TRY {
+        file_id2 = H5Fopen(fname, H5F_ACC_RDWR, my_fapl);
+    } H5E_END_TRY;
+    if(file_id2 >= 0)
+        TEST_ERROR
+
+    /* Release the first handle */
+    if(H5Fclose(file_id) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 4: 1) RDONLY 2) RDONLY : should succeed
+     */
+    if((file_id = H5Fopen(fname, H5F_ACC_RDONLY, my_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* First handle reports RDONLY */
+    if(H5Fget_intent(file_id, &intent_flags) < 0)
+        FAIL_STACK_ERROR
+    if(intent_flags != H5F_ACC_RDONLY)
+        TEST_ERROR
+
+    /* A second RDONLY open must succeed ... */
+    if((file_id2 = H5Fopen(fname, H5F_ACC_RDONLY, my_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* ... and also report RDONLY */
+    if(H5Fget_intent(file_id2, &intent_flags) < 0)
+        FAIL_STACK_ERROR
+    if(intent_flags != H5F_ACC_RDONLY)
+        TEST_ERROR
+
+    /* Release the first handle */
+    if(H5Fclose(file_id) < 0)
+        FAIL_STACK_ERROR
+
+    /* Release the second handle */
+    if(H5Fclose(file_id2) < 0)
+        FAIL_STACK_ERROR
+
+    /* Release the local fapl copy */
+    if(H5Pclose(my_fapl) < 0)
+        FAIL_STACK_ERROR
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(my_fapl);
+        H5Fclose(file_id);
+        H5Fclose(file_id2);
+    } H5E_END_TRY;
+
+    return -1;
+} /* end test_file_lock_same() */
+
+/****************************************************************
+**
+**  test_file_lock_swmr_same():
+**    With the implementation of file locking, this test checks file
+**    open with different combinations of flags + SWMR flags.
+**    This is for single process access.
+**
+*****************************************************************/
+static int
+test_file_lock_swmr_same(hid_t in_fapl)
+{
+    hid_t fid = -1, fid2 = -1;  /* File IDs; initialized to -1 so the error
+                                 * path never passes garbage to H5Fclose() */
+    hid_t fapl = -1;            /* File access property list */
+    char filename[NAME_BUF_SIZE];   /* file name */
+
+    /* Output message about test being performed */
+    TESTING("File open with different combinations of flags + SWMR flags--single process access");
+
+    /* Get a copy of the parameter in_fapl */
+    if((fapl = H5Pcopy(in_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Set the filename to use for this test (dependent on fapl) */
+    h5_fixname(FILENAME[1], fapl, filename, sizeof(filename));
+
+    /* Set to use latest library format */
+    if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+        FAIL_STACK_ERROR
+
+    /* Create a file */
+    if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Cases a, b, c, d: H5Fopen failure cases
+     */
+
+    /*
+     * Case a: RDWR|SWMR_READ : should fail
+     */
+    H5E_BEGIN_TRY {
+        fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_READ, fapl);
+    } H5E_END_TRY;
+    if(fid >= 0)
+        TEST_ERROR
+
+    /*
+     * Case b: RDWR|SWMR_WRITE|SWMR_READ : should fail
+     */
+    H5E_BEGIN_TRY {
+        fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE|H5F_ACC_SWMR_READ, fapl);
+    } H5E_END_TRY;
+    if(fid >= 0)
+        TEST_ERROR
+
+    /*
+     * Case c: RDONLY|SWMR_WRITE : should fail
+     */
+    H5E_BEGIN_TRY {
+        fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_WRITE, fapl);
+    } H5E_END_TRY;
+    if(fid >= 0)
+        TEST_ERROR
+
+    /*
+     * Case d: RDONLY|SWMR_WRITE|SWMR_READ : should fail
+     */
+    H5E_BEGIN_TRY {
+        fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_WRITE|H5F_ACC_SWMR_READ, fapl);
+    } H5E_END_TRY;
+    if(fid >= 0)
+        TEST_ERROR
+
+    /*
+     * Cases 1 - 12: combinations of different flags for 1st and 2nd opens
+     */
+
+    /*
+     * Case 1: 1) RDWR 2) RDWR|SWMR_WRITE : should fail
+     */
+    if((fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    H5E_BEGIN_TRY {
+        fid2 = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl);
+    } H5E_END_TRY;
+    if(fid2 >= 0)
+        TEST_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 2: 1) RDWR 2) RDONLY|SWMR_READ : should succeed
+     */
+    if((fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
+        TEST_ERROR
+    if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+        TEST_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+    if(H5Fclose(fid2) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 3: 1) RDWR|SWMR_WRITE 2)RDWR : should succeed
+     */
+    if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+        FAIL_STACK_ERROR
+    if((fid2 = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid2) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 4: 1) RDWR|SWMR_WRITE 2) RDWR|SWMR_WRITE : should succeed
+     */
+    if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+        FAIL_STACK_ERROR
+    if((fid2 = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid2) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 5: 1) RDWR|SWMR_WRITE 2) RDONLY|SWMR_READ : should succeed
+     */
+    if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+        FAIL_STACK_ERROR
+    if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid2) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 6: 1) RDWR|SWMR_WRITE 2) RDONLY : should succeed
+     */
+    if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+        FAIL_STACK_ERROR
+    if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid2) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 7: 1) RDONLY|SWMR_READ 2)RDWR : should fail
+     */
+    if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    H5E_BEGIN_TRY {
+        fid2 = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+    } H5E_END_TRY;
+    if(fid2 >= 0)
+        TEST_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 8: 1) RDONLY|SWMR_READ 2) RDWR|SWMR_WRITE : should fail
+     */
+    if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    H5E_BEGIN_TRY {
+        fid2 = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl);
+    } H5E_END_TRY;
+    if(fid2 >= 0)
+        TEST_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 9: 1) RDONLY|SWMR_READ 2) RDONLY|SWMR_READ : should succeed
+     */
+    if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+        FAIL_STACK_ERROR
+    if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid2) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 10: 1) RDONLY|SWMR_READ 2) RDONLY : should succeed
+     */
+    if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+        TEST_ERROR
+    if((fid2 = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+        TEST_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid2) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 11: 1) RDONLY 2) RDWR|SWMR_WRITE: should fail
+     */
+    if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    H5E_BEGIN_TRY {
+        fid2 = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl);
+    } H5E_END_TRY;
+    if(fid2 >= 0)
+        TEST_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 12: 1) RDONLY 2) RDONLY|SWMR_READ : should fail
+     */
+    if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+        FAIL_STACK_ERROR
+
+    H5E_BEGIN_TRY {
+        fid2 = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl);
+    } H5E_END_TRY;
+    if(fid2 >=0 )
+        TEST_ERROR
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close the property list */
+    if(H5Pclose(fapl) < 0)
+        FAIL_STACK_ERROR
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(fapl);
+        H5Fclose(fid);
+        H5Fclose(fid2);
+    } H5E_END_TRY;
+
+    return -1;
+} /* end test_file_lock_swmr_same() */
+
+
+/****************************************************************
+**
+** test_file_lock_concur():
+** With the implementation of file locking, this test checks file
+** open with different combinations of flags.
+** This is for concurrent access.
+**
+*****************************************************************/
+#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) && defined(H5_HAVE_FLOCK))
+
+static int
+test_file_lock_concur(hid_t H5_ATTR_UNUSED in_fapl)
+{
+    /* Stub compiled when fork/waitpid/flock are unavailable on this
+     * platform: announce the test, report it as skipped, and return
+     * success without doing any file I/O. */
+    /* Output message about test being performed */
+    TESTING("File open with different combinations of flags--concurrent access");
+    SKIPPED();
+    HDputs(" Test skipped due to fork, waitpid, or flock not defined.");
+    return 0;
+
+} /* end test_file_lock_concur() */
+
+#else
+
+static int
+test_file_lock_concur(hid_t in_fapl)
+{
+    hid_t fid = -1;                 /* File ID */
+    hid_t fapl = -1;                /* File access property list */
+    char filename[NAME_BUF_SIZE];   /* file name */
+    pid_t childpid=0;               /* Child process ID */
+    int child_status;               /* Status passed to waitpid */
+    int child_wait_option=0;        /* Options passed to waitpid */
+
+    /* Output message about test being performed */
+    TESTING("File open with different combinations of flags--concurrent access");
+
+    if((fapl = H5Pcopy(in_fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Set the filename to use for this test (dependent on fapl) */
+    h5_fixname(FILENAME[1], fapl, filename, sizeof(filename));
+
+    /* Create the test file */
+    if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close the file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 1: 1) RDWR 2) RDWR : should fail
+     */
+
+    /* Remove the message file to be sure */
+    HDremove(DONE_MESSAGE);
+
+    /* Fork child process */
+    if((childpid = HDfork()) < 0)
+        FAIL_STACK_ERROR
+
+    if(childpid == 0) { /* Child process */
+        hid_t child_fid;    /* File ID */
+
+        /* Wait till parent process completes the open */
+        if(h5_wait_message(DONE_MESSAGE) < 0)
+            HDexit(EXIT_FAILURE);
+
+        /* Open the test file */
+        H5E_BEGIN_TRY {
+            child_fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+        } H5E_END_TRY;
+
+        /* Should fail */
+        if(child_fid == FAIL)
+            HDexit(EXIT_SUCCESS);
+        HDexit(EXIT_FAILURE);
+    }
+
+    /* Open the test file */
+    if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Send the message that the open completes */
+    h5_send_message(DONE_MESSAGE);
+
+    /* Wait for child process to complete */
+    if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+        FAIL_STACK_ERROR
+
+    /* Check if child terminated normally */
+    if(WIFEXITED(child_status)) {
+        /* Check exit status of the child */
+        if(WEXITSTATUS(child_status) != 0)
+            TEST_ERROR
+    } else
+        FAIL_STACK_ERROR
+
+    /* Close the file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 2: 1) RDWR 2) RDONLY : should fail
+     */
+    /* Remove the message file just to be sure */
+    HDremove(DONE_MESSAGE);
+
+    /* Fork child process */
+    if((childpid = HDfork()) < 0)
+        FAIL_STACK_ERROR
+
+    if(childpid == 0) { /* Child process */
+        hid_t child_fid;    /* File ID */
+
+        /* Wait till parent process completes the open */
+        if(h5_wait_message(DONE_MESSAGE) < 0)
+            HDexit(EXIT_FAILURE);
+
+        /* Opens the test file */
+        H5E_BEGIN_TRY {
+            child_fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
+        } H5E_END_TRY;
+
+        /* Should fail */
+        if(child_fid == FAIL)
+            HDexit(EXIT_SUCCESS);
+        HDexit(EXIT_FAILURE);
+    }
+
+    /* Opens the test file */
+    if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Send the message that the open completes */
+    h5_send_message(DONE_MESSAGE);
+
+    /* Wait for child process to complete */
+    if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+        FAIL_STACK_ERROR
+
+    /* Check if child terminated normally */
+    if(WIFEXITED(child_status)) {
+        /* Check exit status of the child */
+        if(WEXITSTATUS(child_status) != 0)
+            TEST_ERROR
+    } else
+        FAIL_STACK_ERROR
+
+    /* Close the file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 3: 1) RDONLY 2) RDWR : should fail
+     */
+
+    /* Remove the message file just to be sure */
+    HDremove(DONE_MESSAGE);
+
+    /* Fork child process */
+    if((childpid = HDfork()) < 0)
+        FAIL_STACK_ERROR
+
+    if(childpid == 0) { /* Child process */
+        hid_t child_fid;    /* File ID */
+
+        /* Wait till parent process completes the open */
+        if(h5_wait_message(DONE_MESSAGE) < 0)
+            HDexit(EXIT_FAILURE);
+
+        /* Opens the test file */
+        H5E_BEGIN_TRY {
+            child_fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+        } H5E_END_TRY;
+
+        /* Should fail */
+        if(child_fid == FAIL)
+            HDexit(EXIT_SUCCESS);
+        HDexit(EXIT_FAILURE);
+    } /* end if */
+
+    /* Opens the test file */
+    if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Send the message that the open completes */
+    h5_send_message(DONE_MESSAGE);
+
+    /* Wait for child process to complete */
+    if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+        FAIL_STACK_ERROR
+
+    /* Check if child terminated normally */
+    if(WIFEXITED(child_status)) {
+        /* Check exit status of the child */
+        if(WEXITSTATUS(child_status) != 0)
+            TEST_ERROR
+    } else
+        FAIL_STACK_ERROR
+
+    /* Close the file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /*
+     * Case 4: 1) RDONLY 2) RDONLY : should succeed
+     */
+
+    HDremove(DONE_MESSAGE);
+
+    /* Fork child process */
+    if((childpid = HDfork()) < 0)
+        FAIL_STACK_ERROR
+
+    if(childpid == 0) { /* Child process */
+        hid_t child_fid;    /* File ID */
+
+        /* Wait till parent process completes the open */
+        if(h5_wait_message(DONE_MESSAGE) < 0)
+            HDexit(EXIT_FAILURE);
+
+        /* Opens the test file */
+        H5E_BEGIN_TRY {
+            child_fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
+        } H5E_END_TRY;
+
+        /* Should succeed */
+        if(child_fid >= 0) {
+            /* Close the file; exit the child directly on failure.
+             * (FAIL_STACK_ERROR here would "goto error" and *return*
+             * from this function inside the forked child, leaving a
+             * duplicate process running the rest of the test program.) */
+            if(H5Fclose(child_fid) < 0)
+                HDexit(EXIT_FAILURE);
+            HDexit(EXIT_SUCCESS);
+        } /* end if */
+
+        HDexit(EXIT_FAILURE);
+    } /* end if */
+
+    /* Create file */
+    if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+        FAIL_STACK_ERROR
+
+    /* Send the message that the open completes */
+    h5_send_message(DONE_MESSAGE);
+
+    /* Wait for child process to complete */
+    if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+        FAIL_STACK_ERROR
+
+    /* Check if child terminated normally */
+    if(WIFEXITED(child_status)) {
+        /* Check exit status of the child */
+        if(WEXITSTATUS(child_status) != 0)
+            TEST_ERROR
+    } else
+        FAIL_STACK_ERROR
+
+    /* Close the file */
+    if(H5Fclose(fid) < 0)
+        FAIL_STACK_ERROR
+
+    /* Close the property list */
+    if(H5Pclose(fapl) < 0)
+        FAIL_STACK_ERROR
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(fapl);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+
+    return -1;
+
+} /* end test_file_lock_concur() */
+
+#endif /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) && defined(H5_HAVE_FLOCK)) */
+
+/****************************************************************
+**
+** test_file_lock_swmr_concur(): low-level file test routine.
+** With the implementation of file locking, this test checks file
+** open with different combinations of flags + SWMR flags.
+** This is for concurrent access.
+**
+*****************************************************************/
+#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID))
+
+static int
+test_file_lock_swmr_concur(hid_t H5_ATTR_UNUSED in_fapl)
+{
+    /* Stub compiled when fork/waitpid are unavailable on this platform:
+     * announce the test, report it as skipped, and return success
+     * without doing any file I/O. */
+    /* Output message about test being performed */
+    TESTING("File open with different combintations of flags + SWMR flags--concurrent access");
+    SKIPPED();
+    HDputs(" Test skipped due to fork or waitpid not defined.");
+    return 0;
+
+} /* end test_file_lock_swmr_concur() */
+
+#else
+
+static int
+test_file_lock_swmr_concur(hid_t in_fapl)
+{
+ hid_t fid; /* File ID */
+ hid_t fapl; /* File access property list */
+ char filename[NAME_BUF_SIZE]; /* file name */
+ pid_t childpid=0; /* Child process ID */
+ int child_status; /* Status passed to waitpid */
+ int child_wait_option=0; /* Options passed to waitpid */
+
+ /* Output message about test being performed */
+ TESTING("File open with different combintations of flags + SWMR flags--concurrent access");
+
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[1], fapl, filename, sizeof(filename));
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create the test file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 1: 1) RDWR 2) RDWR|SWMR_WRITE : should fail
+ */
+
+ /* Remove the message file to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Open the test file */
+ H5E_BEGIN_TRY {
+ child_fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl);
+ } H5E_END_TRY;
+
+ /* Should fail */
+ if(child_fid == FAIL)
+ HDexit(EXIT_SUCCESS);
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if child terminated normally */
+ if(WIFEXITED(child_status)) {
+ /* Check exit status of the child */
+ if(WEXITSTATUS(child_status) != 0)
+ TEST_ERROR
+ } else
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 2: 1) RDWR 2) RDONLY|SWMR_READ: should fail
+ */
+
+ /* Remove the message file to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Open the test file */
+ H5E_BEGIN_TRY {
+ child_fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl);
+ } H5E_END_TRY;
+
+ /* Should fail */
+ if(child_fid == FAIL)
+ HDexit(EXIT_SUCCESS);
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if child terminated normally */
+ if(WIFEXITED(child_status)) {
+ /* Check exit status of the child */
+ if(WEXITSTATUS(child_status) != 0)
+ TEST_ERROR
+ } else
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 3: 1) RDWR|SWMR_WRITE 2) RDWR : should fail
+ */
+
+ /* Remove the message file to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Open the test file */
+ H5E_BEGIN_TRY {
+ child_fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+ } H5E_END_TRY;
+
+ /* Should fail */
+ if(child_fid == FAIL)
+ HDexit(EXIT_SUCCESS);
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if child terminated normally */
+ if(WIFEXITED(child_status)) {
+ /* Check exit status of the child */
+ if(WEXITSTATUS(child_status) != 0)
+ TEST_ERROR
+ } else
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 4: 1) RDWR|SWMR_WRITE 2) RDWR|SWMR_WRITE : should fail
+ */
+
+ /* Remove the message file just to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Open the test file */
+ H5E_BEGIN_TRY {
+ child_fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl);
+ } H5E_END_TRY;
+
+ /* Should fail */
+ if(child_fid == FAIL)
+ HDexit(EXIT_SUCCESS);
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if child terminated normally */
+ if(WIFEXITED(child_status)) {
+ /* Check exit status of the child */
+ if(WEXITSTATUS(child_status) != 0)
+ TEST_ERROR
+ } else
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 5: 1) RDWR|SWMR_WRITE 2) RDONLY|SWMR_READ : should succeed
+ */
+
+ /* Remove the message file just to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Open the test file */
+ H5E_BEGIN_TRY {
+ child_fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl);
+ } H5E_END_TRY;
+
+ /* Should succeed */
+ if(child_fid >= 0) {
+ if(H5Fclose(child_fid) < 0)
+ FAIL_STACK_ERROR
+ HDexit(EXIT_SUCCESS);
+ }
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if child terminated normally */
+ if(WIFEXITED(child_status)) {
+ /* Check exit status of the child */
+ if(WEXITSTATUS(child_status) != 0)
+ TEST_ERROR
+ } else
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 6: 1) RDWR|SWMR_WRITE 2) RDONLY : should fail
+ */
+
+ /* Remove the message file just to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Open the test file */
+ H5E_BEGIN_TRY {
+ child_fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);
+ } H5E_END_TRY;
+
+ /* Should fail */
+ if(child_fid == FAIL)
+ HDexit(EXIT_SUCCESS);
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if child terminated normally */
+ if(WIFEXITED(child_status)) {
+ /* Check exit status of the child */
+ if(WEXITSTATUS(child_status) != 0)
+ TEST_ERROR
+ } else
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 7: 1) RDONLY|SWMR_READ 2) RDWR : should fail
+ */
+
+ /* Remove the message file just to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Open the test file */
+ H5E_BEGIN_TRY {
+ child_fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+ } H5E_END_TRY;
+
+ /* Should fail */
+ if(child_fid == FAIL)
+ HDexit(EXIT_SUCCESS);
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if child terminated normally */
+ if(WIFEXITED(child_status)) {
+ /* Check exit status of the child */
+ if(WEXITSTATUS(child_status) != 0)
+ TEST_ERROR
+ } else
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 8: 1) RDONLY|SWMR_READ 2) RDWR|SWMR_WRITE : should fail
+ */
+
+ /* Remove the message file just to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Open the test file */
+ H5E_BEGIN_TRY {
+ child_fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl);
+ } H5E_END_TRY;
+
+ /* Should fail */
+ if(child_fid == FAIL)
+ HDexit(EXIT_SUCCESS);
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if child terminated normally */
+ if(WIFEXITED(child_status)) {
+ /* Check exit status of the child */
+ if(WEXITSTATUS(child_status) != 0)
+ TEST_ERROR
+ } else
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 9: 1) RDONLY|SWMR_READ 2) RDONLY|SWMR_READ : should succeed
+ */
+
+ /* Remove the message file just to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Open the test file */
+ H5E_BEGIN_TRY {
+ child_fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl);
+ } H5E_END_TRY;
+
+ /* Should succeed */
+ if(child_fid >= 0) {
+ if(H5Fclose(child_fid) < 0)
+ FAIL_STACK_ERROR
+ HDexit(EXIT_SUCCESS);
+ }
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if child terminated normally */
+ if(WIFEXITED(child_status)) {
+ /* Check exit status of the child */
+ if(WEXITSTATUS(child_status) != 0)
+ TEST_ERROR
+ } else
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 10: 1) RDONLY|SWMR_READ 2) RDONLY : should succeed
+ */
+
+ /* Remove the message file just to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Open the test file */
+ if((child_fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Should succeed */
+ if(child_fid >= 0) {
+ if(H5Fclose(child_fid) < 0)
+ FAIL_STACK_ERROR
+ HDexit(EXIT_SUCCESS);
+ }
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if child terminated normally */
+ if(WIFEXITED(child_status)) {
+ /* Check exit status of the child */
+ if(WEXITSTATUS(child_status) != 0)
+ TEST_ERROR
+ } else
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 11: 1) RDONLY 2) RDWR|SWMR_WRITE : should fail
+ */
+
+ /* Remove the message file just to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Open the test file */
+ H5E_BEGIN_TRY {
+ child_fid = H5Fopen(filename, H5F_ACC_RDWR|H5F_ACC_SWMR_WRITE, fapl);
+ } H5E_END_TRY;
+
+ /* Should fail */
+ if(child_fid == FAIL)
+ HDexit(EXIT_SUCCESS);
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if child terminated normally */
+ if(WIFEXITED(child_status)) {
+ /* Check exit status of the child */
+ if(WEXITSTATUS(child_status) != 0)
+ TEST_ERROR
+ } else
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /*
+ * Case 12: 1) RDONLY 2) RDONLY|SWMR_READ : should succeed
+ */
+
+ /* Remove the message file just to be sure */
+ HDremove(DONE_MESSAGE);
+
+ /* Fork child process */
+ if((childpid = HDfork()) < 0)
+ FAIL_STACK_ERROR
+
+ if(childpid == 0) { /* Child process */
+ hid_t child_fid; /* File ID */
+
+ /* Wait till parent process completes the open */
+ if(h5_wait_message(DONE_MESSAGE) < 0)
+ HDexit(EXIT_FAILURE);
+
+ /* Open the test file */
+ H5E_BEGIN_TRY {
+ child_fid = H5Fopen(filename, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl);
+ } H5E_END_TRY;
+
+ /* Should succeed */
+ if(child_fid >= 0) {
+ if(H5Fclose(child_fid) < 0)
+ FAIL_STACK_ERROR
+ HDexit(EXIT_SUCCESS);
+ }
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* Open the test file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Send the message that the open completes */
+ h5_send_message(DONE_MESSAGE);
+
+ /* Wait for child process to complete */
+ if(HDwaitpid(childpid, &child_status, child_wait_option) < 0)
+ FAIL_STACK_ERROR
+
+ /* Check if child terminated normally */
+ if(WIFEXITED(child_status)) {
+ /* Check exit status of the child */
+ if(WEXITSTATUS(child_status) != 0)
+ TEST_ERROR
+ } else
+ FAIL_STACK_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the property list */
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+
+ return -1;
+
+} /* end test_file_lock_swmr_concur() */
+
+#endif /* defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) */
+
+static int
+test_swmr_vfd_flag(void)
+{
+ hid_t fid = -1; /* file ID */
+ hid_t sec2_fapl = -1; /* fapl ID of a VFD that supports SWMR writes (sec2) */
+ hid_t bad_fapl = -1; /* fapl ID of a VFD that does not support SWMR writes (stdio) */
+ char filename[NAME_BUF_SIZE]; /* file name */
+
+ TESTING("SWMR-enabled VFD flag functionality");
+
+ /* Attempt to open a file using a SWMR-compatible VFD. */
+
+ if((sec2_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_fapl_sec2(sec2_fapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_libver_bounds(sec2_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ h5_fixname(FILENAME[0], sec2_fapl, filename, sizeof(filename));
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE, H5P_DEFAULT, sec2_fapl)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Attempt to open a file using a non-SWMR-compatible VFD. */
+
+ if((bad_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_fapl_stdio(bad_fapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_libver_bounds(bad_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ fid = -1;
+ h5_fixname(FILENAME[0], bad_fapl, filename, sizeof(filename));
+ H5E_BEGIN_TRY {
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE, H5P_DEFAULT, bad_fapl);
+ } H5E_END_TRY;
+ if(fid >= 0)
+ TEST_ERROR;
+
+ if(H5Pclose(sec2_fapl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(bad_fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(sec2_fapl);
+ H5Pclose(bad_fapl);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+
+ return -1;
+} /* test_swmr_vfd_flag() */
+
+#ifdef OUT
+/*
+ * This exposes a bug for H5Orefresh while handling opened objects for H5Fstart_swmr_write().
+ * The boolean to skip file truncation test when reading in superblock will fix the problem.
+ * Will work to move that to test/flushrefresh.c later.
+ */
+static int
+test_bug_refresh(hid_t in_fapl)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t fapl;
+ H5F_t *f;
+ hid_t gid1, gid2, gid3, gid4, gid5, gid6, gid7, gid8, gid9;
+ char filename[NAME_BUF_SIZE]; /* File name */
+
+ /* Create a copy of the input parameter in_fapl */
+ if((fapl = H5Pcopy(in_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ TESTING("H5Orefresh failure conditions");
+
+ /* Create a file with the latest format */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(fid)))
+ FAIL_STACK_ERROR
+
+ /* Create groups: compact to dense storage */
+ if((gid1 = H5Gcreate2(fid, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if((gid2 = H5Gcreate2(fid, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if((gid3 = H5Gcreate2(fid, "group3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if((gid4 = H5Gcreate2(fid, "group4", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if((gid5 = H5Gcreate2(fid, "group5", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if((gid6 = H5Gcreate2(fid, "group6", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if((gid7 = H5Gcreate2(fid, "group7", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if((gid8 = H5Gcreate2(fid, "group8", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if((gid9 = H5Gcreate2(fid, "group9", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ TEST_ERROR
+
+ if(H5Grefresh(gid1) < 0) TEST_ERROR
+ if(H5Grefresh(gid2) < 0) TEST_ERROR
+ if(H5Grefresh(gid3) < 0) TEST_ERROR
+ if(H5Grefresh(gid4) < 0) TEST_ERROR
+ if(H5Grefresh(gid5) < 0) TEST_ERROR
+ if(H5Grefresh(gid6) < 0) TEST_ERROR
+ if(H5Grefresh(gid7) < 0) TEST_ERROR
+ if(H5Grefresh(gid8) < 0) TEST_ERROR
+ if(H5Grefresh(gid9) < 0) TEST_ERROR
+
+ H5Gclose(gid1);
+ H5Gclose(gid2);
+ H5Gclose(gid3);
+ H5Gclose(gid4);
+ H5Gclose(gid5);
+ H5Gclose(gid6);
+ H5Gclose(gid7);
+ H5Gclose(gid8);
+ H5Gclose(gid9);
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Gclose(gid1);
+ H5Gclose(gid2);
+ H5Gclose(gid3);
+ H5Gclose(gid4);
+ H5Gclose(gid5);
+ H5Gclose(gid6);
+ H5Gclose(gid7);
+ H5Gclose(gid8);
+ H5Gclose(gid9);
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+
+ return -1;
+} /* test_bug_refresh() */
+#endif /* OUT */
+
+/****************************************************************
+**
+** Tests for new public routines introduced from the SWMR project.
+**
+****************************************************************/
+int
+main(void)
+{
+ int nerrors = 0; /* The # of errors */
+ hid_t fapl = -1; /* File access property list ID */
+ char *driver = NULL; /* VFD string (from env variable) */
+
+ /* Skip this test if SWMR I/O is not supported for the VFD specified
+ * by the environment variable.
+ */
+ driver = HDgetenv("HDF5_DRIVER");
+ if(!H5FD_supports_swmr_test(driver)) {
+ printf("This VFD does not support SWMR I/O\n");
+ return EXIT_SUCCESS;
+ }
+
+ /* Set up */
+ h5_reset();
+
+ /* Get file access property list */
+ fapl = h5_fileaccess();
+
+#ifdef OUT
+ nerrors += test_bug_refresh(fapl);
+#endif
+
+ /* Tests on H5Pget/set_metadata_read_attempts() and H5Fget_metadata_read_retry_info() */
+ nerrors += test_metadata_read_attempts(fapl);
+ nerrors += test_metadata_read_retry_info(fapl);
+
+ /* Tests on H5Fstart_swmr_write() */
+ /*
+ * Modify the following routines to test for files:
+ * H5Fcreate(write, latest format) or H5Fcreate(SWMR write, non-latest-format)
+ * --both result in v3 superblock and latest version suppport
+ */
+ nerrors += test_start_swmr_write(fapl, TRUE);
+ nerrors += test_start_swmr_write(fapl, FALSE);
+ nerrors += test_err_start_swmr_write(fapl, TRUE);
+ nerrors += test_err_start_swmr_write(fapl, FALSE);
+ nerrors += test_start_swmr_write_concur(fapl, TRUE);
+ nerrors += test_start_swmr_write_concur(fapl, FALSE);
+
+ /* Tests for H5Pget/set_object_flush_cb() */
+ nerrors += test_object_flush_cb(fapl);
+
+ /* Tests on H5Pget/set_append_flush() */
+ nerrors += test_append_flush_generic();
+ nerrors += test_append_flush_dataset_chunked(fapl);
+ nerrors += test_append_flush_dataset_fixed(fapl);
+ nerrors += test_append_flush_dataset_multiple(fapl);
+
+ /*
+ * Tests for:
+ * file open flags--single process access
+ * file open flags--concurrent access
+ */
+ nerrors += test_file_lock_same(fapl);
+ nerrors += test_file_lock_concur(fapl);
+ /*
+ * Tests for:
+ * file open flags+SWMR flags--single process access
+ * file open flags+SWMR flags--concurrent access
+ *
+ * Modify the following 2 routines to test for files:
+ * H5Fcreate(write, latest format) or H5Fcreate(SWMR write, non-latest-format)
+ * --both result in v3 superblock and latest version suppport
+ */
+ nerrors += test_file_lock_swmr_same(fapl);
+ nerrors += test_file_lock_swmr_concur(fapl);
+
+ /* Tests SWMR VFD compatibility flag.
+ * Only needs to run when the VFD is the default (sec2).
+ */
+ if(NULL == driver || !HDstrcmp(driver, "") || !HDstrcmp(driver, "sec2"))
+ nerrors += test_swmr_vfd_flag();
+
+ if(nerrors)
+ goto error;
+
+ printf("All tests passed.\n");
+
+ h5_cleanup(FILENAME, fapl);
+
+ return 0;
+
+error:
+ nerrors = MAX(1, nerrors);
+ printf("***** %d SWMR TEST%s FAILED! *****\n",
+ nerrors, 1 == nerrors ? "" : "S");
+ return 1;
+
+} /* main() */
+
diff --git a/test/swmr_addrem_writer.c b/test/swmr_addrem_writer.c
new file mode 100644
index 0000000..8ce3f6c
--- /dev/null
+++ b/test/swmr_addrem_writer.c
@@ -0,0 +1,458 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_addrem_writer.c
+ *
+ * Purpose: Adds and removes data to a randomly selected subset of the
+ * datasets in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_reader program. It is also run AFTER a sequential
+ * (not concurrent!) invocation of swmr_writer, so that the writer
+ * can dump a bunch of data into the datasets. Otherwise,
+ * there wouldn't be much to shrink :)
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+#include <sys/time.h>
+
+#include "swmr_common.h"
+
+/****************/
+/* Local Macros */
+/****************/
+
+/* The maximum # of records to add/remove from the dataset in one step */
+#define MAX_SIZE_CHANGE 10
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t open_skeleton(const char *filename, unsigned verbose);
+static int addrem_records(hid_t fid, unsigned verbose, unsigned long nops,
+ unsigned long flush_count);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_skeleton
+ *
+ * Purpose: Opens the SWMR HDF5 file and datasets.
+ *
+ * Parameters: const char *filename
+ * The filename of the SWMR HDF5 file to open
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * The dataset IDs are stored in a global array
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_skeleton(const char *filename, unsigned verbose)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dim[2]; /* Dataspace dimension */
+ unsigned u, v; /* Local index variable */
+
+ assert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+#ifdef QAK
+ /* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ fprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ fprintf(stderr, "mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return -1;
+ if((sid = H5Dget_space(symbol_info[u][v].dsid)) < 0)
+ return -1;
+ if(2 != H5Sget_simple_extent_ndims(sid))
+ return -1;
+ if(H5Sget_simple_extent_dims(sid, dim, NULL) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = dim[1];
+ } /* end for */
+
+ return fid;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: addrem_records
+ *
+ * Purpose: Adds/removes a specified number of records to random datasets
+ * to the SWMR test file.
+ *
+ * Parameters: hid_t fid
+ * The file ID of the SWMR HDF5 file
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * unsigned long nops
+ * # of records to read/write in the datasets
+ *
+ * unsigned long flush_count
+ * # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+addrem_records(hid_t fid, unsigned verbose, unsigned long nops, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+ hsize_t dim[2] = {1, 0}; /* Dataspace dimensions */
+ symbol_t buf[MAX_SIZE_CHANGE]; /* Write buffer */
+ H5AC_cache_config_t mdc_config_orig; /* Original metadata cache configuration */
+ H5AC_cache_config_t mdc_config_cork; /* Corked metadata cache configuration */
+ unsigned long op_to_flush; /* # of operations before flush */
+ unsigned long u, v; /* Local index variables */
+
+ assert(fid > 0);
+
+ /* Reset the buffer */
+ memset(&buf, 0, sizeof(buf));
+
+ /* Create a dataspace for the record to add */
+ if((mem_sid = H5Screate_simple(2, count, NULL)) < 0)
+ return -1;
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Get the current metadata cache configuration, and set up the corked
+ * configuration */
+ mdc_config_orig.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ if(H5Fget_mdc_config(fid, &mdc_config_orig) < 0)
+ return -1;
+ memcpy(&mdc_config_cork, &mdc_config_orig, sizeof(mdc_config_cork));
+ mdc_config_cork.evictions_enabled = FALSE;
+ mdc_config_cork.incr_mode = H5C_incr__off;
+ mdc_config_cork.flash_incr_mode = H5C_flash_incr__off;
+ mdc_config_cork.decr_mode = H5C_decr__off;
+
+ /* Add and remove records to random datasets, according to frequency
+ * distribution */
+ op_to_flush = flush_count;
+ for(u=0; u<nops; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Decide whether to shrink or expand, and by how much */
+ count[1] = (hsize_t)random() % (MAX_SIZE_CHANGE * 2) + 1;
+
+ if(count[1] > MAX_SIZE_CHANGE) {
+ /* Add records */
+ count[1] -= MAX_SIZE_CHANGE;
+
+ /* Set the buffer's IDs (equal to its position) */
+ for(v=0; v<count[1]; v++)
+ buf[v].rec_id = (uint64_t)symbol->nrecords + (uint64_t)v;
+
+ /* Set the memory space to the correct size */
+ if(H5Sset_extent_simple(mem_sid, 2, count, NULL) < 0)
+ return -1;
+
+ /* Get the coordinates to write */
+ start[1] = symbol->nrecords;
+
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ /*if(H5Fset_mdc_config(fid, &mdc_config_cork) < 0)
+ return(-1);*/
+
+ /* Extend the dataset's dataspace to hold the new record */
+ symbol->nrecords+= count[1];
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return -1;
+
+ /* Choose the last record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &buf) < 0)
+ return -1;
+
+ /* Uncork the metadata cache */
+ /*if(H5Fset_mdc_config(fid, &mdc_config_orig) < 0)
+ return -1;*/
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+ } /* end if */
+ else {
+ /* Shrink the dataset's dataspace */
+ if(count[1] > symbol->nrecords)
+ symbol->nrecords = 0;
+ else
+ symbol->nrecords -= count[1];
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+ } /* end else */
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ op_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == op_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ op_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+}
+
/* Prints the command-line help text and terminates the program with
 * exit status 1.  Never returns.
 */
static void
usage(void)
{
    /* Each element is one line of the help text, emitted in order */
    static const char *help[] = {
        "\n",
        "Usage error!\n",
        "\n",
        "Usage: swmr_addrem_writer [-q] [-f <# of operations between flushing\n",
        "    file contents>] [-r <random seed>] <# of operations>\n",
        "\n",
        "<# of operations between flushing file contents> should be 0 (for\n",
        "no flushing) or between 1 and (<# of operations> - 1).\n",
        "\n",
        "<# of operations> must be specified.\n",
        "\n",
        "Defaults to verbose (no '-q' given), flushing every 1000 operations\n",
        "('-f 1000'), and will generate a random seed (no -r given).\n",
        "\n",
    };
    size_t i;

    for(i = 0; i < sizeof(help) / sizeof(help[0]); i++)
        printf("%s", help[i]);
    exit(1);
}
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nops = 0; /* # of times to grow or shrink the dataset */
+ long flush_count = 1000; /* # of records to write between flushing file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variable */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = atol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = atoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nops = atol(argv[u]);
+ if(nops <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nops <= 0)
+ usage();
+ if(flush_count >= nops)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ fprintf(stderr, "Parameters:\n");
+ fprintf(stderr, "\t# of operations between flushes = %ld\n", flush_count);
+ fprintf(stderr, "\t# of operations = %ld\n", nops);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ gettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ srandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ fprintf(stderr, "Using writer random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose)) < 0) {
+ fprintf(stderr, "Error opening skeleton file!\n");
+ exit(1);
+ } /* end if */
+
+ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE);
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Adding and removing records\n");
+
+ /* Grow and shrink datasets */
+ if(addrem_records(fid, verbose, (unsigned long)nops, (unsigned long)flush_count) < 0) {
+ fprintf(stderr, "Error adding and removing records from datasets!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ fprintf(stderr, "Error releasing symbols!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ fprintf(stderr, "Error closing file!\n");
+ exit(1);
+ } /* end if */
+
+ return 0;
+}
diff --git a/test/swmr_check_compat_vfd.c b/test/swmr_check_compat_vfd.c
new file mode 100644
index 0000000..87b87c4
--- /dev/null
+++ b/test/swmr_check_compat_vfd.c
@@ -0,0 +1,59 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Purpose: This is a small program that checks if the HDF5_DRIVER
+ * environment variable is set to a value that supports SWMR.
+ *
+ * It is intended for use in shell scripts.
+ */
+
+#include <stdlib.h>
+
+#include "H5private.h"
+
+/* This file needs to access the file driver testing code */
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_TESTING
+#include "H5FDpkg.h" /* File drivers */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Inspects the HDF5_DRIVER environment variable, which
+ * determines the VFD that the test harness will use with
+ * the majority of the tests.
+ *
+ * Return: VFD supports SWMR: EXIT_SUCCESS
+ *
+ * VFD does not support SWMR
+ * or failure: EXIT_FAILURE
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ char *driver = NULL;
+
+ driver = HDgetenv("HDF5_DRIVER");
+
+ if(H5FD_supports_swmr_test(driver))
+ return EXIT_SUCCESS;
+ else
+ return EXIT_FAILURE;
+
+} /* end main() */
+
diff --git a/test/swmr_common.c b/test/swmr_common.c
new file mode 100644
index 0000000..30e1b3e
--- /dev/null
+++ b/test/swmr_common.c
@@ -0,0 +1,292 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_common.c
+ *
+ * Purpose: Utility functions for the SWMR test code.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+
+#include "swmr_common.h"
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+/* The SWMR data arrays:
+ *
+ * The code uses a 2-D jagged array of datasets. The first dimension is called
+ * the 'level' and there are five of them.
+ *
+ * #define NLEVELS 5
+ *
+ * The second dimension is the 'count' and there are quite a few datasets per
+ * 'level'.
+ *
+ * unsigned symbol_count[NLEVELS] = {100, 200, 400, 800, 1600};
+ *
+ * These datasets are created when the skeleton is generated and are initially
+ * empty. Each dataset has no upper bound on size (H5S_UNLIMITED). They
+ * are of compound type, with two members: an integer ID and an opaque
+ * 'data part'. The data part is not used by the SWMR testing.
+ *
+ * The SWMR testing will then randomly add and/or remove entries
+ * from these datasets. The selection of the level is skewed by a mapping
+ * table which preferentially hammers on the lower levels with their smaller
+ * number of datasets.
+ *
+ * static unsigned symbol_mapping[NMAPPING] = {0, 0, 0, 0, 1, 1, 2, 3, 4};
+ *
+ * The information about each dataset (name, hid_t, etc.) is stored in a
+ * separate array.
+ *
+ * symbol_info_t *symbol_info[NLEVELS];
+ */
+
+/* An array of dataset levels, used to select the level for a SWMR operation
+ * Note that this preferentially selects the lower levels with their smaller
+ * number of datasets.
+ */
+static unsigned symbol_mapping[NMAPPING] = {0, 0, 0, 0, 1, 1, 2, 3, 4};
+
+/* The number of datasets at each level */
+unsigned symbol_count[NLEVELS] = {100, 200, 400, 800, 1600};
+
+/* Array of dataset information entries (1 per dataset) */
+symbol_info_t *symbol_info[NLEVELS];
+
+
+/*-------------------------------------------------------------------------
+ * Function: choose_dataset
+ *
+ * Purpose: Selects a random dataset in the SWMR file
+ *
+ * Parameters: N/A
+ *
+ * Return: Success: A pointer to information about a dataset.
+ * Failure: Can't fail
+ *
+ *-------------------------------------------------------------------------
+ */
+symbol_info_t *
+choose_dataset(void)
+{
+ unsigned level; /* The level of the dataset */
+ unsigned offset; /* The "offset" of the dataset at that level */
+
+ /* Determine level of dataset */
+ level = symbol_mapping[random() % NMAPPING];
+
+ /* Determine the offset of the level */
+ offset = random() % symbol_count[level];
+
+ return &symbol_info[level][offset];
+} /* end choose_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_symbol_datatype
+ *
+ * Purpose:     Creates the HDF5 datatype used for elements in the SWMR
+ * testing datasets.
+ *
+ * Parameters: N/A
+ *
+ * Return: Success: An HDF5 type ID
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+hid_t
+create_symbol_datatype(void)
+{
+ hid_t sym_type_id; /* Datatype ID for symbol */
+ hid_t opaq_type_id; /* Datatype ID for opaque part of record */
+
+ /* Create opaque datatype to represent other information for this record */
+ if((opaq_type_id = H5Tcreate(H5T_OPAQUE, (size_t)DTYPE_SIZE)) < 0)
+ return -1;
+
+ /* Create compound datatype for symbol */
+ if((sym_type_id = H5Tcreate(H5T_COMPOUND, sizeof(symbol_t))) < 0)
+ return -1;
+
+ /* Insert fields in symbol datatype */
+ if(H5Tinsert(sym_type_id, "rec_id", HOFFSET(symbol_t, rec_id), H5T_NATIVE_UINT64) < 0)
+ return -1;
+ if(H5Tinsert(sym_type_id, "info", HOFFSET(symbol_t, info), opaq_type_id) < 0)
+ return -1;
+
+ /* Close opaque datatype */
+ if(H5Tclose(opaq_type_id) < 0)
+ return -1;
+
+ return sym_type_id;
+} /* end create_symbol_datatype() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: generate_name
+ *
+ * Purpose: Generates a SWMR testing dataset name given a level and
+ * count.
+ *              The name is in the format <level>-<count> (%u-%04u).
+ *
+ * Parameters: char *name_buf
+ * Buffer for the created name. Must be pre-allocated.
+ * Since the name is formulaic, this isn't considered an issue.
+ *
+ * unsigned level
+ * The dataset's level
+ *
+ * unsigned count
+ * The dataset's count
+ *
+ * Return: Success: 0
+ *
+ * Failure: Can't fail
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+generate_name(char *name_buf, unsigned level, unsigned count)
+{
+ assert(name_buf);
+
+ sprintf(name_buf, "%u-%04u", level, count);
+
+ return 0;
+} /* end generate_name() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: generate_symbols
+ *
+ * Purpose:     Initializes the global dataset information arrays.
+ *
+ * Parameters: N/A
+ *
+ * Return: Success: 0
+ * Failure: Can't fail
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+generate_symbols(void)
+{
+ unsigned u, v; /* Local index variables */
+
+ for(u = 0; u < NLEVELS; u++) {
+ symbol_info[u] = (symbol_info_t *)malloc(symbol_count[u] * sizeof(symbol_info_t));
+ for(v = 0; v < symbol_count[u]; v++) {
+ char name_buf[64];
+
+ generate_name(name_buf, u, v);
+ symbol_info[u][v].name = (char *)malloc(strlen(name_buf) + 1);
+ strcpy(symbol_info[u][v].name, name_buf);
+ symbol_info[u][v].dsid = -1;
+ symbol_info[u][v].nrecords = 0;
+ } /* end for */
+ } /* end for */
+
+ return 0;
+} /* end generate_symbols() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: shutdown_symbols
+ *
+ * Purpose: Cleans up the global dataset information arrays.
+ *
+ * Parameters: N/A
+ *
+ * Return: Success: 0
+ * Failure: Can't fail
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+shutdown_symbols(void)
+{
+ unsigned u, v; /* Local index variables */
+
+ /* Clean up the symbols */
+ for(u = 0; u < NLEVELS; u++) {
+ for(v = 0; v < symbol_count[u]; v++)
+ free(symbol_info[u][v].name);
+ free(symbol_info[u]);
+ } /* end for */
+
+ return 0;
+} /* end shutdown_symbols() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: print_metadata_retries_info
+ *
+ * Purpose: To retrieve and print the collection of metadata retries for the file.
+ *
+ * Parameters: fid: the currently opened file identifier
+ *
+ * Return: Success: 0
+ * Failure: negative
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+print_metadata_retries_info(hid_t fid)
+{
+ H5F_retry_info_t info;
+ unsigned i;
+
+ /* Retrieve the collection of retries */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ return (-1);
+
+ /* Print information for each non-NULL retries[i] */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) {
+ unsigned power;
+ unsigned j;
+
+ if(NULL == info.retries[i])
+ continue;
+
+ fprintf(stderr, "Metadata read retries for item %u:\n", i);
+ power = 1;
+ for(j = 0; j < info.nbins; j++) {
+ if(info.retries[i][j])
+ fprintf(stderr, "\t# of retries for %u - %u retries: %u\n",
+ power, (power * 10) - 1, info.retries[i][j]);
+ power *= 10;
+ } /* end for */
+ } /* end for */
+
+ /* Free memory for each non-NULL retries[i] */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++)
+ if(info.retries[i] != NULL)
+ free(info.retries[i]);
+
+ return 0;
+} /* print_metadata_retries_info() */
+
diff --git a/test/swmr_common.h b/test/swmr_common.h
new file mode 100644
index 0000000..1778f8e
--- /dev/null
+++ b/test/swmr_common.h
@@ -0,0 +1,78 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef _SWMR_COMMON_H
+#define _SWMR_COMMON_H
+
+/* Headers needed */
+
+#include <limits.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+
+#include "hdf5.h"
+#include "h5test.h"
+
+/**********/
+/* Macros */
+/**********/
+
+#define NLEVELS 5 /* # of datasets in the SWMR test file */
+
+#define NMAPPING 9
+
+#define FILENAME "swmr_data.h5" /* SWMR test file name */
+#define DTYPE_SIZE 150 /* Data size in opaque type */
+
+/* The message sent by writer that the file open is done--releasing the file lock */
+#define WRITER_MESSAGE "SWMR_WRITER_MESSAGE"
+
+/************/
+/* Typedefs */
+/************/
+
+/* Information about a symbol/dataset */
+typedef struct {
+ char *name; /* Dataset name for symbol */
+ hid_t dsid; /* Dataset ID for symbol */
+ hsize_t nrecords; /* Number of records for the symbol */
+} symbol_info_t;
+
+/* A symbol's record */
+typedef struct {
+ uint64_t rec_id; /* ID for this record (unique in symbol) */
+ uint8_t info[DTYPE_SIZE]; /* "Other" information for this record */
+} symbol_t;
+
+/********************/
+/* Global Variables */
+/********************/
+extern symbol_info_t *symbol_info[NLEVELS];
+extern unsigned symbol_count[NLEVELS];
+
+/**************/
+/* Prototypes */
+/**************/
+symbol_info_t * choose_dataset(void);
+hid_t create_symbol_datatype(void);
+int generate_name(char *name_buf, unsigned level, unsigned count);
+int generate_symbols(void);
+int shutdown_symbols(void);
+int print_metadata_retries_info(hid_t fid);
+
+#endif /* _SWMR_COMMON_H */
+
diff --git a/test/swmr_generator.c b/test/swmr_generator.c
new file mode 100644
index 0000000..bbc1e18
--- /dev/null
+++ b/test/swmr_generator.c
@@ -0,0 +1,355 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_generator.c
+ *
+ * Purpose: Functions for building and setting up the SWMR test file
+ * and datasets.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+#include <sys/time.h>
+
+#include "swmr_common.h"
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define CHUNK_SIZE 50 /* Chunk size for created datasets */
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static int gen_skeleton(const char *filename, unsigned verbose,
+ unsigned swmr_write, int comp_level, const char *index_type,
+ unsigned random_seed);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: gen_skeleton
+ *
+ * Purpose: Creates the HDF5 file and datasets which will be used in
+ * the SWMR testing.
+ *
+ * Parameters: const char *filename
+ * The SWMR test file's name.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * int comp_level
+ * The zlib compression level to use. -1 = no compression.
+ *
+ * const char *index_type
+ * The chunk index type (b1 | b2 | ea | fa)
+ *
+ * unsigned random_seed
+ * The random seed to store in the file. The sparse tests use
+ * this value.
+ *
+ * Return: Success: 0
+ *
+ *              Failure:    -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+gen_skeleton(const char *filename, unsigned verbose, unsigned swmr_write,
+ int comp_level, const char *index_type, unsigned random_seed)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fcpl; /* File creation property list */
+ hid_t fapl; /* File access property list */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t tid; /* Datatype for dataset elements */
+ hid_t sid; /* Dataspace ID */
+ hid_t aid; /* Attribute ID */
+ hsize_t dims[2] = {1, 0}; /* Dataset starting dimensions */
+ hsize_t max_dims[2] = {1, H5S_UNLIMITED}; /* Dataset maximum dimensions */
+ hsize_t chunk_dims[2] = {1, CHUNK_SIZE}; /* Chunk dimensions */
+#ifdef FILLVAL_WORKS
+ symbol_t fillval; /* Dataset fill value */
+#endif /* FILLVAL_WORKS */
+ unsigned u, v; /* Local index variable */
+
+ assert(filename);
+ assert(index_type);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Can create a file for SWMR support with: (a) (write+latest-format) or (b) (SWMR write+non-latest-format) */
+ if(!swmr_write) {
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ }
+
+ /* There are two chunk indexes tested here.
+ * With one unlimited dimension, we get the extensible array index
+ * type, with two unlimited dimensions, we get a v-2 B-tree.
+ */
+ if(!strcmp(index_type, "b2"))
+ max_dims[0] = H5S_UNLIMITED;
+
+#ifdef QAK
+ /* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ fprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ fprintf(stderr, "mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_small_data_block_size(fapl, (hsize_t)(50 * CHUNK_SIZE * DTYPE_SIZE));
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Create file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ return -1;
+
+#ifdef QAK
+ H5Pset_link_phase_change(fcpl, 0, 0);
+#endif /* QAK */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Creating file\n");
+
+ /* Create the file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC | (swmr_write ? H5F_ACC_SWMR_WRITE : 0), fcpl, fapl)) < 0)
+ return -1;
+
+ /* Close file creation property list */
+ if(H5Pclose(fcpl) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Create attribute with (shared) random number seed - for sparse test */
+ if((sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+ if((aid = H5Acreate2(fid, "seed", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Awrite(aid, H5T_NATIVE_UINT, &random_seed) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+
+ /* Create datatype for creating datasets */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(2, dims, max_dims)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ return -1;
+ if(comp_level >= 0) {
+ if(H5Pset_deflate(dcpl, (unsigned)comp_level) < 0)
+ return -1;
+ } /* end if */
+#ifdef FILLVAL_WORKS
+ /* Currently fill values do not work because they can bump the dataspace
+ * message to the second object header chunk. We should enable the fillval
+ * here when this is fixed. -NAF 8/11/11 */
+ memset(&fillval, 0, sizeof(fillval));
+ fillval.rec_id = (uint64_t)ULLONG_MAX;
+ if(H5Pset_fill_value(dcpl, tid, &fillval) < 0)
+ return -1;
+#endif /* FILLVAL_WORKS */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Creating datasets\n");
+
+ /* Create the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ hid_t dsid; /* Dataset ID */
+ char name_buf[64];
+
+ generate_name(name_buf, u, v);
+ if((dsid = H5Dcreate2(fid, name_buf, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+
+ if(H5Dclose(dsid) < 0)
+ return -1;
+ } /* end for */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing objects\n");
+
+    /* Close everything */
+ if(H5Pclose(dcpl) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Tclose(tid) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ return 0;
+} /* end gen_skeleton() */
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_generator [-q] [-s] [-c <deflate compression level>]\n");
+ printf(" [-i <index type>] [-r <random seed>]\n");
+ printf("\n");
+ printf("NOTE: The random seed option is only used by the sparse test. Other\n");
+ printf(" tests specify the random seed as a reader/writer option.\n");
+ printf("\n");
+ printf("<deflate compression level> should be -1 (for no compression) or 0-9\n");
+ printf("\n");
+ printf("<index type> should be b2 or ea\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), no SWMR_WRITE mode (no '-s' given) no\n");
+ printf("compression ('-c -1'), v1 b-tree indexing (-i b1), and will generate a random\n");
+ printf("seed (no -r given).\n");
+ printf("\n");
+ exit(1);
+} /* end usage() */
+
+int main(int argc, const char *argv[])
+{
+ int comp_level = -1; /* Compression level (-1 is no compression) */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned swmr_write = 0; /* Whether to create file with SWMR_WRITE access */
+ const char *index_type = "b1"; /* Chunk index type */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variables */
+ int temp;
+
+ /* Parse command line options */
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* Compress dataset chunks */
+ case 'c':
+ comp_level = atoi(argv[u + 1]);
+ if(comp_level < -1 || comp_level > 9)
+ usage();
+ u += 2;
+ break;
+
+ /* Chunk index type */
+ case 'i':
+ index_type = argv[u + 1];
+ if(strcmp(index_type, "ea")
+ && strcmp(index_type, "b2"))
+ usage();
+ u += 2;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = atoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Run with SWMR_WRITE */
+ case 's':
+ swmr_write = 1;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ } /* end while */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose) {
+ fprintf(stderr, "Parameters:\n");
+ fprintf(stderr, "\tswmr writes %s\n", swmr_write ? "on" : "off");
+ fprintf(stderr, "\tcompression level = %d\n", comp_level);
+ fprintf(stderr, "\tindex type = %s\n", index_type);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ gettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ srandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ fprintf(stderr, "Using generator random seed (used in sparse test only): %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Generating skeleton file: %s\n", FILENAME);
+
+ /* Generate file skeleton */
+ if(gen_skeleton(FILENAME, verbose, swmr_write, comp_level, index_type, random_seed) < 0) {
+ fprintf(stderr, "Error generating skeleton file!\n");
+ exit(1);
+ } /* end if */
+
+ return 0;
+}
diff --git a/test/swmr_reader.c b/test/swmr_reader.c
new file mode 100644
index 0000000..f7b7e96
--- /dev/null
+++ b/test/swmr_reader.c
@@ -0,0 +1,509 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_reader.c
+ *
+ * Purpose: Reads data from a randomly selected subset of the datasets
+ * in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_writer program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include "swmr_common.h"
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static int check_dataset(hid_t fid, unsigned verbose, const char *sym_name,
+ symbol_t *record, hid_t rec_sid);
+static int read_records(const char *filename, unsigned verbose, unsigned long nseconds,
+ unsigned poll_time, unsigned ncommon, unsigned nrandom);
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+static hid_t symbol_tid = -1; /* The type ID for the SWMR datasets */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_dataset
+ *
+ * Purpose: For a given dataset, checks to make sure that the stated
+ * and actual sizes are the same. If they are not, then
+ * we have an inconsistent dataset due to a SWMR error.
+ *
+ * Parameters: hid_t fid
+ * The SWMR test file's ID.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * const char *sym_name
+ * The name of the dataset from which to read.
+ *
+ * symbol_t *record
+ * Memory for the record. Must be pre-allocated.
+ *
+ * hid_t rec_sid
+ * The memory dataspace for access. It's always the same so
+ * there is no need to re-create it every time this function
+ * is called.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+check_dataset(hid_t fid, unsigned verbose, const char *sym_name, symbol_t *record,
+ hid_t rec_sid)
+{
+ hid_t dsid; /* Dataset ID */
+ hid_t file_sid; /* Dataset's space ID */
+ hssize_t snpoints; /* Number of elements in dataset */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+
+ assert(fid >= 0);
+ assert(sym_name);
+ assert(record);
+ assert(rec_sid >= 0);
+
+ /* Open dataset for symbol */
+ if((dsid = H5Dopen2(fid, sym_name, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(dsid)) < 0)
+ return -1;
+
+ /* Get the number of elements (= records, for 1-D datasets) */
+ if((snpoints = H5Sget_simple_extent_npoints(file_sid)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Symbol = '%s', # of records = %lld\n", sym_name, (long long)snpoints);
+
+ /* Check if there are records for symbol */
+ if(snpoints > 0) {
+ /* Choose the last record in the dataset */
+ start[1] = (hsize_t)(snpoints - 1);
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Read record from dataset */
+ record->rec_id = (uint64_t)ULLONG_MAX;
+ if(H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
+ return -1;
+
+ /* Verify record value */
+ if(record->rec_id != start[1]) {
+ fprintf(stderr, "*** ERROR ***\n");
+ fprintf(stderr, "Incorrect record value!\n");
+ fprintf(stderr, "Symbol = '%s', # of records = %lld, record->rec_id = %llu\n", sym_name, (long long)snpoints, (unsigned long long)record->rec_id);
+ return -1;
+ } /* end if */
+ } /* end if */
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Close dataset for symbol */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+
+ return 0;
+} /* end check_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: read_records
+ *
+ * Purpose:     Repeatedly opens the SWMR test file and reads the last
+ *              record from a selection of its datasets, polling for
+ *              nseconds, to verify the file stays consistent under SWMR.
+ *
+ * The "common" datasets are a random selection from among
+ * the level 0 datasets. The "random" datasets are a random
+ * selection from among all the file's datasets. This scheme
+ * ensures that the level 0 datasets are interrogated vigorously.
+ *
+ * Parameters: const char *filename
+ * The SWMR test file's name.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * unsigned long nseconds
+ *              The amount of time to read records (in seconds).
+ *
+ * unsigned poll_time
+ * The amount of time to sleep (s).
+ *
+ * unsigned ncommon
+ * The number of common/non-random datasets that will be opened.
+ *
+ * unsigned nrandom
+ * The number of random datasets that will be opened.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+read_records(const char *filename, unsigned verbose, unsigned long nseconds,
+ unsigned poll_time, unsigned ncommon, unsigned nrandom)
+{
+ time_t start_time; /* Starting time */
+ time_t curr_time; /* Current time */
+ symbol_info_t **sym_com = NULL; /* Pointers to array of common dataset IDs */
+ symbol_info_t **sym_rand = NULL; /* Pointers to array of random dataset IDs */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hid_t fid; /* SWMR test file ID */
+ hid_t fapl; /* file access property list */
+ symbol_t record; /* The record to read from the dataset */
+ unsigned v; /* Local index variable */
+
+ assert(filename);
+ assert(nseconds != 0);
+ assert(poll_time != 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record read, also) */
+ memset(&record, 0, sizeof(record));
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Choosing datasets\n");
+
+ /* Allocate space for 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Allocate array to hold pointers to symbols for common datasets */
+ if(NULL == (sym_com = (symbol_info_t **)malloc(sizeof(symbol_info_t *) * ncommon)))
+ return -1;
+
+ /* Open the common datasets */
+ for(v = 0; v < ncommon; v++) {
+ unsigned offset; /* Offset of symbol to use */
+
+ /* Determine the offset of the symbol, within level 0 symbols */
+ /* (level 0 symbols are the most common symbols) */
+ offset = (unsigned)(random() % symbol_count[0]);
+ sym_com[v] = &symbol_info[0][offset];
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Common symbol #%u = '%s'\n", v, symbol_info[0][offset].name);
+ } /* end for */
+ } /* end if */
+
+ /* Allocate space for 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Allocate array to hold pointers to symbols for random datasets */
+ if(NULL == (sym_rand = (symbol_info_t **)malloc(sizeof(symbol_info_t *) * nrandom)))
+ return -1;
+
+ /* Determine the random datasets */
+ for(v = 0; v < nrandom; v++) {
+ symbol_info_t *sym; /* Symbol to use */
+
+ /* Determine the symbol, within all symbols */
+ if(NULL == (sym = choose_dataset()))
+ return -1;
+ sym_rand[v] = sym;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Random symbol #%u = '%s'\n", v, sym->name);
+ } /* end for */
+ } /* end if */
+
+ /* Create a dataspace for the record to read */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Reading records\n");
+
+ /* Get the starting time */
+ start_time = time(NULL);
+ curr_time = start_time;
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Loop over reading records until [at least] the correct # of seconds have passed */
+ while(curr_time < (time_t)(start_time + (time_t)nseconds)) {
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Opening file: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return -1;
+
+ /* Check 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Checking common symbols\n");
+
+ /* Iterate over common datasets */
+ for(v = 0; v < ncommon; v++) {
+ /* Check common dataset */
+ if(check_dataset(fid, verbose, sym_com[v]->name, &record, mem_sid) < 0)
+ return -1;
+ memset(&record, 0, sizeof(record));
+ } /* end for */
+ } /* end if */
+
+ /* Check 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Checking random symbols\n");
+
+ /* Iterate over random datasets */
+ for(v = 0; v < nrandom; v++) {
+ /* Check random dataset */
+ if(check_dataset(fid, verbose, sym_rand[v]->name, &record, mem_sid) < 0)
+ return -1;
+ memset(&record, 0, sizeof(record));
+ } /* end for */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing file\n");
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ /* Sleep for the appropriate # of seconds */
+ sleep(poll_time);
+
+ /* Retrieve the current time */
+ curr_time = time(NULL);
+ } /* end while */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Close the fapl */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing datasets\n");
+
+ /* Close 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Release array holding dataset ID's for random datasets */
+ free(sym_rand);
+ } /* end if */
+
+ /* Close 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Release array holding dataset ID's for common datasets */
+ free(sym_com);
+ } /* end if */
+
+ return 0;
+} /* end read_records() */
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_reader [-q] [-s <# of seconds to sleep between polling>]\n");
+ printf(" [-h <# of common symbols to poll>] [-l <# of random symbols to poll>]\n");
+ printf(" [-r <random seed>] <# of seconds to test>\n");
+ printf("\n");
+ printf("<# of seconds to test> must be specified.\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), 1 second between polling ('-s 1'),\n");
+ printf("5 common symbols to poll ('-h 5'), 10 random symbols to poll ('-l 10'),\n");
+ printf("and will generate a random seed (no -r given).\n");
+ printf("\n");
+ exit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ long nseconds = 0; /* # of seconds to test */
+ int poll_time = 1; /* # of seconds between polling */
+ int ncommon = 5; /* # of common symbols to poll */
+ int nrandom = 10; /* # of random symbols to poll */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variables */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of common symbols to poll */
+ case 'h':
+ ncommon = atoi(argv[u + 1]);
+ if(ncommon < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* # of random symbols to poll */
+ case 'l':
+ nrandom = atoi(argv[u + 1]);
+ if(nrandom < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = atoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* # of seconds between polling */
+ case 's':
+ poll_time = atoi(argv[u + 1]);
+ if(poll_time < 0)
+ usage();
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nseconds = atol(argv[u]);
+ if(nseconds <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nseconds <= 0)
+ usage();
+ if(poll_time >= nseconds)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ fprintf(stderr, "Parameters:\n");
+ fprintf(stderr, "\t# of seconds between polling = %d\n", poll_time);
+ fprintf(stderr, "\t# of common symbols to poll = %d\n", ncommon);
+ fprintf(stderr, "\t# of random symbols to poll = %d\n", nrandom);
+ fprintf(stderr, "\t# of seconds to test = %ld\n", nseconds);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ gettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ srandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ fprintf(stderr, "Using reader random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0) {
+ fprintf(stderr, "Error generating symbol names!\n");
+ exit(1);
+ } /* end if */
+
+ /* Create datatype for creating datasets */
+ if((symbol_tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Reading records from datasets */
+ if(read_records(FILENAME, verbose, (unsigned long)nseconds, (unsigned)poll_time, (unsigned)ncommon, (unsigned)nrandom) < 0) {
+ fprintf(stderr, "Error reading records from datasets!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ fprintf(stderr, "Error releasing symbols!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing objects\n");
+
+ /* Close objects created */
+ if(H5Tclose(symbol_tid) < 0) {
+ fprintf(stderr, "Error closing symbol datatype!\n");
+ exit(1);
+ } /* end if */
+
+ return 0;
+}
diff --git a/test/swmr_remove_reader.c b/test/swmr_remove_reader.c
new file mode 100644
index 0000000..76c44fa
--- /dev/null
+++ b/test/swmr_remove_reader.c
@@ -0,0 +1,522 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_remove_reader.c
+ *
+ * Purpose: Reads data from a randomly selected subset of the datasets
+ * in the SWMR test file. Unlike the regular reader, these
+ * datasets will be shrinking.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_remove_writer program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include "swmr_common.h"
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+static hid_t symbol_tid = -1;
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static int check_dataset(hid_t fid, unsigned verbose, const char *sym_name,
+ symbol_t *record, hid_t rec_sid);
+static int read_records(const char *filename, unsigned verbose, unsigned long nseconds,
+ unsigned poll_time, unsigned ncommon, unsigned nrandom);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_dataset
+ *
+ * Purpose: For a given dataset, checks to make sure that the stated
+ * and actual sizes are the same. If they are not, then
+ * we have an inconsistent dataset due to a SWMR error.
+ *
+ * Parameters: hid_t fid
+ * The SWMR test file's ID.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * const char *sym_name
+ * The name of the dataset from which to read.
+ *
+ * symbol_t *record
+ * Memory for the record. Must be pre-allocated.
+ *
+ * hid_t rec_sid
+ * The memory dataspace for access. It's always the same so
+ * there is no need to re-create it every time this function
+ * is called.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+check_dataset(hid_t fid, unsigned verbose, const char *sym_name, symbol_t *record,
+ hid_t rec_sid)
+{
+ hid_t dsid; /* Dataset ID */
+ hid_t file_sid; /* Dataset's space ID */
+ hssize_t snpoints; /* Number of elements in dataset */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+
+ assert(fid >= 0);
+ assert(sym_name);
+ assert(record);
+ assert(rec_sid >= 0);
+
+ /* Open dataset for symbol */
+ if((dsid = H5Dopen2(fid, sym_name, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(dsid)) < 0)
+ return -1;
+
+ /* Get the number of elements (= records, for 1-D datasets) */
+ if((snpoints = H5Sget_simple_extent_npoints(file_sid)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Symbol = '%s', # of records = %lld\n", sym_name, (long long)snpoints);
+
+ /* Check if there are records for symbol */
+ if(snpoints > 0) {
+ /* Choose a random record in the dataset, choosing the last record half
+ * the time */
+ start[1] = (hsize_t)(random() % (snpoints * 2));
+ if(start[1] > (hsize_t)(snpoints - 1))
+ start[1] = (hsize_t)(snpoints - 1);
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Read record from dataset */
+#ifdef FILLVAL_WORKS
+ /* When shrinking the dataset, we cannot guarantee that the buffer will
+ * even be touched, unless there is a fill value. Since fill values do
+ * not work with SWMR currently (see note in swmr_generator.c), we
+ * simply initialize rec_id to 0. */
+ record->rec_id = (uint64_t)ULLONG_MAX - 1;
+#else /* FILLVAL_WORKS */
+ record->rec_id = (uint64_t)0;
+#endif /* FILLVAL_WORKS */
+ if(H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
+ return -1;
+
+ /* Verify record value - note that it may be the fill value, because the
+ * chunk may be deleted before the object header has the updated
+ * dimensions */
+ if(record->rec_id != start[1] && record->rec_id != (uint64_t)0) {
+ fprintf(stderr, "*** ERROR ***\n");
+ fprintf(stderr, "Incorrect record value!\n");
+ fprintf(stderr, "Symbol = '%s', # of records = %lld, record->rec_id = %llx\n", sym_name, (long long)snpoints, (unsigned long long)record->rec_id);
+ return -1;
+ } /* end if */
+ } /* end if */
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Close dataset for symbol */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+
+ return 0;
+} /* end check_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: read_records
+ *
+ * Purpose: For a given dataset, checks to make sure that the stated
+ * and actual sizes are the same. If they are not, then
+ * we have an inconsistent dataset due to a SWMR error.
+ *
+ * The "common" datasets are a random selection from among
+ * the level 0 datasets. The "random" datasets are a random
+ * selection from among all the file's datasets. This scheme
+ * ensures that the level 0 datasets are interrogated vigorously.
+ *
+ * Parameters: const char *filename
+ * The SWMR test file's name.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * unsigned long nseconds
+ * The amount of time to read records (ns).
+ *
+ * unsigned poll_time
+ * The amount of time to sleep (s).
+ *
+ * unsigned ncommon
+ * The number of common/non-random datasets that will be opened.
+ *
+ * unsigned nrandom
+ * The number of random datasets that will be opened.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+read_records(const char *filename, unsigned verbose, unsigned long nseconds,
+ unsigned poll_time, unsigned ncommon, unsigned nrandom)
+{
+ time_t start_time; /* Starting time */
+ time_t curr_time; /* Current time */
+ symbol_info_t **sym_com = NULL; /* Pointers to array of common dataset IDs */
+ symbol_info_t **sym_rand = NULL; /* Pointers to array of random dataset IDs */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hid_t fid; /* SWMR test file ID */
+ hid_t fapl; /* File access property list */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned v; /* Local index variable */
+
+ assert(filename);
+ assert(nseconds != 0);
+ assert(poll_time != 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ memset(&record, 0, sizeof(record));
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Choosing datasets\n");
+
+ /* Allocate space for 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Allocate array to hold pointers to symbols for common datasets */
+ if(NULL == (sym_com = (symbol_info_t **)malloc(sizeof(symbol_info_t *) * ncommon)))
+ return -1;
+
+ /* Open the common datasets */
+ for(v = 0; v < ncommon; v++) {
+ unsigned offset; /* Offset of symbol to use */
+
+ /* Determine the offset of the symbol, within level 0 symbols */
+ /* (level 0 symbols are the most common symbols) */
+ offset = (unsigned)(random() % symbol_count[0]);
+ sym_com[v] = &symbol_info[0][offset];
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Common symbol #%u = '%s'\n", v, symbol_info[0][offset].name);
+ } /* end for */
+ } /* end if */
+
+ /* Allocate space for 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Allocate array to hold pointers to symbols for random datasets */
+ if(NULL == (sym_rand = (symbol_info_t **)malloc(sizeof(symbol_info_t *) * nrandom)))
+ return -1;
+
+ /* Determine the random datasets */
+ for(v = 0; v < nrandom; v++) {
+ symbol_info_t *sym; /* Symbol to use */
+
+ /* Determine the symbol, within all symbols */
+ if(NULL == (sym = choose_dataset()))
+ return -1;
+ sym_rand[v] = sym;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Random symbol #%u = '%s'\n", v, sym->name);
+ } /* end for */
+ } /* end if */
+
+ /* Create a dataspace for the record to read */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Reading records\n");
+
+ /* Get the starting time */
+ start_time = time(NULL);
+ curr_time = start_time;
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Loop over reading records until [at least] the correct # of seconds have passed */
+ while(curr_time < (time_t)(start_time + (time_t)nseconds)) {
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Opening file: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return -1;
+
+ /* Check 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Checking common symbols\n");
+
+ /* Iterate over common datasets */
+ for(v = 0; v < ncommon; v++) {
+ /* Check common dataset */
+ if(check_dataset(fid, verbose, sym_com[v]->name, &record, mem_sid) < 0)
+ return -1;
+ memset(&record, 0, sizeof(record));
+ } /* end for */
+ } /* end if */
+
+ /* Check 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Checking random symbols\n");
+
+ /* Iterate over random datasets */
+ for(v = 0; v < nrandom; v++) {
+ /* Check random dataset */
+ if(check_dataset(fid, verbose, sym_rand[v]->name, &record, mem_sid) < 0)
+ return -1;
+ memset(&record, 0, sizeof(record));
+ } /* end for */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing file\n");
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ /* Sleep for the appropriate # of seconds */
+ sleep(poll_time);
+
+ /* Retrieve the current time */
+ curr_time = time(NULL);
+ } /* end while */
+
+ /* Close the fapl */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing datasets\n");
+
+ /* Close 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Release array holding dataset ID's for random datasets */
+ free(sym_rand);
+ } /* end if */
+
+ /* Close 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Release array holding dataset ID's for common datasets */
+ free(sym_com);
+ } /* end if */
+
+ return 0;
+} /* end read_records() */
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_remove_reader [-q] [-s <# of seconds to sleep between\n");
+ printf(" polling>] [-h <# of common symbols to poll>] [-l <# of random symbols\n");
+ printf(" to poll>] [-r <random seed>] <# of seconds to test>\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), 1 second between polling ('-s 1'),\n");
+ printf("5 common symbols to poll ('-h 5'), 10 random symbols to poll ('-l 10'),\n");
+ printf("and will generate a random seed (no -r given).\n");
+ printf("\n");
+ exit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ long nseconds = 0; /* # of seconds to test */
+ int poll_time = 1; /* # of seconds between polling */
+ int ncommon = 5; /* # of common symbols to poll */
+ int nrandom = 10; /* # of random symbols to poll */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variables */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of common symbols to poll */
+ case 'h':
+ ncommon = atoi(argv[u + 1]);
+ if(ncommon < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* # of random symbols to poll */
+ case 'l':
+ nrandom = atoi(argv[u + 1]);
+ if(nrandom < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = atoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* # of seconds between polling */
+ case 's':
+ poll_time = atoi(argv[u + 1]);
+ if(poll_time < 0)
+ usage();
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nseconds = atol(argv[u]);
+ if(nseconds <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nseconds <= 0)
+ usage();
+ if(poll_time >= nseconds)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ fprintf(stderr, "Parameters:\n");
+ fprintf(stderr, "\t# of seconds between polling = %d\n", poll_time);
+ fprintf(stderr, "\t# of common symbols to poll = %d\n", ncommon);
+ fprintf(stderr, "\t# of random symbols to poll = %d\n", nrandom);
+ fprintf(stderr, "\t# of seconds to test = %ld\n", nseconds);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ gettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ srandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ fprintf(stderr, "Using reader random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0) {
+ fprintf(stderr, "Error generating symbol names!\n");
+ exit(1);
+ } /* end if */
+
+ /* Create datatype for creating datasets */
+ if((symbol_tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Reading records from datasets */
+ if(read_records(FILENAME, verbose, (unsigned long)nseconds, (unsigned)poll_time, (unsigned)ncommon, (unsigned)nrandom) < 0) {
+ fprintf(stderr, "Error reading records from datasets!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ fprintf(stderr, "Error releasing symbols!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing objects\n");
+
+ /* Close objects created */
+ if(H5Tclose(symbol_tid) < 0) {
+ fprintf(stderr, "Error closing symbol datatype!\n");
+ exit(1);
+ } /* end if */
+
+ return 0;
+}
diff --git a/test/swmr_remove_writer.c b/test/swmr_remove_writer.c
new file mode 100644
index 0000000..a4f030c
--- /dev/null
+++ b/test/swmr_remove_writer.c
@@ -0,0 +1,383 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_remove_writer.c
+ *
+ * Purpose: Removes data from a randomly selected subset of the datasets
+ * in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_remove_reader program. It is also run AFTER a sequential
+ * (not concurrent!) invoking of swmr_writer so the writer
+ * can dump a bunch of data into the datasets. Otherwise,
+ * there wouldn't be much to shrink :)
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+#include <sys/time.h>
+
+#include "swmr_common.h"
+
+/****************/
+/* Local Macros */
+/****************/
+
+/* The maximum number of records to remove in one step */
+#define MAX_REMOVE_SIZE 10
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t open_skeleton(const char *filename, unsigned verbose, unsigned old);
+static int remove_records(hid_t fid, unsigned verbose, unsigned long nshrinks,
+ unsigned long flush_count);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_skeleton
+ *
+ * Purpose: Opens the SWMR HDF5 file and datasets.
+ *
+ * Parameters: const char *filename
+ * The filename of the SWMR HDF5 file to open
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * The dataset IDs are stored in a global array
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_skeleton(const char *filename, unsigned verbose, unsigned old)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dim[2]; /* Dataspace dimensions */
+ unsigned u, v; /* Local index variable */
+
+ assert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ if(!old) {
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ }
+
+#ifdef QAK
+/* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ fprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ fprintf(stderr, "mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return -1;
+ if((sid = H5Dget_space(symbol_info[u][v].dsid)) < 0)
+ return -1;
+ if(2 != H5Sget_simple_extent_ndims(sid))
+ return -1;
+ if(H5Sget_simple_extent_dims(sid, dim, NULL) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = dim[1];
+ } /* end for */
+
+ return fid;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: remove_records
+ *
+ * Purpose: Removes a specified number of records from random datasets in
+ * the SWMR test file.
+ *
+ * Parameters: hid_t fid
+ * The file ID of the SWMR HDF5 file
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * unsigned long nshrinks
+ * # of records to remove from the datasets
+ *
+ * unsigned long flush_count
+ * # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+remove_records(hid_t fid, unsigned verbose, unsigned long nshrinks, unsigned long flush_count)
+{
+ unsigned long shrink_to_flush; /* # of removals before flush */
+ hsize_t dim[2] = {1,0}; /* Dataspace dimensions */
+ unsigned long u, v; /* Local index variables */
+
+ assert(fid >= 0);
+
+ /* Remove records from random datasets, according to frequency distribution */
+ shrink_to_flush = flush_count;
+ for(u = 0; u < nshrinks; u++) {
+ symbol_info_t *symbol; /* Symbol to remove record from */
+ hsize_t remove_size; /* Size to reduce dataset dimension by */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Shrink the dataset's dataspace */
+ remove_size = (hsize_t)random() % MAX_REMOVE_SIZE + 1;
+ if(remove_size > symbol->nrecords)
+ symbol->nrecords = 0;
+ else
+ symbol->nrecords -= remove_size;
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ shrink_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == shrink_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ shrink_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_remove_writer [-q] [-o] [-f <# of shrinks between flushing\n");
+ printf(" file contents>] [-r <random seed>] <# of shrinks>\n");
+ printf("\n");
+ printf("<# of shrinks between flushing file contents> should be 0 (for no\n");
+ printf("flushing) or between 1 and (<# of shrinks> - 1)\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), latest format when opening file (no '-o' given),\n");
+ printf("flushing every 1000 shrinks ('-f 1000'), and will generate a random seed (no -r given).\n");
+ printf("\n");
+ exit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nshrinks = 0; /* # of times to shrink the dataset */
+ long flush_count = 1000; /* # of records to write between flushing file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned old = 0; /* Whether to use non-latest-format when opening file */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variable */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = atol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = atoi(argv[u + 1]);
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* Use non-latest-format when opening file */
+ case 'o':
+ old = 1;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nshrinks = atol(argv[u]);
+ if(nshrinks <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nshrinks <= 0)
+ usage();
+ if(flush_count >= nshrinks)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ fprintf(stderr, "Parameters:\n");
+ fprintf(stderr, "\t# of shrinks between flushes = %ld\n", flush_count);
+ fprintf(stderr, "\t# of shrinks = %ld\n", nshrinks);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ gettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ srandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ fprintf(stderr, "Using writer random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose, old)) < 0) {
+ fprintf(stderr, "Error opening skeleton file!\n");
+ exit(1);
+ } /* end if */
+
+ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE);
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Removing records\n");
+
+ /* Remove records from datasets */
+ if(remove_records(fid, verbose, (unsigned long)nshrinks, (unsigned long)flush_count) < 0) {
+ fprintf(stderr, "Error removing records from datasets!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ fprintf(stderr, "Error releasing symbols!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ fprintf(stderr, "Error closing file!\n");
+ exit(1);
+ } /* end if */
+
+ return 0;
+}
diff --git a/test/swmr_sparse_reader.c b/test/swmr_sparse_reader.c
new file mode 100644
index 0000000..c7841a9
--- /dev/null
+++ b/test/swmr_sparse_reader.c
@@ -0,0 +1,451 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_sparse_reader.c
+ *
+ * Purpose: Reads data from a randomly selected subset of the datasets
+ * in the SWMR test file. Unlike the regular reader, these
+ * datasets will be shrinking.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_sparse_writer program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+#include "swmr_common.h"
+
+#include <unistd.h>
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define TIMEOUT 300
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+static hid_t symbol_tid = (-1);
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static int check_dataset(hid_t fid, unsigned verbose, const symbol_info_t *symbol,
+ symbol_t *record, hid_t rec_sid);
+static int read_records(const char *filename, unsigned verbose, unsigned long nrecords,
+ unsigned poll_time, unsigned reopen_count);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_dataset
+ *
+ * Purpose: For a given dataset, checks to make sure that the stated
+ * and actual sizes are the same. If they are not, then
+ * we have an inconsistent dataset due to a SWMR error.
+ *
+ * Parameters: hid_t fid
+ * The SWMR test file's ID.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * const symbol_info_t *symbol
+ * The dataset from which to read (the ID is in the struct).
+ * Must be pre-allocated.
+ *
+ * symbol_t *record
+ * Memory for the record. Must be pre-allocated.
+ *
+ * hid_t rec_sid
+ * The memory dataspace for access. It's always the same so
+ * there is no need to re-create it every time this function
+ * is called.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+check_dataset(hid_t fid, unsigned verbose, const symbol_info_t *symbol, symbol_t *record,
+    hid_t rec_sid)
+{
+    hid_t dsid;                 /* Dataset ID */
+    hid_t file_sid;             /* Dataset's space ID */
+    hsize_t start[2] = {0, 0};  /* Hyperslab selection values */
+    hsize_t count[2] = {1, 1};  /* Hyperslab selection values */
+
+    assert(fid >= 0);
+    assert(symbol);
+    assert(record);
+    assert(rec_sid >= 0);
+
+    /* Open dataset for symbol */
+    if((dsid = H5Dopen2(fid, symbol->name, H5P_DEFAULT)) < 0)
+        return -1;
+
+    /* Get the dataset's dataspace */
+    if((file_sid = H5Dget_space(dsid)) < 0)
+        return -1;
+
+    /* Choose the random record in the dataset (will be the same as chosen by
+     * the writer, since reader and writer seed random() from the same "seed"
+     * attribute in the file) */
+    start[1] = (hsize_t)random() % symbol->nrecords;
+    if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+        return -1;
+
+    /* Emit informational message.  Note: print the selected record index
+     * (start[1]); the previous code passed the array itself, which decays to
+     * a pointer and mismatches the %lld conversion (undefined behavior). */
+    if(verbose)
+        fprintf(stderr, "Symbol = '%s', location = %lld\n", symbol->name, (long long)start[1]);
+
+    /* Read record from dataset (pre-set rec_id to a sentinel so a silent
+     * short read is detectable) */
+    record->rec_id = (uint64_t)ULLONG_MAX;
+    if(H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
+        return -1;
+
+    /* Verify record value (the writer stores each record's index in rec_id) */
+    if(record->rec_id != start[1]) {
+        fprintf(stderr, "*** ERROR ***\n");
+        fprintf(stderr, "Incorrect record value!\n");
+        fprintf(stderr, "Symbol = '%s', location = %lld, record->rec_id = %llu\n", symbol->name, (long long)start[1], (unsigned long long)record->rec_id);
+        return(-1);
+    } /* end if */
+
+    /* Close the dataset's dataspace */
+    if(H5Sclose(file_sid) < 0)
+        return -1;
+
+    /* Close dataset for symbol */
+    if(H5Dclose(dsid) < 0)
+        return -1;
+
+    return 0;
+} /* end check_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: read_records
+ *
+ * Purpose: For a given dataset, checks to make sure that the stated
+ * and actual sizes are the same. If they are not, then
+ * we have an inconsistent dataset due to a SWMR error.
+ *
+ * Parameters: const char *filename
+ * The SWMR test file's name.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * unsigned long nrecords
+ * The total number of records to read.
+ *
+ * unsigned poll_time
+ * The amount of time to sleep (s).
+ *
+ * unsigned reopen_count
+ *
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+read_records(const char *filename, unsigned verbose, unsigned long nrecords,
+    unsigned poll_time, unsigned reopen_count)
+{
+    hid_t fid;                  /* File ID */
+    hid_t aid;                  /* Attribute ID */
+    time_t start_time;          /* Starting time */
+    hid_t mem_sid;              /* Memory dataspace ID */
+    symbol_t record;            /* The record to add to the dataset */
+    unsigned seed;              /* Seed for random number generator */
+    unsigned iter_to_reopen = reopen_count; /* # of iterations until reopen */
+    unsigned long u;            /* Local index variable */
+    hid_t fapl;                 /* File access property list ID */
+
+    assert(filename);
+    assert(poll_time != 0);
+
+    /* Create file access property list */
+    if((fapl = h5_fileaccess()) < 0)
+        return -1;
+
+    /* Request "semi" close degree.  Check the return value -- the previous
+     * code silently ignored a failure here. */
+    if(H5Pset_fclose_degree(fapl, H5F_CLOSE_SEMI) < 0)
+        return -1;
+
+    /* Emit informational message */
+    if(verbose)
+        fprintf(stderr, "Opening file: %s\n", filename);
+
+    /* Open the file in SWMR-read mode */
+    if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+        return -1;
+
+    /* Seed the random number generator with the attribute in the file, so
+     * this reader chooses the same datasets/records as the writer */
+    if((aid = H5Aopen(fid, "seed", H5P_DEFAULT)) < 0)
+        return -1;
+    if(H5Aread(aid, H5T_NATIVE_UINT, &seed) < 0)
+        return -1;
+    if(H5Aclose(aid) < 0)
+        return -1;
+    srandom(seed);
+
+    /* Reset the record */
+    /* (record's 'info' field might need to change for each record written, also) */
+    memset(&record, 0, sizeof(record));
+
+    /* Create a dataspace for the record to read */
+    if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+        return -1;
+
+    /* Emit informational message */
+    if(verbose)
+        fprintf(stderr, "Reading records\n");
+
+    /* Get the starting time */
+    start_time = time(NULL);
+
+    /* Read records */
+    for(u = 0; u < nrecords; u++) {
+        symbol_info_t *symbol = NULL;   /* Symbol (dataset) */
+        htri_t attr_exists;             /* Whether the sequence number attribute exists */
+        unsigned long file_u;           /* Attribute sequence number (writer's "u") */
+
+        /* Get a random dataset, according to the symbol distribution */
+        symbol = choose_dataset();
+
+        /* Fill in "nrecords" field.  Note that this depends on the writer
+         * using the same algorithm and "nrecords" */
+        symbol->nrecords = nrecords / 5;
+
+        /* Wait until we can read the dataset */
+        do {
+            /* Check if sequence attribute exists */
+            if((attr_exists = H5Aexists_by_name(fid, symbol->name, "seq", H5P_DEFAULT)) < 0)
+                return -1;
+
+            if(attr_exists) {
+                /* Read sequence number attribute */
+                if((aid = H5Aopen_by_name(fid, symbol->name, "seq", H5P_DEFAULT, H5P_DEFAULT)) < 0)
+                    return -1;
+                if(H5Aread(aid, H5T_NATIVE_ULONG, &file_u) < 0)
+                    return -1;
+                if(H5Aclose(aid) < 0)
+                    return -1;
+
+                /* Check if sequence number is at least u - if so, this should
+                 * guarantee that this record has been written */
+                if(file_u >= u)
+                    break;
+            } /* end if */
+
+            /* Check for timeout */
+            if(time(NULL) >= (time_t)(start_time + (time_t)TIMEOUT)) {
+                fprintf(stderr, "Reader timed out\n");
+                return -1;
+            } /* end if */
+
+            /* Pause */
+            sleep(poll_time);
+
+            /* Retrieve and print the collection of metadata read retries */
+            if(print_metadata_retries_info(fid) < 0)
+                fprintf(stderr, "Warning: could not obtain metadata retries info\n");
+
+            /* Reopen the file on every poll, to pick up metadata the writer
+             * has flushed since our last open */
+            if(H5Fclose(fid) < 0)
+                return -1;
+            if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+                return -1;
+            iter_to_reopen = reopen_count;
+        } while(1);
+
+        /* Emit informational message */
+        if(verbose)
+            fprintf(stderr, "Checking dataset %lu\n", u);
+
+        /* Check dataset */
+        if(check_dataset(fid, verbose, symbol, &record, mem_sid) < 0)
+            return -1;
+        memset(&record, 0, sizeof(record));
+
+        /* Check for reopen */
+        iter_to_reopen--;
+        if(iter_to_reopen == 0) {
+            /* Emit informational message */
+            if(verbose)
+                fprintf(stderr, "Reopening file: %s\n", filename);
+
+            /* Retrieve and print the collection of metadata read retries */
+            if(print_metadata_retries_info(fid) < 0)
+                fprintf(stderr, "Warning: could not obtain metadata retries info\n");
+
+            /* Reopen the file */
+            if(H5Fclose(fid) < 0)
+                return -1;
+            if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+                return -1;
+            iter_to_reopen = reopen_count;
+        } /* end if */
+    } /* end while */
+
+    /* Retrieve and print the collection of metadata read retries */
+    if(print_metadata_retries_info(fid) < 0)
+        fprintf(stderr, "Warning: could not obtain metadata retries info\n");
+
+    /* Close file */
+    if(H5Fclose(fid) < 0)
+        return -1;
+
+    /* Close the memory dataspace */
+    if(H5Sclose(mem_sid) < 0)
+        return -1;
+
+    /* Close the file access property list (the previous code leaked it) */
+    if(H5Pclose(fapl) < 0)
+        return -1;
+
+    return 0;
+} /* end read_records() */
+
+static void
+usage(void)
+{
+    /* Print the reader's command-line help and exit with an error code.
+     * Note: the reopen-interval option is '-n'; the previous help text
+     * incorrectly referred to it as '-r'. */
+    printf("\n");
+    printf("Usage error!\n");
+    printf("\n");
+    printf("Usage: swmr_sparse_reader [-q] [-s <# of seconds to wait for writer>]\n");
+    printf("    [-n <# of reads between reopens>] <# of records>\n");
+    printf("\n");
+    printf("Defaults to verbose (no '-q' given), 1 second wait ('-s 1') and 1 read\n");
+    printf("between reopens ('-n 1')\n");
+    printf("\n");
+    printf("Note that the # of records *must* be the same as that supplied to\n");
+    printf("swmr_sparse_writer\n");
+    printf("\n");
+    exit(1);
+} /* end usage() */
+
+int main(int argc, const char *argv[])
+{
+    long nrecords = 0;      /* # of records to read */
+    int poll_time = 1;      /* # of seconds to sleep when waiting for writer */
+    int reopen_count = 1;   /* # of reads between reopens */
+    unsigned verbose = 1;   /* Whether to emit some informational messages */
+    unsigned u;             /* Local index variables */
+
+    /* Parse command line options */
+    if(argc < 2)
+        usage();
+    if(argc > 1) {
+        u = 1;
+        while(u < (unsigned)argc) {
+            if(argv[u][0] == '-') {
+                switch(argv[u][1]) {
+                    /* # of reads between reopens */
+                    case 'n':
+                        /* Guard against a missing option argument (the
+                         * previous code passed NULL to atoi() when '-n'
+                         * was the last argument) */
+                        if((u + 1) >= (unsigned)argc)
+                            usage();
+                        reopen_count = atoi(argv[u + 1]);
+                        if(reopen_count < 0)
+                            usage();
+                        u += 2;
+                        break;
+
+                    /* Be quiet */
+                    case 'q':
+                        verbose = 0;
+                        u++;
+                        break;
+
+                    /* # of seconds between polling */
+                    case 's':
+                        /* Guard against a missing option argument */
+                        if((u + 1) >= (unsigned)argc)
+                            usage();
+                        poll_time = atoi(argv[u + 1]);
+                        if(poll_time < 0)
+                            usage();
+                        u += 2;
+                        break;
+
+                    default:
+                        usage();
+                        break;
+                } /* end switch */
+            } /* end if */
+            else {
+                /* Get the number of records to read */
+                nrecords = atol(argv[u]);
+                if(nrecords <= 0)
+                    usage();
+
+                u++;
+            } /* end else */
+        } /* end while */
+    } /* end if */
+
+    /* Require a positive record count; an options-only command line would
+     * otherwise slip through with nrecords == 0 (matches the check in the
+     * sparse writer's main routine) */
+    if(nrecords <= 0)
+        usage();
+
+    /* Emit informational message */
+    if(verbose) {
+        fprintf(stderr, "Parameters:\n");
+        fprintf(stderr, "\t# of seconds between polling = %d\n", poll_time);
+        fprintf(stderr, "\t# of reads between reopens = %d\n", reopen_count);
+        fprintf(stderr, "\t# of records to read = %ld\n", nrecords);
+    } /* end if */
+
+    /* Emit informational message */
+    if(verbose)
+        fprintf(stderr, "Generating symbol names\n");
+
+    /* Generate dataset names */
+    if(generate_symbols() < 0) {
+        fprintf(stderr, "Error generating symbol names!\n");
+        exit(1);
+    } /* end if */
+
+    /* Create datatype for reading records (stored in the file-scope
+     * symbol_tid, used by check_dataset()) */
+    if((symbol_tid = create_symbol_datatype()) < 0)
+        return -1;
+
+    /* Reading records from datasets */
+    if(read_records(FILENAME, verbose, (unsigned long) nrecords, (unsigned)poll_time, (unsigned)reopen_count) < 0) {
+        fprintf(stderr, "Error reading records from datasets!\n");
+        exit(1);
+    } /* end if */
+
+    /* Emit informational message */
+    if(verbose)
+        fprintf(stderr, "Releasing symbols\n");
+
+    /* Clean up the symbols */
+    if(shutdown_symbols() < 0) {
+        fprintf(stderr, "Error releasing symbols!\n");
+        exit(1);
+    } /* end if */
+
+    /* Emit informational message */
+    if(verbose)
+        fprintf(stderr, "Closing objects\n");
+
+    /* Close objects created */
+    if(H5Tclose(symbol_tid) < 0) {
+        fprintf(stderr, "Error closing symbol datatype!\n");
+        exit(1);
+    } /* end if */
+
+    return 0;
+} /* end main() */
diff --git a/test/swmr_sparse_writer.c b/test/swmr_sparse_writer.c
new file mode 100644
index 0000000..e752cb3
--- /dev/null
+++ b/test/swmr_sparse_writer.c
@@ -0,0 +1,460 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+ /*-------------------------------------------------------------------------
+ *
+ * Created: swmr_sparse_writer.c
+ *
+ * Purpose: Writes data to a randomly selected subset of the datasets
+ * in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_sparse_reader program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+
+#include "swmr_common.h"
+
+/****************/
+/* Local Macros */
+/****************/
+
+#ifdef OUT
+#define BUSY_WAIT 100000
+#endif /* OUT */
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t open_skeleton(const char *filename, unsigned verbose);
+static int add_records(hid_t fid, unsigned verbose, unsigned long nrecords,
+ unsigned long flush_count);
+static void usage(void);
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_skeleton
+ *
+ * Purpose: Opens the SWMR HDF5 file and datasets.
+ *
+ * Parameters: const char *filename
+ * The filename of the SWMR HDF5 file to open
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * The dataset IDs are stored in a global array
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_skeleton(const char *filename, unsigned verbose)
+{
+    hid_t fid;          /* File ID for new HDF5 file */
+    hid_t fapl;         /* File access property list */
+    hid_t aid;          /* Attribute ID */
+    unsigned seed;      /* Seed for random number generator */
+    unsigned u, v;      /* Local index variables (level, symbol-within-level) */
+
+    assert(filename);
+
+    /* Create file access property list */
+    if((fapl = h5_fileaccess()) < 0)
+        return -1;
+
+    /* Set to use the latest library format */
+    /* NOTE(review): presumably required for SWMR-write access below --
+     * confirm against the H5Fstart_swmr_write/SWMR documentation */
+    if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+        return -1;
+
+#ifdef QAK
+    /* Developer debug scaffolding (compiled only when QAK is defined):
+     * increase the initial size of the metadata cache */
+    {
+        H5AC_cache_config_t mdc_config;
+
+        mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+        H5Pget_mdc_config(fapl, &mdc_config);
+        fprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+        fprintf(stderr,"mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+        mdc_config.set_initial_size = 1;
+        mdc_config.initial_size = 16 * 1024 * 1024;
+        /* mdc_config.epoch_length = 5000; */
+        H5Pset_mdc_config(fapl, &mdc_config);
+    }
+#endif /* QAK */
+
+#ifdef QAK
+    /* Developer debug scaffolding: log file driver activity */
+    H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+    /* Open the file in SWMR-write mode */
+    if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+        return -1;
+
+    /* Close file access property list (no longer needed once the file is open) */
+    if(H5Pclose(fapl) < 0)
+        return -1;
+
+    /* Emit informational message */
+    if(verbose)
+        fprintf(stderr, "Opening datasets\n");
+
+    /* Seed the random number generator with the attribute in the file, so
+     * this writer and swmr_sparse_reader generate the same random sequence
+     * of datasets/record locations */
+    if((aid = H5Aopen(fid, "seed", H5P_DEFAULT)) < 0)
+        return -1;
+    if(H5Aread(aid, H5T_NATIVE_UINT, &seed) < 0)
+        return -1;
+    if(H5Aclose(aid) < 0)
+        return -1;
+    srandom(seed);
+
+    /* Open the datasets and reset their record counts; add_records() uses
+     * nrecords == 0 to detect the first write to each dataset */
+    for(u = 0; u < NLEVELS; u++)
+        for(v = 0; v < symbol_count[u]; v++) {
+            if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+                return(-1);
+            symbol_info[u][v].nrecords = 0;
+        } /* end for */
+
+    return fid;
+} /* end open_skeleton() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: add_records
+ *
+ * Purpose: Writes a specified number of records to random datasets in
+ * the SWMR test file.
+ *
+ * Parameters: hid_t fid
+ * The file ID of the SWMR HDF5 file
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * unsigned long nrecords
+ * # of records to write to the datasets
+ *
+ * unsigned long flush_count
+ * # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+add_records(hid_t fid, unsigned verbose, unsigned long nrecords, unsigned long flush_count)
+{
+    hid_t tid;                          /* Datatype ID for records */
+    hid_t mem_sid;                      /* Memory dataspace ID */
+    hsize_t start[2] = {0, 0};          /* Hyperslab selection values */
+    hsize_t count[2] = {1, 1};          /* Hyperslab selection values */
+    symbol_t record;                    /* The record to add to the dataset */
+    H5AC_cache_config_t mdc_config_orig; /* Original metadata cache configuration */
+    H5AC_cache_config_t mdc_config_cork; /* Corked metadata cache configuration */
+    unsigned long rec_to_flush;         /* # of records left to write before flush */
+    volatile int dummy;                 /* Dummy variable for busy-wait (only used when OUT is defined) */
+    hsize_t dim[2] = {1,0};             /* Dataspace dimensions */
+    unsigned long u, v;                 /* Local index variables */
+
+    assert(fid >= 0);
+
+    /* Reset the record */
+    /* (record's 'info' field might need to change for each record written, also) */
+    memset(&record, 0, sizeof(record));
+
+    /* Create a dataspace for the record to add (scalar: one record at a time) */
+    if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+        return -1;
+
+    /* Create datatype for appending records */
+    if((tid = create_symbol_datatype()) < 0)
+        return -1;
+
+    /* Get the current metadata cache configuration, and set up the corked
+     * configuration.  NOTE(review): the H5Fset_mdc_config() calls below are
+     * commented out, so mdc_config_orig/mdc_config_cork are currently unused
+     * at runtime; they appear to be kept for debugging -- confirm. */
+    mdc_config_orig.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+    if(H5Fget_mdc_config(fid, &mdc_config_orig) < 0)
+        return -1;
+    memcpy(&mdc_config_cork, &mdc_config_orig, sizeof(mdc_config_cork));
+    mdc_config_cork.evictions_enabled = FALSE;
+    mdc_config_cork.incr_mode = H5C_incr__off;
+    mdc_config_cork.flash_incr_mode = H5C_flash_incr__off;
+    mdc_config_cork.decr_mode = H5C_decr__off;
+
+    /* Add records to random datasets, according to frequency distribution.
+     * Error paths return -1 without releasing IDs; the caller exits the
+     * process on failure, so cleanup is left to process teardown. */
+    rec_to_flush = flush_count;
+    for(u = 0; u < nrecords; u++) {
+        symbol_info_t *symbol;  /* Symbol to write record to */
+        hid_t file_sid;         /* Dataset's space ID */
+        hid_t aid;              /* Attribute ID (assigned on both branches below) */
+
+        /* Get a random dataset, according to the symbol distribution */
+        symbol = choose_dataset();
+
+        /* Cork the metadata cache, to prevent the object header from being
+         * flushed before the data has been written */
+        /*if(H5Fset_mdc_config(fid, &mdc_config_cork) < 0)
+            return(-1);*/
+
+        /* If this is the first time the dataset has been opened, extend it and
+         * add the sequence attribute.  (nrecords / 5 must match the value the
+         * reader computes for symbol->nrecords -- keep the two in sync.) */
+        if(symbol->nrecords == 0) {
+            symbol->nrecords = nrecords / 5;
+            dim[1] = symbol->nrecords;
+
+            if(H5Dset_extent(symbol->dsid, dim) < 0)
+                return -1;
+
+            if((file_sid = H5Screate(H5S_SCALAR)) < 0)
+                return -1;
+            if((aid = H5Acreate2(symbol->dsid, "seq", H5T_NATIVE_ULONG, file_sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+                return -1;
+            if(H5Sclose(file_sid) < 0)
+                return -1;
+        } /* end if */
+        else if((aid = H5Aopen(symbol->dsid, "seq", H5P_DEFAULT)) < 0)
+            return -1;
+
+        /* Get the coordinate to write */
+        start[1] = (hsize_t)random() % symbol->nrecords;
+
+        /* Set the record's ID (equal to its position, so the reader can
+         * verify the value it reads back) */
+        record.rec_id = start[1];
+
+        /* Get the dataset's dataspace */
+        if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+            return -1;
+
+        /* Choose a random record in the dataset */
+        if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+            return -1;
+
+        /* Write record to the dataset */
+        if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &record) < 0)
+            return -1;
+
+        /* Write the sequence number attribute.  Since we synchronize the random
+         * number seed, the readers will always generate the same sequence of
+         * randomly chosen datasets and offsets.  Therefore, and because of the
+         * flush dependencies on the object header, the reader will be
+         * guaranteed to see the written data if the sequence attribute is >=u.
+         */
+        if(H5Awrite(aid, H5T_NATIVE_ULONG, &u) < 0)
+            return -1;
+
+        /* Close the attribute */
+        if(H5Aclose(aid) < 0)
+            return -1;
+
+        /* Uncork the metadata cache */
+        /*if(H5Fset_mdc_config(fid, &mdc_config_orig) < 0)
+            return(-1);*/
+
+        /* Close the dataset's dataspace */
+        if(H5Sclose(file_sid) < 0)
+            return -1;
+
+        /* Check for flushing file (flush_count == 0 means "never flush") */
+        if(flush_count > 0) {
+            /* Decrement count of records to write before flushing */
+            rec_to_flush--;
+
+            /* Check for counter being reached */
+            if(0 == rec_to_flush) {
+                /* Flush contents of file */
+                if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+                    return -1;
+
+                /* Reset flush counter */
+                rec_to_flush = flush_count;
+            } /* end if */
+        } /* end if */
+
+#ifdef OUT
+        /* Busy wait, to let readers catch up */
+        /* If this is removed, also remove the BUSY_WAIT symbol
+         * at the top of the file.
+         */
+        dummy = 0;
+        for(v=0; v<BUSY_WAIT; v++)
+            dummy++;
+        if((unsigned long)dummy != v)
+            return -1;
+#endif /* OUT */
+
+    } /* end for */
+
+    /* Close the memory dataspace */
+    if(H5Sclose(mem_sid) < 0)
+        return -1;
+
+    /* Close the datatype */
+    if(H5Tclose(tid) < 0)
+        return -1;
+
+    /* Emit informational message */
+    if(verbose)
+        fprintf(stderr, "Closing datasets\n");
+
+    /* Close the datasets */
+    for(u = 0; u < NLEVELS; u++)
+        for(v = 0; v < symbol_count[u]; v++)
+            if(H5Dclose(symbol_info[u][v].dsid) < 0)
+                return -1;
+
+    return 0;
+} /* end add_records() */
+
+static void
+usage(void)
+{
+    /* Emit the command-line help text for the sparse writer on stdout,
+     * then terminate with an error code */
+    fputs("\n", stdout);
+    fputs("Usage error!\n", stdout);
+    fputs("\n", stdout);
+    fputs("Usage: swmr_sparse_writer [-q] [-f <# of records to write between\n", stdout);
+    fputs("    flushing file contents>] <# of records>\n", stdout);
+    fputs("\n", stdout);
+    fputs("<# of records to write between flushing file contents> should be 0\n", stdout);
+    fputs("(for no flushing) or between 1 and (<# of records> - 1)\n", stdout);
+    fputs("\n", stdout);
+    fputs("Defaults to verbose (no '-q' given) and flushing every 1000 records\n", stdout);
+    fputs("('-f 1000')\n", stdout);
+    fputs("\n", stdout);
+    exit(1);
+} /* end usage() */
+
+int main(int argc, const char *argv[])
+{
+    hid_t fid;              /* File ID for file opened */
+    long nrecords = 0;      /* # of records to append */
+    long flush_count = 1000; /* # of records to write between flushing file */
+    unsigned verbose = 1;   /* Whether to emit some informational messages */
+    unsigned u;             /* Local index variable */
+
+    /* Parse command line options */
+    if(argc < 2)
+        usage();
+    if(argc > 1) {
+        u = 1;
+        while(u < (unsigned)argc) {
+            if(argv[u][0] == '-') {
+                switch(argv[u][1]) {
+                    /* # of records to write between flushing file */
+                    case 'f':
+                        /* Guard against a missing option argument (the
+                         * previous code passed NULL to atol() when '-f'
+                         * was the last argument) */
+                        if((u + 1) >= (unsigned)argc)
+                            usage();
+                        flush_count = atol(argv[u + 1]);
+                        if(flush_count < 0)
+                            usage();
+                        u += 2;
+                        break;
+
+                    /* Be quiet */
+                    case 'q':
+                        verbose = 0;
+                        u++;
+                        break;
+
+                    default:
+                        usage();
+                        break;
+                } /* end switch */
+            } /* end if */
+            else {
+                /* Get the number of records to append */
+                nrecords = atol(argv[u]);
+                if(nrecords <= 0)
+                    usage();
+
+                u++;
+            } /* end else */
+        } /* end while */
+    } /* end if */
+    if(nrecords <= 0)
+        usage();
+    if(flush_count >= nrecords)
+        usage();
+
+    /* Emit informational message */
+    if(verbose) {
+        fprintf(stderr, "Parameters:\n");
+        fprintf(stderr, "\t# of records between flushes = %ld\n", flush_count);
+        fprintf(stderr, "\t# of records to write = %ld\n", nrecords);
+    } /* end if */
+
+    /* Emit informational message */
+    if(verbose)
+        fprintf(stderr, "Generating symbol names\n");
+
+    /* Generate dataset names (report failure, consistent with the sparse
+     * reader's main routine, instead of silently returning -1) */
+    if(generate_symbols() < 0) {
+        fprintf(stderr, "Error generating symbol names!\n");
+        exit(1);
+    } /* end if */
+
+    /* Emit informational message */
+    if(verbose)
+        fprintf(stderr, "Opening skeleton file: %s\n", FILENAME);
+
+    /* Open file skeleton */
+    if((fid = open_skeleton(FILENAME, verbose)) < 0) {
+        fprintf(stderr, "Error opening skeleton file!\n");
+        exit(1);
+    } /* end if */
+
+    /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+    h5_send_message(WRITER_MESSAGE);
+
+    /* Emit informational message */
+    if(verbose)
+        fprintf(stderr, "Adding records\n");
+
+    /* Append records to datasets */
+    if(add_records(fid, verbose, (unsigned long)nrecords, (unsigned long)flush_count) < 0) {
+        fprintf(stderr, "Error appending records to datasets!\n");
+        exit(1);
+    } /* end if */
+
+    /* Emit informational message */
+    if(verbose)
+        fprintf(stderr, "Releasing symbols\n");
+
+    /* Clean up the symbols */
+    if(shutdown_symbols() < 0) {
+        fprintf(stderr, "Error releasing symbols!\n");
+        exit(1);
+    } /* end if */
+
+    /* Emit informational message */
+    if(verbose)
+        fprintf(stderr, "Closing objects\n");
+
+    /* Close objects opened */
+    if(H5Fclose(fid) < 0) {
+        fprintf(stderr, "Error closing file!\n");
+        exit(1);
+    } /* end if */
+
+    return 0;
+} /* end main() */
diff --git a/test/swmr_start_write.c b/test/swmr_start_write.c
new file mode 100644
index 0000000..0069bc7
--- /dev/null
+++ b/test/swmr_start_write.c
@@ -0,0 +1,739 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_start_write.c
+ *
+ * Purpose: This program enables SWMR writing mode via H5Fstart_swmr_write().
+ * It writes data to a randomly selected subset of the datasets
+ * in the SWMR test file; and it is intended to run concurrently
+ * with the swmr_reader program.
+ *
+ * NOTE: The routines in this program are basically copied and modified from
+ * swmr*.c.
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+#include <sys/time.h>
+
+#include "swmr_common.h"
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t create_file(const char *filename, unsigned verbose,
+ const char *index_type, unsigned random_seed);
+static int create_datasets(hid_t fid, int comp_level, unsigned verbose);
+static int create_close_datasets(hid_t fid, int comp_level, unsigned verbose);
+static int open_datasets(hid_t fid, unsigned verbose);
+static hid_t open_file(const char *filename, unsigned verbose);
+
+static int add_records(hid_t fid, unsigned verbose, unsigned long nrecords,
+ unsigned long flush_count);
+static void usage(void);
+
+#define CHUNK_SIZE 50 /* Chunk size for created datasets */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_file
+ *
+ * Purpose: Creates the HDF5 file (without SWMR access) which
+ * which will be used for testing H5Fstart_swmr_write().
+ *
+ * Parameters:
+ * filename: The SWMR test file's name.
+ * verbose: whether verbose console output is desired.
+ * index_type: The chunk index type (b1 | b2 | ea | fa)
+ * random_seed: The random seed to store in the file.
+ * The sparse tests use this value.
+ *
+ * Return: Success: the file ID
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+create_file(const char *filename, unsigned verbose,
+ const char *index_type, unsigned random_seed)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fcpl; /* File creation property list */
+ hid_t fapl; /* File access property list */
+ hid_t sid; /* Dataspace ID */
+ hid_t aid; /* Attribute ID */
+ hsize_t max_dims[2] = {1, H5S_UNLIMITED}; /* Dataset maximum dimensions */
+#ifdef FILLVAL_WORKS
+ symbol_t fillval; /* Dataset fill value */
+#endif /* FILLVAL_WORKS */
+
+ assert(filename);
+ assert(index_type);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* We ALWAYS select the latest file format for SWMR */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+ /* There are two chunk indexes tested here.
+ * With one unlimited dimension, we get the extensible array index
+ * type, with two unlimited dimensions, we get a v-2 B-tree.
+ */
+ if(!strcmp(index_type, "b2"))
+ max_dims[0] = H5S_UNLIMITED;
+
+ /* Create file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Creating file without SWMR access\n");
+
+ /* Create the file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ return -1;
+
+ /* Close file creation property list */
+ if(H5Pclose(fcpl) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Create attribute with (shared) random number seed - for sparse test */
+ if((sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+ if((aid = H5Acreate2(fid, "seed", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Awrite(aid, H5T_NATIVE_UINT, &random_seed) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Aclose(aid) < 0)
+ return -1;
+
+ return fid;
+} /* end create_file() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_datasets
+ *
+ * Purpose: Create datasets (and keep them opened) which will be used for testing
+ * H5Fstart_swmr_write().
+ *
+ * Parameters:
+ * fid: file ID for the SWMR test file
+ * comp_level: the compression level
+ * verbose: whether verbose console output is desired.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+create_datasets(hid_t fid, int comp_level, unsigned verbose)
+{
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t tid; /* Datatype for dataset elements */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dims[2] = {1, 0}; /* Dataset starting dimensions */
+ hsize_t max_dims[2] = {1, H5S_UNLIMITED}; /* Dataset maximum dimensions */
+ hsize_t chunk_dims[2] = {1, CHUNK_SIZE}; /* Chunk dimensions */
+ unsigned u, v; /* Local index variable */
+
+ /* Create datatype for creating datasets */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(2, dims, max_dims)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ return -1;
+ if(comp_level >= 0) {
+ if(H5Pset_deflate(dcpl, (unsigned)comp_level) < 0)
+ return -1;
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Creating datasets\n");
+
+ /* Create the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+
+ if((symbol_info[u][v].dsid = H5Dcreate2(fid, symbol_info[u][v].name, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = 0;
+
+ } /* end for */
+
+ return 0;
+} /* create_datasets() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_close_datasets
+ *
+ * Purpose: Create and close datasets which will be used for testing
+ * H5Fstart_swmr_write().
+ *
+ * Parameters:
+ * fid: file ID for the SWMR test file
+ * comp_level: the compression level
+ * verbose: whether verbose console output is desired.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+create_close_datasets(hid_t fid, int comp_level, unsigned verbose)
+{
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t tid; /* Datatype for dataset elements */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dims[2] = {1, 0}; /* Dataset starting dimensions */
+ hsize_t max_dims[2] = {1, H5S_UNLIMITED}; /* Dataset maximum dimensions */
+ hsize_t chunk_dims[2] = {1, CHUNK_SIZE}; /* Chunk dimensions */
+ unsigned u, v; /* Local index variable */
+
+ /* Create datatype for creating datasets */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(2, dims, max_dims)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ return -1;
+ if(comp_level >= 0) {
+ if(H5Pset_deflate(dcpl, (unsigned)comp_level) < 0)
+ return -1;
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Creating datasets\n");
+
+ /* Create the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ hid_t dsid; /* Dataset ID */
+ char name_buf[64];
+
+ generate_name(name_buf, u, v);
+ if((dsid = H5Dcreate2(fid, name_buf, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+
+ if(H5Dclose(dsid) < 0)
+ return -1;
+ } /* end for */
+
+ /* Closing */
+ if(H5Pclose(dcpl) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Tclose(tid) < 0)
+ return -1;
+
+ return 0;
+} /* create_close_datasets() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_file
+ *
+ * Purpose: Opens the HDF5 test file without SWMR access.
+ *
+ * Parameters:
+ * filename: The filename of the HDF5 file to open
+ * verbose: whether or not to emit verbose console messages
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_file(const char *filename, unsigned verbose)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+
+ assert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Opening the file without SWMR access: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ return fid;
+} /* open_file() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_datasets
+ *
+ * Purpose: Opens the datasets.
+ *
+ * Parameters:
+ * fid: file ID for the SWMR test file
+ * verbose: whether or not to emit verbose console messages
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+open_datasets(hid_t fid, unsigned verbose)
+{
+ unsigned u, v; /* Local index variable */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = 0;
+ } /* end for */
+
+ return 0;
+} /* open_datasets() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: add_records
+ *
+ * Purpose: Writes a specified number of records to random datasets in
+ * the SWMR test file.
+ *
+ * Parameters:
+ * fid: The file ID of the SWMR HDF5 file
+ * verbose: Whether or not to emit verbose console messages
+ * nrecords: # of records to write to the datasets
+ * flush_count: # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+add_records(hid_t fid, unsigned verbose, unsigned long nrecords, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+ hsize_t dim[2] = {1, 0}; /* Dataspace dimensions */
+ symbol_t record; /* The record to add to the dataset */
+ H5AC_cache_config_t mdc_config_orig; /* Original metadata cache configuration */
+ H5AC_cache_config_t mdc_config_cork; /* Corked metadata cache configuration */
+ unsigned long rec_to_flush; /* # of records left to write before flush */
+ unsigned long u, v; /* Local index variables */
+
+ assert(fid >= 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ memset(&record, 0, sizeof(record));
+
+ /* Create a dataspace for the record to add */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Get the current metadata cache configuration, and set up the corked
+ * configuration */
+ mdc_config_orig.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ if(H5Fget_mdc_config(fid, &mdc_config_orig) < 0)
+ return -1;
+ memcpy(&mdc_config_cork, &mdc_config_orig, sizeof(mdc_config_cork));
+ mdc_config_cork.evictions_enabled = FALSE;
+ mdc_config_cork.incr_mode = H5C_incr__off;
+ mdc_config_cork.flash_incr_mode = H5C_flash_incr__off;
+ mdc_config_cork.decr_mode = H5C_decr__off;
+
+ /* Add records to random datasets, according to frequency distribution */
+ rec_to_flush = flush_count;
+ for(u = 0; u < nrecords; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Set the record's ID (equal to its position) */
+ record.rec_id = symbol->nrecords;
+
+ /* Get the coordinate to write */
+ start[1] = symbol->nrecords;
+
+ /* Extend the dataset's dataspace to hold the new record */
+ symbol->nrecords++;
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return -1;
+
+ /* Choose the last record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &record) < 0)
+ return -1;
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ rec_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == rec_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ rec_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Close the datatype */
+ if(H5Tclose(tid) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+} /* add_records() */
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_start_write [-f <# of records to write between flushing file contents>]\n");
+ printf(" [-i <index type>] [-c <deflate compression level>]\n");
+ printf(" [-r <random seed>] [-q] <# of records>\n");
+ printf("\n");
+ printf("<# of records to write between flushing file contents> should be 0\n");
+ printf("(for no flushing) or between 1 and (<# of records> - 1).\n");
+ printf("\n");
+ printf("<index type> should be b2 or ea\n");
+ printf("\n");
+ printf("<deflate compression level> should be -1 (for no compression) or 0-9\n");
+ printf("\n");
+ printf("<# of records> must be specified.\n");
+ printf("\n");
+ printf("Defaults to flushing every 10000 records ('-f 10000'),\n");
+ printf("v1 b-tree indexing (-i b1), compression ('-c -1'),\n");
+ printf("will generate a random seed (no -r given), and verbose (no '-q' given)\n");
+ printf("\n");
+ exit(1);
+} /* usage() */
+
+/*
+ * Can test with different scenarios as below:
+ * 1) create_file(), create_datasets(), H5Fstart_swmr_write(), add_records(), H5Fclose().
+ * 2) create_file(), create_close_datasets(), open_datasets(), H5Fstart_swmr_write(), add_records(), H5Fclose().
+ * 3) create_file(), create_close_datasets(), H5Fclose(),
+ * open_file(), open_dataset(), H5Fstart_swmr_write(), add_records(), H5Fclose().
+ */
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nrecords = 0; /* # of records to append */
+ long flush_count = 10000; /* # of records to write between flushing file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ int comp_level = -1; /* Compression level (-1 is no compression) */
+ const char *index_type = "b1"; /* Chunk index type */
+ unsigned u; /* Local index variable */
+ int temp; /* Temporary variable */
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* Compress dataset chunks */
+ case 'c':
+ comp_level = atoi(argv[u + 1]);
+ if(comp_level < -1 || comp_level > 9)
+ usage();
+ u += 2;
+ break;
+
+ /* Chunk index type */
+ case 'i':
+ index_type = argv[u + 1];
+ if(strcmp(index_type, "ea")
+ && strcmp(index_type, "b2"))
+ usage();
+ u += 2;
+ break;
+
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = atol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = atoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nrecords = atol(argv[u]);
+ if(nrecords <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+
+ if(nrecords <= 0)
+ usage();
+ if(flush_count >= nrecords)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ fprintf(stderr, "Parameters:\n");
+ fprintf(stderr, "\tindex type = %s\n", index_type);
+ fprintf(stderr, "\tcompression level = %d\n", comp_level);
+ fprintf(stderr, "\t# of records between flushes = %ld\n", flush_count);
+ fprintf(stderr, "\t# of records to write = %ld\n", nrecords);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ gettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ srandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ fprintf(stderr, "Using writer random seed: %u\n", random_seed);
+
+ /* Create the test file */
+ if((fid = create_file(FILENAME, verbose, index_type, random_seed)) < 0) {
+ fprintf(stderr, "Error creating the file...\n");
+ exit(1);
+ }
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Create the datasets in the file */
+ if(create_datasets(fid, comp_level, verbose) < 0) {
+ fprintf(stderr, "Error creating datasets...\n");
+ exit(1);
+ }
+
+ /* Enable SWMR writing mode */
+ if(H5Fstart_swmr_write(fid) < 0) {
+ fprintf(stderr, "Error starting SWMR writing mode...\n");
+ exit(1);
+ }
+
+#ifdef OUT
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Creating and closing datasets: %s\n", FILENAME);
+
+ /* Create and close the datasets in the file */
+ if(create_close_datasets(fid, comp_level, verbose) < 0) {
+ fprintf(stderr, "Error creating datasets...\n");
+ exit(1);
+ }
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0) {
+ fprintf(stderr, "Error closing file!\n");
+ exit(1);
+ } /* end if */
+
+ /* Open the file */
+ if((fid = open_file(FILENAME, verbose)) < 0) {
+ fprintf(stderr, "Error opening the file...\n");
+ exit(1);
+ }
+
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Opening datasets: %s\n", FILENAME);
+
+ /* Open the file's datasets */
+ if(open_datasets(fid, verbose) < 0) {
+ fprintf(stderr, "Error opening datasets...\n");
+ exit(1);
+ } /* end if */
+
+
+ /* Enable SWMR writing mode */
+ if(H5Fstart_swmr_write(fid) < 0) {
+ fprintf(stderr, "Error starting SWMR writing mode...\n");
+ exit(1);
+ }
+#endif
+
+ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE);
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Adding records\n");
+
+ /* Append records to datasets */
+ if(add_records(fid, verbose, (unsigned long)nrecords, (unsigned long)flush_count) < 0) {
+ fprintf(stderr, "Error appending records to datasets!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ fprintf(stderr, "Error releasing symbols!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing the file\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ fprintf(stderr, "Error closing file!\n");
+ exit(1);
+ } /* end if */
+
+ return 0;
+} /* main() */
diff --git a/test/swmr_writer.c b/test/swmr_writer.c
new file mode 100644
index 0000000..4ab6287
--- /dev/null
+++ b/test/swmr_writer.c
@@ -0,0 +1,431 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_writer.c
+ *
+ * Purpose: Writes data to a randomly selected subset of the datasets
+ * in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_reader program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+#include <sys/time.h>
+
+#include "swmr_common.h"
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t open_skeleton(const char *filename, unsigned verbose, unsigned old);
+static int add_records(hid_t fid, unsigned verbose, unsigned long nrecords,
+ unsigned long flush_count);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_skeleton
+ *
+ * Purpose: Opens the SWMR HDF5 file and datasets.
+ *
+ * Parameters: const char *filename
+ * The filename of the SWMR HDF5 file to open
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * The dataset IDs are stored in a global array
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_skeleton(const char *filename, unsigned verbose, unsigned old)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ unsigned u, v; /* Local index variable */
+
+ assert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ if(!old) {
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ }
+
+#ifdef QAK
+ /* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ fprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ fprintf(stderr, "mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = 0;
+ } /* end for */
+
+ return fid;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: add_records
+ *
+ * Purpose: Writes a specified number of records to random datasets in
+ * the SWMR test file.
+ *
+ * Parameters: hid_t fid
+ * The file ID of the SWMR HDF5 file
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * unsigned long nrecords
+ * # of records to write to the datasets
+ *
+ * unsigned long flush_count
+ * # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+add_records(hid_t fid, unsigned verbose, unsigned long nrecords, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+ hsize_t dim[2] = {1, 0}; /* Dataspace dimensions */
+ symbol_t record; /* The record to add to the dataset */
+ H5AC_cache_config_t mdc_config_orig; /* Original metadata cache configuration */
+ H5AC_cache_config_t mdc_config_cork; /* Corked metadata cache configuration */
+ unsigned long rec_to_flush; /* # of records left to write before flush */
+ unsigned long u, v; /* Local index variables */
+
+ assert(fid >= 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ memset(&record, 0, sizeof(record));
+
+ /* Create a dataspace for the record to add */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Get the current metadata cache configuration, and set up the corked
+ * configuration */
+ mdc_config_orig.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ if(H5Fget_mdc_config(fid, &mdc_config_orig) < 0)
+ return -1;
+ memcpy(&mdc_config_cork, &mdc_config_orig, sizeof(mdc_config_cork));
+ mdc_config_cork.evictions_enabled = FALSE;
+ mdc_config_cork.incr_mode = H5C_incr__off;
+ mdc_config_cork.flash_incr_mode = H5C_flash_incr__off;
+ mdc_config_cork.decr_mode = H5C_decr__off;
+
+ /* Add records to random datasets, according to frequency distribution */
+ rec_to_flush = flush_count;
+ for(u = 0; u < nrecords; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Set the record's ID (equal to its position) */
+ record.rec_id = symbol->nrecords;
+
+ /* Get the coordinate to write */
+ start[1] = symbol->nrecords;
+
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ /*if(H5Fset_mdc_config(fid, &mdc_config_cork) < 0)
+ return(-1);*/
+
+ /* Extend the dataset's dataspace to hold the new record */
+ symbol->nrecords++;
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return -1;
+
+ /* Choose the last record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &record) < 0)
+ return -1;
+
+ /* Uncork the metadata cache */
+ /*if(H5Fset_mdc_config(fid, &mdc_config_orig) < 0)
+ return -1;*/
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ rec_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == rec_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ rec_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Close the datatype */
+ if(H5Tclose(tid) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_writer [-q] [-o] [-f <# of records to write between flushing\n");
+ printf(" file contents>] [-r <random seed>] <# of records>\n");
+ printf("\n");
+ printf("<# of records to write between flushing file contents> should be 0\n");
+ printf("(for no flushing) or between 1 and (<# of records> - 1).\n");
+ printf("\n");
+ printf("<# of records> must be specified.\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), latest format when opening file (no '-o' given),\n");
+ printf("flushing every 10000 records ('-f 10000'), and will generate a random seed (no -r given).\n");
+ printf("\n");
+ exit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nrecords = 0; /* # of records to append */
+ long flush_count = 10000; /* # of records to write between flushing file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned old = 0; /* Whether to use non-latest-format when opening file */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variable */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = atol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = atoi(argv[u + 1]);
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* Use non-latest-format when opening file */
+ case 'o':
+ old = 1;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nrecords = atol(argv[u]);
+ if(nrecords <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nrecords <= 0)
+ usage();
+ if(flush_count >= nrecords)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ fprintf(stderr, "Parameters:\n");
+ fprintf(stderr, "\t# of records between flushes = %ld\n", flush_count);
+ fprintf(stderr, "\t# of records to write = %ld\n", nrecords);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ gettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ srandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ fprintf(stderr, "Using writer random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose, old)) < 0) {
+ fprintf(stderr, "Error opening skeleton file!\n");
+ exit(1);
+ } /* end if */
+
+ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE);
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Adding records\n");
+
+ /* Append records to datasets */
+ if(add_records(fid, verbose, (unsigned long)nrecords, (unsigned long)flush_count) < 0) {
+ fprintf(stderr, "Error appending records to datasets!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ fprintf(stderr, "Error releasing symbols!\n");
+ exit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ fprintf(stderr, "Error closing file!\n");
+ exit(1);
+ } /* end if */
+
+ return 0;
+}
diff --git a/test/tarrold.h5 b/test/tarrold.h5
index 7747ce4..048838c 100644
--- a/test/tarrold.h5
+++ b/test/tarrold.h5
Binary files differ
diff --git a/test/test_filters_be.h5 b/test/test_filters_be.h5
index c4c127b..aadb372 100644
--- a/test/test_filters_be.h5
+++ b/test/test_filters_be.h5
Binary files differ
diff --git a/test/test_filters_le.h5 b/test/test_filters_le.h5
index ff8b846..c29fa0a 100644
--- a/test/test_filters_le.h5
+++ b/test/test_filters_le.h5
Binary files differ
diff --git a/test/test_usecases.sh.in b/test/test_usecases.sh.in
new file mode 100644
index 0000000..b88aee7
--- /dev/null
+++ b/test/test_usecases.sh.in
@@ -0,0 +1,169 @@
+#! /bin/bash
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+#
+# Tests the use cases of swmr features.
+#
+# Created:
+# Albert Cheng, 2013/06/01.
+# Modified:
+#
+
+# This is work in progress.
+# For now, it shows how to run the test cases programs. It only verifies the
+# exit codes are okay (0).
+
+srcdir=@srcdir@
+
+# Check to see if the VFD specified by the HDF5_DRIVER environment variable
+# supports SWMR.
+./swmr_check_compat_vfd
+rc=$?
+if [[ $rc != 0 ]] ; then
+ echo
+ echo "The VFD specified by the HDF5_DRIVER environment variable"
+ echo "does not support SWMR"
+ echo
+ echo "SWMR use case tests skipped"
+ echo
+ exit 0
+fi
+
+# Define symbols
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+EXIT_VALUE=$EXIT_SUCCESS # Default all tests succeed
+RESULT_PASSED=" PASSED"
+RESULT_FAILED="*FAILED*"
+RESULT_SKIP="-SKIP-"
+USECASES_PROGRAMS="use_append_chunk use_append_mchunks"
+TESTNAME="Use Case"
+
+# Define variables
+nerrors=0
+verbose=yes
+
+# Source in the output filter function definitions.
+. $srcdir/../bin/output_filter.sh
+
+# Define functions
+# Print a one-line message left justified in a field of 72 characters.
+# Results can be " PASSED", "*FAILED*", "-SKIP-", up to 8 characters
+# wide.
+# SPACES should be at least 71 spaces. ($* + ' ' + 71 + 8 >= 80)
+#
+TESTING() {
+ SPACES=" "
+ echo "$* $SPACES" | cut -c1-72 | tr -d '\012'
+}
+
+# Run a test and print " PASSED" or "*FAILED*" based on the program's
+# exit code. If a test fails then increment the `nerrors' global
+# variable and (if $verbose is set) display the test's (filtered)
+# output. The first argument is the program to run; any remaining
+# arguments are passed to it. Output is captured in $program.out and
+# $program.err, filtered, and removed unless $HDF5_NOCLEANUP has a
+# non-zero value.
+# ADD_H5_TEST
+TOOLTEST() {
+ program=$1
+ shift
+
+ actual="$program.out"
+ actual_err="$program.err"
+ actual_sav=${actual}-sav
+ actual_err_sav=${actual_err}-sav
+
+ # Run test.
+ TESTING $program $@
+ (
+ $RUNSERIAL ./$program "$@"
+ ) >$actual 2>$actual_err
+ exit_code=$?
+
+ # save actual and actual_err in case they are needed later.
+ cp $actual $actual_sav
+ STDOUT_FILTER $actual
+ cp $actual_err $actual_err_sav
+ STDERR_FILTER $actual_err
+ cat $actual_err >> $actual
+
+ if [ $exit_code -eq 0 ];then
+ echo "$RESULT_PASSED"
+ else
+ echo "$RESULT_FAILED"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && sed 's/^/ /' < $actual
+ fi
+
+ # Clean up output file
+ if test -z "$HDF5_NOCLEANUP"; then
+ rm -f $actual $actual_err $actual_sav $actual_err_sav $actual_ext
+ fi
+}
+
+# run tests for H5Odisable_mdc_flushes/H5Oenable_mdc_flushes/H5Oare_mdc_flushes_disabled here temporary
+USECORK=use_disable_mdc_flushes
+for p in $USECORK; do
+ TOOLTEST $p
+ TOOLTEST $p -y 3
+ TOOLTEST $p -n 3000
+ TOOLTEST $p -n 5000
+done
+
+# run write order test here temporary
+WRITEORDER=twriteorder
+for p in $WRITEORDER; do
+ TOOLTEST $p
+ TOOLTEST $p -b 1000
+ TOOLTEST $p -p 3000
+ TOOLTEST $p -n 2000
+ TOOLTEST $p -l w
+ TOOLTEST $p -l r
+done
+
+# Report test results
+if test $nerrors -eq 0 ; then
+ echo "$USECORK and $WRITEORDER tests passed."
+else
+ echo "$USECORK and $WRITEORDER tests failed with $nerrors errors."
+ EXIT_VALUE=$EXIT_FAILURE
+ nerrors=0 # reset nerrors for the regular tests below.
+fi
+
+# main body
+for p in $USECASES_PROGRAMS; do
+ TOOLTEST ./$p
+ TOOLTEST ./$p -z 256
+ tmpfile=/tmp/datatfile.$$
+ TOOLTEST ./$p -f $tmpfile; rm -f $tmpfile
+ TOOLTEST ./$p -l w
+ TOOLTEST ./$p -l r
+ # use case 1.9, testing with multi-planes chunks
+ TOOLTEST ./$p -z 256 -y 5 # 5 planes chunks
+ # cleanup temp datafile
+ if test -z "$HDF5_NOCLEANUP"; then
+ rm -f $p.h5
+ fi
+done
+
+
+# Report test results and exit
+if test $nerrors -eq 0 ; then
+ echo "All $TESTNAME tests passed."
+else
+ echo "$TESTNAME tests failed with $nerrors errors."
+ EXIT_VALUE=$EXIT_FAILURE
+fi
+
+exit $EXIT_VALUE
diff --git a/test/testflushrefresh.sh.in b/test/testflushrefresh.sh.in
new file mode 100644
index 0000000..e7593e3
--- /dev/null
+++ b/test/testflushrefresh.sh.in
@@ -0,0 +1,196 @@
+#! /bin/sh
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+#
+#
+# Test script for the flush/evict single objects feature.
+#
+# This test file doesn't actually perform any tests, rather, it acts
+# as a process manager for the 'flushrefresh' test file, which is where
+# the tests are actually housed. The reason this script exists is because
+# the verification of this feature needs to occur in separate processes
+# from the one in which the file is being manipulated in. (i.e., we have
+# a single writer process, and various reader processes spawning off
+# and doing the verification that individual objects are being
+# correctly flushed).
+#
+# Programmer:
+# Mike McGreevy
+# Tuesday, July 20, 2010
+
+###############################################################################
+## test variables
+###############################################################################
+
+# Number of errors encountered during test run.
+nerrors=0
+
+# Set up a function to check the current time since the epoch - ideally, we'd
+# like to use Perl. If it wasn't detected by configure, then use date, though
+# this is less portable and might cause problems on machines that don't
+# recognize the +%s option (like Solaris).
+#
+# Note that PERL will resolve to true or false, not a path.
+PERL=@PERL@
+if test -n "$PERL"; then
+ TimeStamp()
+ {
+ time=`perl -e 'print int(time)'`
+ echo "$time"
+ }
+else
+ TimeStamp()
+ {
+ time=`date +%s`
+ echo "$time"
+ }
+fi
+
+###############################################################################
+## Main
+###############################################################################
+# The build (current) directory might be different than the source directory.
+if test -z "$srcdir"; then
+ srcdir=.
+fi
+
+# Check to see if the VFD specified by the HDF5_DRIVER environment variable
+# supports SWMR.
+./swmr_check_compat_vfd
+rc=$?
+if [ $rc -ne 0 ] ; then
+ echo
+ echo "The VFD specified by the HDF5_DRIVER environment variable"
+ echo "does not support SWMR."
+ echo
+ echo "flush/refresh tests skipped"
+ echo
+ exit 0
+fi
+
+# ========================
+# Launch the Test Program.
+# ========================
+./flushrefresh &
+pid_main=$!
+
+# =================================================
+# Set up/initialize some variables to be used later
+# =================================================
+startsignal=flushrefresh_VERIFICATION_START
+endsignal=flushrefresh_VERIFICATION_DONE
+timeout_length=300
+timedout=0
+verification_done=0
+
+# =======================================
+# Run flush verification on test program.
+# =======================================
+
+until [ $verification_done -eq 1 ]; do
+
+ # Wait for signal from test program that verification routine can run.
+ before=`TimeStamp`
+ until [ -s $startsignal ]; do
+ after=`TimeStamp`
+ timediff=`expr $after - $before`
+ if [ $timediff -gt $timeout_length ]; then
+ nerrors=`expr $nerrors + 1`
+ timedout=1
+ break
+ fi
+ done
+
+ # Check to see if we timed out looking for the signal before continuing.
+ if [ $timedout -gt 0 ]; then
+ echo timed out waiting for signal from test program.
+ break
+ fi
+
+ # Read in test routine parameters from signal file, then delete signal file.
+ param1=`head -1 $startsignal`
+ param2=`tail -1 $startsignal`
+ rm $startsignal
+
+ # Check if we're done with verifications, otherwise run the specified verification.
+ if [ "$param1" = "VERIFICATION_DONE" ]; then
+ verification_done=1
+ echo "all flush verification complete" > $endsignal
+ else
+ ./flushrefresh $param1 $param2
+ echo "verification flush process done" > $endsignal
+ fi
+
+done
+
+# =========================================
+# Run refresh verification on test program.
+# =========================================
+if [ $timedout -eq 0 ]; then
+ until [ $verification_done -eq 2 ]; do
+
+ # Wait for signal from test program that verification routine can run.
+ before=`TimeStamp`
+ until [ -s $startsignal ]; do
+ after=`TimeStamp`
+ timediff=`expr $after - $before`
+ if [ $timediff -gt $timeout_length ]; then
+ nerrors=`expr $nerrors + 1`
+ timedout=1
+ break
+ fi
+ done
+
+ # Check to see if we timed out looking for the signal before continuing.
+ if [ $timedout -gt 0 ]; then
+ echo timed out waiting for signal from test program.
+ break
+ fi
+
+ # Read in test routine parameter from signal file, then delete signal file.
+ param1=`head -n 1 $startsignal`
+ rm $startsignal
+
+ # Check if we're done with verifications, otherwise run the specified verification.
+ if [ "$param1" = "VERIFICATION_DONE" ]; then
+ verification_done=2
+ echo "all refresh verification complete" > $endsignal
+ else
+ ./flushrefresh $param1
+ echo "refresh verification process done" > $endsignal
+ fi
+
+ done
+fi
+
+# ============================================
+# Wait for main to finish up, and end testing.
+# ============================================
+wait $pid_main
+if test $? -ne 0; then
+ echo flushrefresh had error
+ nerrors=`expr $nerrors + 1`
+fi
+
+###############################################################################
+## Report and exit
+###############################################################################
+
+if test $nerrors -eq 0 ; then
+ echo "flush/refresh objects tests passed."
+ exit 0
+else
+ echo "flush/refresh objects tests failed with $nerrors errors."
+ exit 1
+fi
diff --git a/test/testswmr.sh.in b/test/testswmr.sh.in
new file mode 100644
index 0000000..5165fd1
--- /dev/null
+++ b/test/testswmr.sh.in
@@ -0,0 +1,497 @@
+#! /bin/bash
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+#
+# Tests for the swmr feature.
+#
+# Created:
+# Albert Cheng, 2009/07/22
+
+srcdir=@srcdir@
+
+###############################################################################
+## test parameters
+###############################################################################
+
+Nreaders=5 # number of readers to launch
+Nrdrs_spa=3 # number of sparse readers to launch
+Nrecords=200000 # number of records to write
+Nrecs_rem=40000 # number of times to shrink
+Nrecs_spa=20000 # number of records to write in the sparse test
+Nsecs_add=5 # number of seconds per read interval
+Nsecs_rem=3 # number of seconds per read interval
+Nsecs_addrem=8 # number of seconds per read interval
+nerrors=0
+
+###############################################################################
+## definitions for message file to coordinate test runs
+###############################################################################
+WRITER_MESSAGE=SWMR_WRITER_MESSAGE # The message file created by writer that the open is complete
+ # This should be the same as the define in "./swmr_common.h"
+MESSAGE_TIMEOUT=300 # Message timeout length in secs
+ # This should be the same as the define in "./h5test.h"
+
+###############################################################################
+## short hands and function definitions
+###############################################################################
+DPRINT=: # Set to "echo Debug:" for debugging printing,
+ # else ":" for noop.
+IFDEBUG=: # Set to null to turn on debugging, else ":" for noop.
+
+# Print a one-line message left justified in a field of 70 characters
+# beginning with the word "Testing".
+#
+TESTING() {
+ SPACES=" "
+ echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# To wait for the writer message file or till the maximum # of seconds is reached
+# $1 is the message file to wait for
+# This performs similar function as the routine h5_wait_message() in test/h5test.c
+WAIT_MESSAGE() {
+ message=$1 # Get the name of the message file to wait for
+ t0=`date +%s` # Get current time in seconds
+ difft=0 # Initialize the time difference
+ mexist=0 # Indicate whether the message file is found
+ while [ $difft -lt $MESSAGE_TIMEOUT ] ; # Loop till message times out
+ do
+ t1=`date +%s` # Get current time in seconds
+ difft=`expr $t1 - $t0` # Calculate the time difference
+ if [ -e $message ]; then # If message file is found:
+ mexist=1 # indicate the message file is found
+ rm $message # remove the message file
+ break # get out of the while loop
+ fi
+ done;
+ if test $mexist -eq 0; then
+ # Issue warning that the writer message file is not found, continue with launching the reader(s)
+ echo warning: $WRITER_MESSAGE is not found after waiting $MESSAGE_TIMEOUT seconds
+ else
+ echo $WRITER_MESSAGE is found
+ fi
+}
+
+###############################################################################
+## Main
+##
+## Modifications:
+## Vailin Choi; July 2013
+## Add waiting of message file before launching the reader(s).
+## Due to the implementation of file locking, coordination
+## is needed in file opening for the writer/reader tests
+## to proceed as expected.
+##
+###############################################################################
+# The build (current) directory might be different than the source directory.
+if test -z "$srcdir"; then
+ srcdir=.
+fi
+
+# Check to see if the VFD specified by the HDF5_DRIVER environment variable
+# supports SWMR.
+./swmr_check_compat_vfd
+rc=$?
+if [ $rc -ne 0 ] ; then
+ echo
+ echo "The VFD specified by the HDF5_DRIVER environment variable"
+ echo "does not support SWMR."
+ echo
+ echo "SWMR acceptance tests skipped"
+ echo
+ exit 0
+fi
+
+# Parse options (none accepted at this time)
+while [ $# -gt 0 ]; do
+ case "$1" in
+ *) # unknown option
+ echo "$0: Unknown option ($1)"
+ exit 1
+ ;;
+ esac
+done
+
+# Loop over index types
+for index_type in "-i ea" "-i b2"
+do
+ # Try with and without compression
+ for compress in "" "-c 5"
+ do
+ echo
+ echo "###############################################################################"
+ echo "## Generator test"
+ echo "###############################################################################"
+ # Launch the Generator without SWMR_WRITE
+ echo launch the swmr_generator
+ ./swmr_generator $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Launch the Generator with SWMR_WRITE
+ echo launch the swmr_generator with SWMR_WRITE
+ ./swmr_generator -s $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+ fi
+
+ echo
+ echo "###############################################################################"
+ echo "## Use H5Fstart_swmr_write() to enable SWMR writing mode"
+ echo "###############################################################################"
+
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ #
+ # Launch the Writer
+ echo launch the swmr_start_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_start_write $compress $index_type $Nrecords $seed &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+ #
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+
+ #
+ # Launch the Readers
+ #declare -a seeds=(<seed1> <seed2> <seed3> ... )
+ echo launch $Nreaders swmr_readers
+ pid_readers=""
+ n=0
+ while [ $n -lt $Nreaders ]; do
+ #seed="-r ${seeds[$n]}"
+ seed=""
+ ./swmr_reader $Nsecs_add $seed &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+ fi
+
+ echo
+ echo "###############################################################################"
+ echo "## Writer test - test expanding the dataset"
+ echo "###############################################################################"
+
+ # Launch the Generator
+ echo launch the swmr_generator
+ ./swmr_generator -s $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ #
+ # Launch the Writer
+ echo launch the swmr_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_writer -o $Nrecords $seed &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+ #
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+ #
+ # Launch the Readers
+ #declare -a seeds=(<seed1> <seed2> <seed3> ... )
+ echo launch $Nreaders swmr_readers
+ pid_readers=""
+ n=0
+ while [ $n -lt $Nreaders ]; do
+ #seed="-r ${seeds[$n]}"
+ seed=""
+ ./swmr_reader $Nsecs_add $seed &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+ fi
+
+ echo
+ echo "###############################################################################"
+ echo "## Remove test - test shrinking the dataset"
+ echo "###############################################################################"
+ #
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ # Launch the Remove Writer
+ echo launch the swmr_remove_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_remove_writer -o $Nrecs_rem $seed &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+ #
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+ #
+ # Launch the Remove Readers
+ #declare -a seeds=(<seed1> <seed2> <seed3> ... )
+ n=0
+ pid_readers=""
+ echo launch $Nreaders swmr_remove_readers
+ while [ $n -lt $Nreaders ]; do
+ #seed="-r ${seeds[$n]}"
+ seed=""
+ ./swmr_remove_reader $Nsecs_rem $seed &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+ fi
+
+ echo
+ echo "###############################################################################"
+ echo "## Add/remove test - randomly grow or shrink the dataset"
+ echo "###############################################################################"
+
+ # Launch the Generator
+ echo launch the swmr_generator
+ ./swmr_generator $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Launch the Writer (not in parallel - just to rebuild the datasets)
+ echo launch the swmr_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_writer $Nrecords $seed
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ #
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ #
+ # Launch the Add/Remove Writer
+ echo launch the swmr_addrem_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_addrem_writer $Nrecords $seed &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+ #
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+ #
+ # Launch the Add/Remove Readers
+ #declare -a seeds=(<seed1> <seed2> <seed3> ... )
+ n=0
+ pid_readers=""
+ echo launch $Nreaders swmr_remove_readers
+ while [ $n -lt $Nreaders ]; do
+ #seed="-r ${seeds[$n]}"
+ seed=""
+ ./swmr_remove_reader $Nsecs_addrem $seed &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+ fi
+
+ echo
+ echo "###############################################################################"
+ echo "## Sparse writer test - test writing to random locations in the dataset"
+ echo "###############################################################################"
+
+ # Launch the Generator
+ # NOTE: Random seed is shared between readers and writers and is
+ # created by the generator.
+ echo launch the swmr_generator
+ seed="" # Put -r <random seed> command here
+ ./swmr_generator $compress $index_type $seed
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ #
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ # Launch the Sparse writer
+ echo launch the swmr_sparse_writer
+ nice -n 20 ./swmr_sparse_writer $Nrecs_spa &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+ #
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+ #
+ # Launch the Sparse readers
+ n=0
+ pid_readers=""
+ echo launch $Nrdrs_spa swmr_sparse_readers
+ while [ $n -lt $Nrdrs_spa ]; do
+ # The sparse writer spits out a LOT of data so it's set to 'quiet'
+ ./swmr_sparse_reader -q $Nrecs_spa &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Collect exit code of the readers
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+ fi
+ done
+done
+
+###############################################################################
+## Report and exit
+###############################################################################
+
+$DPRINT nerrors=$nerrors
+if test $nerrors -eq 0 ; then
+ echo "SWMR tests passed."
+ exit 0
+else
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+fi
diff --git a/test/testvdsswmr.sh.in b/test/testvdsswmr.sh.in
new file mode 100644
index 0000000..d69b8c0
--- /dev/null
+++ b/test/testvdsswmr.sh.in
@@ -0,0 +1,199 @@
+#! /bin/bash
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+#
+# Tests for the swmr feature using virtual datasets.
+#
+# Created:
+# Dana Robinson, November 2015
+
+srcdir=@srcdir@
+
+###############################################################################
+## test parameters
+###############################################################################
+
+Nwriters=6 # number of writers (1 per source dataset)
+Nreaders=5 # number of readers to launch
+nerrors=0
+
+###############################################################################
+## definitions for message file to coordinate test runs
+###############################################################################
+WRITER_MESSAGE=SWMR_WRITER_MESSAGE # The message file created by writer that the open is complete
+ # This should be the same as the define in "./swmr_common.h"
+MESSAGE_TIMEOUT=300 # Message timeout length in secs
+ # This should be the same as the define in "./h5test.h"
+
+###############################################################################
+## short hands and function definitions
+###############################################################################
+DPRINT=: # Set to "echo Debug:" for debugging printing,
+ # else ":" for noop.
+IFDEBUG=: # Set to null to turn on debugging, else ":" for noop.
+
+# Print a one-line message left justified in a field of 70 characters
+# beginning with the word "Testing".
+#
+TESTING() {
+ SPACES=" "
+ echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# To wait for the writer message file or till the maximum # of seconds is reached
+# $1 is the message file to wait for
+# This performs similar function as the routine h5_wait_message() in test/h5test.c
+WAIT_MESSAGE() {
+ message=$1 # Get the name of the message file to wait for
+ t0=`date +%s` # Get current time in seconds
+ difft=0 # Initialize the time difference
+ mexist=0 # Indicate whether the message file is found
+ while [ $difft -lt $MESSAGE_TIMEOUT ] ; # Loop till message times out
+ do
+ t1=`date +%s` # Get current time in seconds
+ difft=`expr $t1 - $t0` # Calculate the time difference
+ if [ -e $message ]; then # If message file is found:
+ mexist=1 # indicate the message file is found
+ rm $message # remove the message file
+ break # get out of the while loop
+ fi
+ done;
+ if test $mexist -eq 0; then
+ # Issue warning that the writer message file is not found, continue with launching the reader(s)
+ echo warning: $WRITER_MESSAGE is not found after waiting $MESSAGE_TIMEOUT seconds
+ else
+ echo $WRITER_MESSAGE is found
+ fi
+}
+
+###############################################################################
+## Main
+###############################################################################
+# The build (current) directory might be different than the source directory.
+if test -z "$srcdir"; then
+ srcdir=.
+fi
+
+# Check to see if the VFD specified by the HDF5_DRIVER environment variable
+# supports SWMR.
+./swmr_check_compat_vfd
+rc=$?
+if [ $rc -ne 0 ] ; then
+ echo
+ echo "The VFD specified by the HDF5_DRIVER environment variable"
+ echo "does not support SWMR."
+ echo
+ echo "SWMR acceptance tests skipped"
+ echo
+ exit 0
+fi
+
+# Parse options (none accepted at this time)
+while [ $# -gt 0 ]; do
+ case "$1" in
+ *) # unknown option
+ echo "$0: Unknown option ($1)"
+ exit 1
+ ;;
+ esac
+done
+
+echo
+echo "###############################################################################"
+echo "## Basic VDS SWMR test - writing to a tiled plane"
+echo "###############################################################################"
+
+# Launch the file generator
+echo launch the generator
+./vds_swmr_gen
+if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+fi
+
+# Check for error and exit if one occurred
+$DPRINT nerrors=$nerrors
+if test $nerrors -ne 0 ; then
+ echo "VDS SWMR tests failed with $nerrors errors."
+ exit 1
+fi
+
+# Launch the writers
+echo "launch the $Nwriters SWMR VDS writers (1 per source)"
+pid_writers=""
+n=0
+while [ $n -lt $Nwriters ]; do
+ ./vds_swmr_writer $n &
+ pid_writers="$pid_writers $!"
+ n=`expr $n + 1`
+done
+$DPRINT pid_writers=$pid_writers
+$IFDEBUG ps
+
+# Sleep to ensure that the writers have started
+sleep 3
+
+# Launch the readers
+echo launch $Nreaders SWMR readers
+pid_readers=""
+n=0
+while [ $n -lt $Nreaders ]; do
+ ./vds_swmr_reader &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+done
+$DPRINT pid_readers=$pid_readers
+$IFDEBUG ps
+
+# Collect exit code of the writers
+for xpid in $pid_writers; do
+ $DPRINT checked writer $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+done
+
+# Collect exit code of the readers
+# (they usually finish after the writers)
+for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+done
+
+# Check for error and exit if one occurred
+$DPRINT nerrors=$nerrors
+if test $nerrors -ne 0 ; then
+ echo "VDS SWMR tests failed with $nerrors errors."
+ exit 1
+fi
+
+###############################################################################
+## Report and exit
+###############################################################################
+
+$DPRINT nerrors=$nerrors
+if test $nerrors -eq 0 ; then
+ echo "VDS SWMR tests passed."
+ exit 0
+else
+ echo "VDS SWMR tests failed with $nerrors errors."
+ exit 1
+fi
+
diff --git a/test/tfile.c b/test/tfile.c
index c36045b..8180396 100644
--- a/test/tfile.c
+++ b/test/tfile.c
@@ -26,6 +26,7 @@
#include "H5srcdir.h"
#include "H5Bprivate.h"
+#include "H5Iprivate.h"
#include "H5Pprivate.h"
/*
@@ -114,7 +115,7 @@
/* Declaration for test_get_obj_ids() */
#define FILE7 "tfile7.h5" /* Test file */
-#define NGROUPS 2
+#define N_GROUPS 2
#define NDSETS 4
const char *OLD_FILENAME[] = { /* Files created under 1.6 branch and 1.8 branch */
@@ -143,6 +144,11 @@ test_obj_count_and_id(hid_t, hid_t, hid_t, hid_t, hid_t, hid_t);
static void
check_file_id(hid_t, hid_t);
+/* Helper routine used by test_rw_noupdate() */
+static int cal_chksum(const char *file, uint32_t *chksum);
+
+static void test_rw_noupdate(void);
+
/****************************************************************
**
** test_file_create(): Low-level file creation I/O test routine.
@@ -962,7 +968,7 @@ create_objects(hid_t fid1, hid_t fid2, hid_t *ret_did, hid_t *ret_gid1,
static void
test_get_obj_ids(void)
{
- hid_t fid, gid[NGROUPS], dset[NDSETS];
+ hid_t fid, gid[N_GROUPS], dset[NDSETS];
hid_t filespace;
hsize_t file_dims[F2_RANK] = {F2_DIM0, F2_DIM1};
ssize_t oid_count, ret_count;
@@ -979,8 +985,8 @@ test_get_obj_ids(void)
filespace = H5Screate_simple(F2_RANK, file_dims, NULL);
CHECK(filespace, FAIL, "H5Screate_simple");
- /* creates NGROUPS groups under the root group */
- for(m = 0; m < NGROUPS; m++) {
+ /* creates N_GROUPS groups under the root group */
+ for(m = 0; m < N_GROUPS; m++) {
sprintf(gname, "group%d", m);
gid[m] = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
CHECK(gid[m], FAIL, "H5Gcreate2");
@@ -993,10 +999,10 @@ test_get_obj_ids(void)
CHECK(dset[n], FAIL, "H5Dcreate2");
}
- /* The number of opened objects should be NGROUPS + NDSETS + 1. One is opened file. */
+ /* The number of opened objects should be N_GROUPS + NDSETS + 1. One is opened file. */
oid_count = H5Fget_obj_count(fid, H5F_OBJ_ALL);
CHECK(oid_count, FAIL, "H5Fget_obj_count");
- VERIFY(oid_count, (NGROUPS + NDSETS + 1), "H5Fget_obj_count");
+ VERIFY(oid_count, (N_GROUPS + NDSETS + 1), "H5Fget_obj_count");
oid_list = (hid_t *)HDcalloc((size_t)oid_list_size, sizeof(hid_t));
CHECK(oid_list, NULL, "HDcalloc");
@@ -1017,11 +1023,11 @@ test_get_obj_ids(void)
}
}
- /* The number of opened objects should be NGROUPS + 1 + 1. The first one is opened file. The second one
+ /* The number of opened objects should be N_GROUPS + 1 + 1. The first one is opened file. The second one
* is the dataset ID left open from the previous around of H5Fget_obj_ids */
oid_count = H5Fget_obj_count(fid, H5F_OBJ_ALL);
CHECK(oid_count, FAIL, "H5Fget_obj_count");
- VERIFY(oid_count, NGROUPS + 2, "H5Fget_obj_count");
+ VERIFY(oid_count, N_GROUPS + 2, "H5Fget_obj_count");
/* Get the IDs of the left opend objects */
ret_count = H5Fget_obj_ids(fid, H5F_OBJ_ALL, (size_t)oid_list_size, oid_list);
@@ -2318,93 +2324,101 @@ test_cached_stab_info(void)
CHECK(ret, FAIL, "H5Fclose");
} /* end test_cached_stab_info() */
+/*
+ * To calculate the checksum for a file.
+ * This is a helper routine for test_rw_noupdate().
+ */
+static int
+cal_chksum(const char *file, uint32_t *chksum)
+{
+ int curr_num_errs = GetTestNumErrs(); /* Retrieve the current # of errors */
+ int fdes = -1; /* File descriptor */
+ void *file_data = NULL; /* Copy of file data */
+ ssize_t bytes_read; /* # of bytes read */
+ h5_stat_t sb; /* Stat buffer for file */
+ herr_t ret; /* Generic return value */
+
+ /* Open the file */
+ fdes = HDopen(file, O_RDONLY, 0);
+ CHECK(fdes, FAIL, "HDopen");
+
+ /* Retrieve the file's size */
+ ret = HDfstat(fdes, &sb);
+ CHECK(fdes, FAIL, "HDfstat");
+
+ /* Allocate space for the file data */
+ file_data = HDmalloc((size_t)sb.st_size);
+ CHECK(file_data, NULL, "HDmalloc");
+
+ if(file_data) {
+ /* Read file's data into memory */
+ bytes_read = HDread(fdes, file_data, (size_t)sb.st_size);
+ CHECK(bytes_read == sb.st_size, FALSE, "HDmalloc");
+
+ /* Calculate checksum */
+ *chksum = H5_checksum_lookup3(file_data, sizeof(file_data), 0);
+
+ /* Free memory */
+ HDfree(file_data);
+ }
+
+ /* Close the file */
+ ret = HDclose(fdes);
+ CHECK(ret, FAIL, "HDclose");
+
+ return((GetTestNumErrs() == curr_num_errs) ? 0 : -1);
+} /* cal_chksum() */
+
/****************************************************************
**
** test_rw_noupdate(): low-level file test routine.
** This test checks to ensure that opening and closing a file
** with read/write permissions does not write anything to the
** file if the file does not change.
+** Due to the implementation of file locking (status_flags in
+** the superblock is used), this test is changed to use checksum
+** instead of timestamp to verify the file is not changed.
**
-** Programmer: Mike McGreevy
-** mamcgree@hdfgroup.org
-** June 29, 2009
+** Programmer: Vailin Choi; July 2013
**
*****************************************************************/
static void
test_rw_noupdate(void)
{
- int fd; /* File Descriptor */
- h5_stat_t sb1, sb2; /* Info from 'stat' call */
- double diff; /* Difference in modification times */
herr_t ret; /* Generic return value */
+ hid_t fid; /* File ID */
+ uint32_t chksum1, chksum2; /* Checksum value */
/* Output message about test being performed */
MESSAGE(5, ("Testing to verify that nothing is written if nothing is changed.\n"));
- /* First make sure the stat function behaves as we expect - the modification time
- * is the time that the file was modified last time. */
- fd = HDopen(SFILE1, O_RDWR | O_CREAT | O_TRUNC, 0666);
- CHECK(fd, FAIL, "HDopen");
- ret = HDclose(fd);
- CHECK(ret, FAIL, "HDclose");
-
- /* Determine File's Initial Timestamp */
- ret = HDstat(SFILE1, &sb1);
- VERIFY(ret, 0, "HDstat");
-
- /* Wait for 2 seconds */
- /* (This ensures a system time difference between the two file accesses) */
- HDsleep(2);
-
- fd = HDopen(SFILE1, O_RDWR, 0666);
- CHECK(fd, FAIL, "HDopen");
- ret = HDclose(fd);
- CHECK(ret, FAIL, "HDclose");
-
- /* Determine File's New Timestamp */
- ret = HDstat(SFILE1, &sb2);
- VERIFY(ret, 0, "HDstat");
-
- /* Get difference between timestamps */
- diff = HDdifftime(sb2.st_mtime, sb1.st_mtime);
+ /* Create and Close a HDF5 File */
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
- /* Check That Timestamps Are Equal */
- if(diff > 0.0F) {
- /* Output message about test being performed */
- MESSAGE(1, ("Testing to verify that nothing is written if nothing is changed: This test is skipped on this system because the modification time from stat is the same as the last access time.\n"));
- } /* end if */
- else {
- hid_t file_id; /* HDF5 File ID */
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
- /* Create and Close a HDF5 File */
- file_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- CHECK(file_id, FAIL, "H5Fcreate");
- ret = H5Fclose(file_id);
- CHECK(ret, FAIL, "H5Fclose");
+ /* Calculate checksum for the file */
+ ret = cal_chksum(FILE1, &chksum1);
+ CHECK(ret, FAIL, "HDopen");
- /* Determine File's Initial Timestamp */
- ret = HDstat(FILE1, &sb1);
- VERIFY(ret, 0, "HDfstat");
+ /* Open and close File With Read/Write Permission */
+ fid = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
- /* Wait for 2 seconds */
- /* (This ensures a system time difference between the two file accesses) */
- HDsleep(2);
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
- /* Open and Close File With Read/Write Permission */
- file_id = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT);
- CHECK(file_id, FAIL, "H5Fopen");
- ret = H5Fclose(file_id);
- CHECK(ret, FAIL, "H5Fclose");
+ /* Calculate checksum for the file */
+ ret = cal_chksum(FILE1, &chksum2);
+ CHECK(ret, FAIL, "HDopen");
- /* Determine File's New Timestamp */
- ret = HDstat(FILE1, &sb2);
- VERIFY(ret, 0, "HDstat");
+ /* The two checksums are the same, i.e. the file is not changed */
+ VERIFY(chksum1, chksum2, "Checksum");
- /* Ensure That Timestamps Are Equal */
- diff = HDdifftime(sb2.st_mtime, sb1.st_mtime);
- ret = (diff > 0.0F);
- VERIFY(ret, 0, "Timestamp");
- } /* end else */
} /* end test_rw_noupdate() */
/****************************************************************
@@ -3205,8 +3219,10 @@ test_filespace_compatible(void)
CHECK(fd_new, FAIL, "HDopen");
/* Copy data */
- while((nread = HDread(fd_old, buf, (size_t)READ_OLD_BUFSIZE)) > 0)
- HDwrite(fd_new, buf, (size_t)nread);
+ while((nread = HDread(fd_old, buf, (size_t)READ_OLD_BUFSIZE)) > 0) {
+ ssize_t write_err = HDwrite(fd_new, buf, (size_t)nread);
+ CHECK(write_err, -1, "HDwrite");
+ } /* end while */
/* Close the files */
ret = HDclose(fd_old);
@@ -3394,7 +3410,7 @@ test_libver_bounds(void)
/* Run the tests */
test_libver_bounds_real(H5F_LIBVER_EARLIEST, 1, H5F_LIBVER_LATEST, 2);
- test_libver_bounds_real(H5F_LIBVER_LATEST, 2, H5F_LIBVER_EARLIEST, 1);
+ test_libver_bounds_real(H5F_LIBVER_LATEST, 2, H5F_LIBVER_EARLIEST, 2);
} /* end test_libver_bounds() */
/****************************************************************
@@ -3510,7 +3526,7 @@ test_libver_macros2(void)
/****************************************************************
**
-** test_deprec():
+** test_deprec():
** Test deprecated functionality.
**
****************************************************************/
@@ -3686,6 +3702,14 @@ test_file(void)
test_libver_bounds(); /* Test compatibility for file space management */
test_libver_macros(); /* Test the macros for library version comparison */
test_libver_macros2(); /* Test the macros for library version comparison */
+ /*
+ * The two tests: test_swmr_write() and test_swmr_read() are removed.
+ * They are covered by the following tests in swmr.c:
+ * test_file_lock_same();
+ * test_file_lock_swmr_same();
+ * test_file_lock_concur();
+ * test_file_lock_swmr_concur();
+ */
#ifndef H5_NO_DEPRECATED_SYMBOLS
test_deprec(); /* Test deprecated routines */
#endif /* H5_NO_DEPRECATED_SYMBOLS */
diff --git a/test/th5s.h5 b/test/th5s.h5
index 7a0bfb3..bc2b666 100644
--- a/test/th5s.h5
+++ b/test/th5s.h5
Binary files differ
diff --git a/test/tlayouto.h5 b/test/tlayouto.h5
index a038e68..3322020 100644
--- a/test/tlayouto.h5
+++ b/test/tlayouto.h5
Binary files differ
diff --git a/test/tmtimen.h5 b/test/tmtimen.h5
index 96e5fb3..007a6b6 100644
--- a/test/tmtimen.h5
+++ b/test/tmtimen.h5
Binary files differ
diff --git a/test/tmtimeo.h5 b/test/tmtimeo.h5
index 8cacf4a..c9dfcc4 100644
--- a/test/tmtimeo.h5
+++ b/test/tmtimeo.h5
Binary files differ
diff --git a/test/twriteorder.c b/test/twriteorder.c
new file mode 100644
index 0000000..0dd768c
--- /dev/null
+++ b/test/twriteorder.c
@@ -0,0 +1,438 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+*
+* Test program: twriteorder
+*
+* Test to verify that the write order is strictly consistent.
+* The SWMR feature requires that the order of write is strictly consistent.
+* "Strict consistency in computer science is the most stringent consistency
+* model. It says that a read operation has to return the result of the
+* latest write operation which occurred on that data item."--
+* (http://en.wikipedia.org/wiki/Linearizability#Definition_of_linearizability).
+* This is also an alternative form of what POSIX write requires: after a
+* write operation has returned success, all reads issued afterward should
+* get the same data the write has written.
+*
+* Created: Albert Cheng, 2013/8/28.
+* Modified:
+*************************************************************/
+
+/***********************************************************
+*
+* Algorithm
+*
+* The test simulates what SWMR does by writing chained blocks and see if
+* they can be read back correctly.
+* There is a writer process and multiple read processes.
+* The file is divided into 2KB partitions. Then writer writes 1 chained
+* block, each of 1KB big, in each partition after the first partition.
+* Each chained block has this structure:
+* Byte 0-3: offset address of its child block. The last child uses 0 as NULL.
+* Byte 4-1023: some artificial data.
+* The child block address of Block 1 is NULL (0).
+* The child block address of Block 2 is the offset address of Block 1.
+* The child block address of Block n is the offset address of Block n-1.
+* After all n blocks are written, the offset address of Block n is written
+* to the offset 0 of the first partition.
+* Therefore, by the time the offset address of Block n is written to this
+* position, all n chain-linked blocks have been written.
+*
+* The other reader processes will try to read the address value at the
+* offset 0. The value is initially NULL(0). When it changes to non-zero,
+* it signifies the writer process has written all the chain-link blocks
+* and they are ready for the reader processes to access.
+*
+* If the system, in which the writer and reader processes run, preserves
+* the write order, the readers will always get all chain-linked blocks
+* correctly. Otherwise, some reader processes may find unexpected block data.
+*
+*************************************************************/
+
+#include "h5test.h"
+
+#define DATAFILE "twriteorder.dat"
+/* #define READERS_MAX 10 */ /* max number of readers */
+#define BLOCKSIZE_DFT 1024 /* 1KB */
+#define PARTITION_DFT 2048 /* 2KB */
+#define NLINKEDBLOCKS_DFT 512 /* default 512 */
+#define SIZE_BLKADDR 4 /* expected sizeof blkaddr */
+#define Hgoto_error(val) {ret_value=val; goto done;}
+
+/* type declarations */
+typedef enum part_t {
+ UC_READWRITE =0, /* both writer and reader */
+ UC_WRITER, /* writer only */
+ UC_READER /* reader only */
+} part_t;
+
+/* prototypes */
+int create_wo_file(void);
+int write_wo_file(void);
+int read_wo_file(void);
+void usage(const char *prog);
+int setup_parameters(int argc, char * const argv[]);
+int parse_option(int argc, char * const argv[]);
+
+/* Global Variable definitions */
+const char *progname_g="twriteorder"; /* program name */
+int write_fd_g;
+int blocksize_g, part_size_g, nlinkedblock_g;
+part_t launch_g;
+
+/* Function definitions */
+
+/* Show help page */
+void
+usage(const char *prog)
+{
+ fprintf(stderr, "usage: %s [OPTIONS]\n", prog);
+ fprintf(stderr, " OPTIONS\n");
+ fprintf(stderr, " -h Print a usage message and exit\n");
+ fprintf(stderr, " -l w|r launch writer or reader only. [default: launch both]\n");
+ fprintf(stderr, " -b N Block size [default: %d]\n", BLOCKSIZE_DFT);
+ fprintf(stderr, " -p N Partition size [default: %d]\n", PARTITION_DFT);
+ fprintf(stderr, " -n N Number of linked blocks [default: %d]\n", NLINKEDBLOCKS_DFT);
+ fprintf(stderr, " where N is an integer value\n");
+ fprintf(stderr, "\n");
+}
+
+/* Setup test parameters by parsing command line options.
+ * Setup default values if not set by options. */
+int
+parse_option(int argc, char * const argv[])
+{
+ int ret_value=0;
+ int c;
+ /* command line options: See function usage for a description */
+ const char *cmd_options = "hb:l:n:p:";
+
+ /* suppress getopt from printing error */
+ opterr = 0;
+
+ while (1){
+ c = getopt (argc, argv, cmd_options);
+ if (-1 == c)
+ break;
+ switch (c) {
+ case 'h':
+ usage(progname_g);
+ exit(0);
+ break;
+ case 'b': /* number of planes to write/read */
+ if ((blocksize_g = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad blocksize %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'n': /* number of planes to write/read */
+ if ((nlinkedblock_g = atoi(optarg)) < 2){
+ fprintf(stderr, "bad number of linked blocks %s, must be greater than 1.\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'p': /* number of planes to write/read */
+ if ((part_size_g = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad partition size %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'l': /* launch reader or writer only */
+ switch (*optarg) {
+ case 'r': /* reader only */
+ launch_g = UC_READER;
+ break;
+ case 'w': /* writer only */
+ launch_g = UC_WRITER;
+ break;
+ default:
+ fprintf(stderr, "launch value(%c) should be w or r only.\n", *optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ break;
+ }
+ printf("launch = %d\n", launch_g);
+ break;
+ case '?':
+ fprintf(stderr, "getopt returned '%c'.\n", c);
+ usage(progname_g);
+ Hgoto_error(-1);
+ default:
+ fprintf(stderr, "getopt returned unexpected value.\n");
+ fprintf(stderr, "Unexpected value is %d\n", c);
+ Hgoto_error(-1);
+ }
+ }
+
+ /* verify partition size must be >= blocksize */
+ if (part_size_g < blocksize_g ){
+ fprintf(stderr, "Blocksize %d should not be bigger than partition size %d\n",
+ blocksize_g, part_size_g);
+ Hgoto_error(-1);
+ }
+
+done:
+ /* All done. */
+ return(ret_value);
+}
+
+/* Setup parameters for the test case.
+ * Return: 0 succeed; -1 fail.
+ */
+int setup_parameters(int argc, char * const argv[])
+{
+ /* test case defaults */
+ blocksize_g = BLOCKSIZE_DFT;
+ part_size_g = PARTITION_DFT;
+ nlinkedblock_g = NLINKEDBLOCKS_DFT;
+ launch_g = UC_READWRITE;
+
+ /* parse options */
+ if (parse_option(argc, argv) < 0){
+ return(-1);
+ }
+
+ /* show parameters and return */
+ printf("blocksize = %ld\n", (long)blocksize_g);
+ printf("part_size = %ld\n", (long)part_size_g);
+ printf("nlinkedblock = %ld\n", (long)nlinkedblock_g);
+ printf("launch = %d\n", launch_g);
+ return(0);
+}
+
+/* Create the test file with initial "empty" file, that is,
+ * partition 0 has a null (0) address.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+int create_wo_file(void)
+{
+ int blkaddr=0; /* blkaddress of next linked block */
+ int ret_code;
+
+ /* Create the data file */
+ if ((write_fd_g = HDopen(DATAFILE, O_RDWR|O_TRUNC|O_CREAT, 0664)) < 0) {
+ printf("WRITER: error from open\n");
+ return -1;
+ }
+ blkaddr=0;
+ /* write it to partition 0 */
+ if ((ret_code=HDwrite(write_fd_g, &blkaddr, (size_t)SIZE_BLKADDR)) != SIZE_BLKADDR){
+ printf("blkaddr write failed\n");
+ return -1;
+ }
+
+ /* File initialized, return success */
+ return 0;
+}
+
+int write_wo_file(void)
+{
+ int blkaddr;
+ int blkaddr_old=0;
+ int i;
+ char buffer[BLOCKSIZE_DFT];
+ int ret_code;
+
+
+ /* write block 1, 2, ... */
+ for (i=1; i<nlinkedblock_g; i++){
+ /* calculate where to write this block */
+ blkaddr = i*part_size_g + i;
+ /* store old block address in byte 0-3 */
+ HDmemcpy(&buffer[0], &blkaddr_old, sizeof(blkaddr_old));
+ /* fill the rest with the lowest byte of i */
+ HDmemset(&buffer[4], i & 0xff, (size_t) (BLOCKSIZE_DFT-4));
+ /* write the block */
+#ifdef DEBUG
+ printf("writing block at %d\n", blkaddr);
+#endif
+ HDlseek(write_fd_g, (HDoff_t)blkaddr, SEEK_SET);
+ if ((ret_code=HDwrite(write_fd_g, buffer, (size_t)blocksize_g)) != blocksize_g){
+ printf("blkaddr write failed in partition %d\n", i);
+ return -1;
+ }
+ blkaddr_old = blkaddr;
+ }
+ /* write the last blkaddr in partition 0 */
+ HDlseek(write_fd_g, (HDoff_t)0, SEEK_SET);
+ if ((ret_code=HDwrite(write_fd_g, &blkaddr_old, (size_t)sizeof(blkaddr_old))) != sizeof(blkaddr_old)){
+ printf("blkaddr write failed in partition %d\n", 0);
+ return -1;
+ }
+
+ /* all writes done. return success. */
+ printf("wrote %d blocks\n", nlinkedblock_g);
+ return 0;
+}
+
+int read_wo_file(void)
+{
+ int read_fd;
+ int blkaddr=0;
+ int ret_code;
+ int linkedblocks_read=0;
+ char buffer[BLOCKSIZE_DFT];
+
+ /* Open the data file */
+ if ((read_fd = HDopen(DATAFILE, O_RDONLY, 0)) < 0) {
+ printf("READER: error from open\n");
+ return -1;
+ }
+ /* keep reading the initial block address until it is non-zero before proceeding. */
+ while (blkaddr == 0){
+ HDlseek(read_fd, (HDoff_t)0, SEEK_SET);
+ if ((ret_code=HDread(read_fd, &blkaddr, (size_t)sizeof(blkaddr))) != sizeof(blkaddr)){
+ printf("blkaddr read failed in partition %d\n", 0);
+ return -1;
+ }
+ }
+ linkedblocks_read++;
+
+ /* got a non-zero blkaddr. Proceed down the linked blocks. */
+#ifdef DEBUG
+ printf("got initial block address=%d\n", blkaddr);
+#endif
+ while (blkaddr != 0){
+ HDlseek(read_fd, (HDoff_t)blkaddr, SEEK_SET);
+ if ((ret_code=HDread(read_fd, buffer, (size_t)blocksize_g)) != blocksize_g){
+ printf("blkaddr read failed in partition %d\n", 0);
+ return -1;
+ }
+ linkedblocks_read++;
+ /* retrieve the block address in byte 0-3 */
+ HDmemcpy(&blkaddr, &buffer[0], sizeof(blkaddr));
+#ifdef DEBUG
+ printf("got next block address=%d\n", blkaddr);
+#endif
+ }
+
+ printf("read %d blocks\n", linkedblocks_read);
+ return 0;
+}
+
+
+/* Overall Algorithm:
+ * Parse options from user;
+ * Generate/pre-create the test file needed and close it;
+ * fork: child processes become the reader processes;
+ * while parent process continues as the writer process;
+ * both run till ending conditions are met.
+ */
+int
+main(int argc, char *argv[])
+{
+ /*pid_t childpid[READERS_MAX];
+ int child_ret_value[READERS_MAX];*/
+ pid_t childpid=0;
+ int child_ret_value;
+ pid_t mypid, tmppid;
+ int child_status;
+ int child_wait_option=0;
+ int ret_value = 0;
+
+ /* initialization */
+ if (setup_parameters(argc, argv) < 0){
+ Hgoto_error(1);
+ }
+
+ /* ==============================================================*/
+ /* UC_READWRITE: create datafile, launch both reader and writer. */
+ /* UC_WRITER: create datafile, skip reader, launch writer. */
+ /* UC_READER: skip create, launch reader, exit. */
+ /* ==============================================================*/
+ /* ============*/
+ /* Create file */
+ /* ============*/
+ if (launch_g != UC_READER){
+ printf("Creating skeleton data file for test...\n");
+ if (create_wo_file() < 0){
+ fprintf(stderr, "***encounter error\n");
+ Hgoto_error(1);
+ }else
+ printf("File created.\n");
+ }
+ /* flush output before possible fork */
+ HDfflush(stdout);
+
+ if (launch_g==UC_READWRITE){
+ /* fork process */
+ if((childpid = fork()) < 0) {
+ perror("fork");
+ Hgoto_error(1);
+ };
+ };
+ mypid = getpid();
+
+ /* ============= */
+ /* launch reader */
+ /* ============= */
+ if (launch_g != UC_WRITER){
+ /* child process launch the reader */
+ if(0 == childpid) {
+ printf("%d: launch reader process\n", mypid);
+ if (read_wo_file() < 0){
+ fprintf(stderr, "read_wo_file encountered error\n");
+ exit(1);
+ }
+ /* Reader is done. Clean up by removing the data file */
+ HDremove(DATAFILE);
+ exit(0);
+ }
+ }
+
+ /* ============= */
+ /* launch writer */
+ /* ============= */
+ /* this process continues to launch the writer */
+ printf("%d: continue as the writer process\n", mypid);
+ if (write_wo_file() < 0){
+ fprintf(stderr, "write_wo_file encountered error\n");
+ Hgoto_error(1);
+ }
+
+ /* ================================================ */
+ /* If readwrite, collect exit code of child process */
+ /* ================================================ */
+ if (launch_g == UC_READWRITE){
+ if ((tmppid = waitpid(childpid, &child_status, child_wait_option)) < 0){
+ perror("waitpid");
+ Hgoto_error(1);
+ }
+ if (WIFEXITED(child_status)){
+ if ((child_ret_value=WEXITSTATUS(child_status)) != 0){
+ printf("%d: child process exited with non-zero code (%d)\n",
+ mypid, child_ret_value);
+ Hgoto_error(2);
+ }
+ } else {
+ printf("%d: child process terminated abnormally\n", mypid);
+ Hgoto_error(2);
+ }
+ }
+
+done:
+ /* Print result and exit */
+ if (ret_value != 0){
+ printf("Error(s) encountered\n");
+ }else{
+ printf("All passed\n");
+ }
+
+ return(ret_value);
+}
diff --git a/test/use.h b/test/use.h
new file mode 100644
index 0000000..45b4a49
--- /dev/null
+++ b/test/use.h
@@ -0,0 +1,69 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Use Case Header file: common definitions for use cases tests.
+ */
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/wait.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "h5test.h"
+
+/* Macro definitions */
+#define Hgoto_error(val) {ret_value=val; goto done;}
+#define Hgoto_done {goto done;}
+#define Chunksize_DFT 256 /* chunksize default */
+#define ErrorReportMax 10 /* max number of errors reported */
+/* these two definitions must match each other */
+#define UC_DATATYPE H5T_NATIVE_SHORT /* use case HDF5 data type */
+#define UC_CTYPE short /* use case C data type */
+#define UC_RANK 3 /* use case dataset rank */
+
+/* Name of message file that is sent by the writer */
+#define WRITER_MESSAGE "USE_WRITER_MESSAGE"
+
+/* type declarations */
+typedef enum part_t {
+ UC_READWRITE =0, /* both writer and reader */
+ UC_WRITER, /* writer only */
+ UC_READER /* reader only */
+} part_t;
+typedef struct options_t {
+ int chunksize; /* chunks are chunksize^2 planes */
+ int chunkplanes; /* number of planes per chunk, default 1 */
+ hsize_t chunkdims[UC_RANK]; /* chunk dims is (chunkplan, chunksize, chunksize) */
+ hsize_t dims[UC_RANK]; /* dataset initial dims */
+ hsize_t max_dims[UC_RANK]; /* dataset max dims */
+ hsize_t nplanes; /* number of planes to write, default proportional to chunksize */
+ char *filename; /* use case data filename */
+ part_t launch; /* launch writer, reader or both */
+ int use_swmr; /* use swmr open (1) or not */
+ int iterations; /* iterations, default 1 */
+} options_t;
+
+/* global variables declarations */
+extern options_t UC_opts; /* Use Case Options */
+extern const char *progname_g; /* Program name */
+
+/* prototype declarations */
+int parse_option(int argc, char * const argv[]);
+int setup_parameters(int argc, char * const argv[]);
+void show_parameters(void);
+void usage(const char *prog);
+int create_uc_file(void);
+int write_uc_file(hbool_t tosend);
+int read_uc_file(hbool_t towait);
diff --git a/test/use_append_chunk.c b/test/use_append_chunk.c
new file mode 100644
index 0000000..a3219c2
--- /dev/null
+++ b/test/use_append_chunk.c
@@ -0,0 +1,214 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Use Case 1.7 Appending a single chunk
+ * Description:
+ * Appending a single chunk of raw data to a dataset along an unlimited
+ * dimension within a pre-created file and reading the new data back.
+ * Goal:
+ * Read data appended by the Writer to a pre-existing dataset in a
+ * file. The dataset has one or more unlimited dimensions. The data is
+ * appended by a hyperslab that is contained in one chunk (for example,
+ * appending 2-dim planes along the slowest changing dimension in the
+ * 3-dim dataset).
+ * Level:
+ * User Level
+ * Guarantees:
+ * o Readers will see the modified dimension sizes after the Writer
+ * finishes HDF5 metadata updates and issues H5Fflush or H5Oflush calls.
+ * o Readers will see newly appended data after the Writer finishes
+ * the flush operation.
+ *
+ * Preconditions:
+ * o Readers are not allowed to modify the file. o All datasets
+ * that are modified by the Writer exist when the Writer opens the file.
+ * o All datasets that are modified by the Writer exist when a Reader
+ * opens the file. o Data is written by a hyperslab contained in
+ * one chunk.
+ *
+ * Main Success Scenario:
+ * 1. An application creates a file with required objects (groups,
+ * datasets, and attributes).
+ * 2. The Writer application opens the file and datasets in the file
+ * and starts adding data along the unlimited dimension using a hyperslab
+ * selection that corresponds to an HDF5 chunk.
+ * 3. A Reader opens the file and a dataset in a file, and queries
+ * the sizes of the dataset; if the extent of the dataset has changed,
+ * reads the appended data back.
+ *
+ * Discussion points:
+ * 1. Since the new data is written to the file, and metadata update
+ * operation of adding pointer to the newly written chunk is atomic and
+ * happens after the chunk is on the disk, only two things may happen
+ * to the Reader:
+ * o The Reader will not see new data.
+ * o The Reader will see all new data written by Writer.
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Created: Albert Cheng, 2013/5/28.
+ * Modified:
+ */
+
+#include "use.h"
+
+/* Global Variable definitions */
+options_t UC_opts; /* Use Case Options */
+const char *progname_g="use_append_chunk"; /* program name */
+
+/* Setup parameters for the use case.
+ * Return: 0 succeed; -1 fail.
+ */
+int setup_parameters(int argc, char * const argv[])
+{
+ /* use case defaults */
+ HDmemset(&UC_opts, 0, sizeof(options_t));
+ UC_opts.chunksize = Chunksize_DFT;
+ UC_opts.use_swmr = 1; /* use swmr open */
+ UC_opts.iterations = 1;
+ UC_opts.chunkplanes = 1;
+
+ /* parse options */
+ if (parse_option(argc, argv) < 0){
+ return(-1);
+ }
+ /* set chunk dims */
+ UC_opts.chunkdims[0] = UC_opts.chunkplanes;
+ UC_opts.chunkdims[1]=UC_opts.chunkdims[2]=UC_opts.chunksize;
+
+ /* set dataset initial and max dims */
+ UC_opts.dims[0] = 0;
+ UC_opts.max_dims[0] = H5S_UNLIMITED;
+ UC_opts.dims[1] = UC_opts.dims[2] = UC_opts.max_dims[1]=UC_opts.max_dims[2]=UC_opts.chunksize;
+
+ /* set nplanes */
+ if (UC_opts.nplanes == 0)
+ UC_opts.nplanes = UC_opts.chunksize;
+
+ /* show parameters and return */
+ show_parameters();
+ return(0);
+}
+
+
+/* Overall Algorithm:
+ * Parse options from user;
+ * Generate/pre-create the test files needed and close them;
+ * fork: child process becomes the reader process;
+ * while parent process continues as the writer process;
+ * both run till ending conditions are met.
+ */
+int
+main(int argc, char *argv[])
+{
+ pid_t childpid=0;
+ pid_t mypid, tmppid;
+ int child_status;
+ int child_wait_option=0;
+ int ret_value = 0;
+ int child_ret_value;
+ hbool_t send_wait = 0;
+
+ /* initialization */
+ if (setup_parameters(argc, argv) < 0){
+ Hgoto_error(1);
+ }
+
+ /* Determine the need to send/wait message file*/
+ if(UC_opts.launch == UC_READWRITE) {
+ HDunlink(WRITER_MESSAGE);
+ send_wait = 1;
+ }
+
+ /* ==============================================================*/
+ /* UC_READWRITE: create datafile, launch both reader and writer. */
+ /* UC_WRITER: create datafile, skip reader, launch writer. */
+ /* UC_READER: skip create, launch reader, exit. */
+ /* ==============================================================*/
+ /* ============*/
+ /* Create file */
+ /* ============*/
+ if (UC_opts.launch != UC_READER){
+ printf("Creating skeleton data file for test...\n");
+ if (create_uc_file() < 0){
+ fprintf(stderr, "***encounter error\n");
+ Hgoto_error(1);
+ }else
+ printf("File created.\n");
+ }
+
+ if (UC_opts.launch==UC_READWRITE){
+ /* fork process */
+ if((childpid = fork()) < 0) {
+ perror("fork");
+ Hgoto_error(1);
+ };
+ };
+ mypid = getpid();
+
+ /* ============= */
+ /* launch reader */
+ /* ============= */
+ if (UC_opts.launch != UC_WRITER){
+ /* child process launch the reader */
+ if(0 == childpid) {
+ printf("%d: launch reader process\n", mypid);
+ if (read_uc_file(send_wait) < 0){
+ fprintf(stderr, "read_uc_file encountered error\n");
+ exit(1);
+ }
+ exit(0);
+ }
+ }
+
+ /* ============= */
+ /* launch writer */
+ /* ============= */
+ /* this process continues to launch the writer */
+ printf("%d: continue as the writer process\n", mypid);
+ if (write_uc_file(send_wait) < 0){
+ fprintf(stderr, "write_uc_file encountered error\n");
+ Hgoto_error(1);
+ }
+
+ /* ================================================ */
+ /* If readwrite, collect exit code of child process */
+ /* ================================================ */
+ if (UC_opts.launch == UC_READWRITE){
+ if ((tmppid = waitpid(childpid, &child_status, child_wait_option)) < 0){
+ perror("waitpid");
+ Hgoto_error(1);
+ }
+ if (WIFEXITED(child_status)){
+ if ((child_ret_value=WEXITSTATUS(child_status)) != 0){
+ printf("%d: child process exited with non-zero code (%d)\n",
+ mypid, child_ret_value);
+ Hgoto_error(2);
+ }
+ } else {
+ printf("%d: child process terminated abnormally\n", mypid);
+ Hgoto_error(2);
+ }
+ }
+
+done:
+ /* Print result and exit */
+ if (ret_value != 0){
+ printf("Error(s) encountered\n");
+ }else{
+ printf("All passed\n");
+ }
+
+ return(ret_value);
+}
diff --git a/test/use_append_mchunks.c b/test/use_append_mchunks.c
new file mode 100644
index 0000000..b19fe57
--- /dev/null
+++ b/test/use_append_mchunks.c
@@ -0,0 +1,207 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Use Case 1.8 Appending a hyperslab of multiple chunks.
+ * Description:
+ * Appending a hyperslab that spans several chunks of a dataset with
+ * unlimited dimensions within a pre-created file and reading the new
+ * data back.
+ * Goal:
+ * Read data appended by the Writer to a pre-existing dataset in a
+ * file. The dataset has one or more unlimited dimensions. The data
+ * is appended by a hyperslab that is contained in several chunks (for
+ * example, appending 2-dim planes along the slowest changing dimension
+ * in the 3-dim dataset and each plane is covered by 4 chunks).
+ * Level:
+ * User Level
+ * Guarantees:
+ * o Readers will see the modified dimension sizes after the Writer
+ * finishes HDF5 metadata updates and issues H5Fflush or H5Oflush calls.
+ * o Readers will see newly appended data after the Writer finishes
+ * the flush operation.
+ *
+ * Preconditions:
+ * o Readers are not allowed to modify the file.
+ * o All datasets that are modified by the Writer exist when the
+ * Writer opens the file.
+ * o All datasets that are modified by the Writer exist when a Reader
+ * opens the file.
+ *
+ * Main Success Scenario:
+ * 1. An application creates a file with required objects (groups,
+ * datasets, and attributes).
+ * 2. The Writer opens the file and datasets in the file and starts
+ * adding data using H5Dwrite call with a hyperslab selection that
+ * spans several chunks.
+ * 3. A Reader opens the file and a dataset in a file; if the size of
+ * the unlimited dimension has changed, reads the appended data back.
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Created: Albert Cheng, 2013/6/1.
+ * Modified:
+ */
+
+#include "use.h"
+
+/* Global Variable definitions */
+options_t UC_opts; /* Use Case Options */
+const char *progname_g="use_append_mchunks"; /* program name */
+
+/* Setup parameters for the use case.
+ * Return: 0 succeed; -1 fail.
+ */
+int setup_parameters(int argc, char * const argv[])
+{
+ /* use case defaults */
+ HDmemset(&UC_opts, 0, sizeof(options_t));
+ UC_opts.chunksize = Chunksize_DFT;
+ UC_opts.use_swmr = 1; /* use swmr open */
+ UC_opts.iterations = 1;
+ UC_opts.chunkplanes = 1;
+
+ /* parse options */
+ if (parse_option(argc, argv) < 0){
+ return(-1);
+ }
+ /* set chunk dims */
+ UC_opts.chunkdims[0] = UC_opts.chunkplanes;
+ UC_opts.chunkdims[1]=UC_opts.chunkdims[2]=UC_opts.chunksize;
+
+ /* set dataset initial and max dims */
+ UC_opts.dims[0] = 0;
+ UC_opts.max_dims[0] = H5S_UNLIMITED;
+ UC_opts.dims[1] = UC_opts.dims[2] = UC_opts.max_dims[1]=UC_opts.max_dims[2]=2*UC_opts.chunksize;
+
+ /* set nplanes */
+ if (UC_opts.nplanes == 0)
+ UC_opts.nplanes = 2*UC_opts.chunksize;
+
+ /* show parameters and return */
+ show_parameters();
+ return(0);
+}
+
+
+/* Overall Algorithm:
+ * Parse options from user;
+ *	Generate/pre-create the test files needed and close them;
+ * fork: child process becomes the reader process;
+ * while parent process continues as the writer process;
+ * both run till ending conditions are met.
+ */
+int
+main(int argc, char *argv[])
+{
+ pid_t childpid=0;
+ pid_t mypid, tmppid;
+ int child_status;
+ int child_wait_option=0;
+ int ret_value = 0;
+ int child_ret_value;
+ hbool_t send_wait = 0;
+
+ /* initialization */
+ if (setup_parameters(argc, argv) < 0){
+ Hgoto_error(1);
+ }
+
+ /* Determine the need to send/wait message file*/
+ if(UC_opts.launch == UC_READWRITE) {
+ HDunlink(WRITER_MESSAGE);
+ send_wait = 1;
+ }
+
+ /* ==============================================================*/
+ /* UC_READWRITE: create datafile, launch both reader and writer. */
+ /* UC_WRITER: create datafile, skip reader, launch writer. */
+ /* UC_READER: skip create, launch reader, exit. */
+ /* ==============================================================*/
+ /* ============*/
+ /* Create file */
+ /* ============*/
+ if (UC_opts.launch != UC_READER){
+ printf("Creating skeleton data file for test...\n");
+ if (create_uc_file() < 0){
+ fprintf(stderr, "***encounter error\n");
+ Hgoto_error(1);
+ }else
+ printf("File created.\n");
+ }
+
+ if (UC_opts.launch==UC_READWRITE){
+ /* fork process */
+ if((childpid = fork()) < 0) {
+ perror("fork");
+ Hgoto_error(1);
+ };
+ };
+ mypid = getpid();
+
+ /* ============= */
+ /* launch reader */
+ /* ============= */
+ if (UC_opts.launch != UC_WRITER){
+	/* child process launches the reader */
+ if(0 == childpid) {
+ printf("%d: launch reader process\n", mypid);
+ if (read_uc_file(send_wait) < 0){
+ fprintf(stderr, "read_uc_file encountered error\n");
+ exit(1);
+ }
+ exit(0);
+ }
+ }
+
+ /* ============= */
+ /* launch writer */
+ /* ============= */
+ /* this process continues to launch the writer */
+ printf("%d: continue as the writer process\n", mypid);
+ if (write_uc_file(send_wait) < 0){
+ fprintf(stderr, "write_uc_file encountered error\n");
+ Hgoto_error(1);
+ }
+
+ /* ================================================ */
+ /* If readwrite, collect exit code of child process */
+ /* ================================================ */
+ if (UC_opts.launch == UC_READWRITE){
+ if ((tmppid = waitpid(childpid, &child_status, child_wait_option)) < 0){
+ perror("waitpid");
+ Hgoto_error(1);
+ }
+ if (WIFEXITED(child_status)){
+ if ((child_ret_value=WEXITSTATUS(child_status)) != 0){
+ printf("%d: child process exited with non-zero code (%d)\n",
+ mypid, child_ret_value);
+ Hgoto_error(2);
+ }
+ } else {
+ printf("%d: child process terminated abnormally\n", mypid);
+ Hgoto_error(2);
+ }
+ }
+
+done:
+ /* Print result and exit */
+ if (ret_value != 0){
+ printf("Error(s) encountered\n");
+ }else{
+ printf("All passed\n");
+ }
+
+ return(ret_value);
+}
diff --git a/test/use_common.c b/test/use_common.c
new file mode 100644
index 0000000..905a8e9
--- /dev/null
+++ b/test/use_common.c
@@ -0,0 +1,631 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "use.h"
+
+#define H5D_FRIEND /*suppress error about including H5Dpkg */
+#define H5D_TESTING
+#include "H5Dpkg.h"
+
+void
+usage(const char *prog)
+{
+ fprintf(stderr, "usage: %s [OPTIONS]\n", prog);
+ fprintf(stderr, " OPTIONS\n");
+ fprintf(stderr, " -h, --help Print a usage message and exit\n");
+ fprintf(stderr, " -f FN Test file name [default: %s.h5]\n", prog);
+ fprintf(stderr, " -i N, --iteration=N Number of iterations to repeat the whole thing. [default: 1]\n");
+ fprintf(stderr, " -l w|r launch writer or reader only. [default: launch both]\n");
+ fprintf(stderr, " -n N, --nplanes=N Number of planes to write/read. [default: 1000]\n");
+ fprintf(stderr, " -s N, --swmr=N Use SWMR mode (0: no, non-0: yes) default is yes\n");
+ fprintf(stderr, " -z N, --chunksize=N Chunk size [default: %d]\n", Chunksize_DFT);
+ fprintf(stderr, " -y N, --chunkplanes=N Number of planes per chunk [default: 1]\n");
+ fprintf(stderr, "\n");
+}
+
+/* Setup Use Case parameters by parsing command line options.
+* Setup default values if not set by options. */
+int
+parse_option(int argc, char * const argv[])
+{
+ int ret_value=0;
+ int c;
+ /* command line options: See function usage for a description */
+ const char *nagg_options = "f:hi:l:n:s:y:z:";
+
+ /* suppress getopt from printing error */
+ opterr = 0;
+
+ while (1){
+ c = getopt (argc, argv, nagg_options);
+ if (-1 == c)
+ break;
+ switch (c) {
+ case 'h':
+ usage(progname_g);
+ exit(0);
+ break;
+ case 'f': /* usecase data file name */
+ UC_opts.filename = optarg;
+ break;
+ case 'i': /* iterations */
+ if ((UC_opts.iterations = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad iterations number %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'l': /* launch reader or writer only */
+ switch (*optarg) {
+ case 'r': /* reader only */
+ UC_opts.launch = UC_READER;
+ break;
+ case 'w': /* writer only */
+ UC_opts.launch = UC_WRITER;
+ break;
+ default:
+ fprintf(stderr, "launch value(%c) should be w or r only.\n", *optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ break;
+ }
+ break;
+ case 'n': /* number of planes to write/read */
+ if ((UC_opts.nplanes = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad number of planes %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 's': /* use swmr file open mode */
+ if ((UC_opts.use_swmr = atoi(optarg)) < 0){
+ fprintf(stderr, "swmr value should be 0(no) or 1(yes)\n");
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'y': /* Number of planes per chunk */
+ if ((UC_opts.chunkplanes = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad number of planes per chunk %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'z': /* size of chunk=(z,z) */
+ if ((UC_opts.chunksize = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad chunksize %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case '?':
+ fprintf(stderr, "getopt returned '%c'.\n", c);
+ Hgoto_error(-1);
+ default:
+ fprintf(stderr, "getopt returned unexpected value.\n");
+ fprintf(stderr, "Unexpected value is %d\n", c);
+ Hgoto_error(-1);
+ }
+ }
+
+ /* set test file name if not given */
+ if (!UC_opts.filename){
+ /* default data file name is <progname>.h5 */
+ if ((UC_opts.filename=(char*)HDmalloc(HDstrlen(progname_g)+4))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ Hgoto_error(-1);
+ };
+ HDstrcpy(UC_opts.filename, progname_g);
+ HDstrcat(UC_opts.filename, ".h5");
+ }
+
+done:
+ /* All done. */
+ return(ret_value);
+}
+
+/* Show parameters used for this use case */
+void show_parameters(void){
+ printf("===Parameters used:===\n");
+ printf("chunk dims=(%llu, %llu, %llu)\n", (unsigned long long)UC_opts.chunkdims[0],
+ (unsigned long long)UC_opts.chunkdims[1], (unsigned long long)UC_opts.chunkdims[2]);
+ printf("dataset max dims=(%llu, %llu, %llu)\n", (unsigned long long)UC_opts.max_dims[0],
+ (unsigned long long)UC_opts.max_dims[1], (unsigned long long)UC_opts.max_dims[2]);
+ printf("number of planes to write=%llu\n", (unsigned long long)UC_opts.nplanes);
+ printf("using SWMR mode=%s\n", UC_opts.use_swmr ? "yes(1)" : "no(0)");
+ printf("data filename=%s\n", UC_opts.filename);
+ printf("launch part=");
+ switch (UC_opts.launch){
+ case UC_READWRITE:
+ printf("Reader/Writer\n");
+ break;
+ case UC_WRITER:
+ printf("Writer\n");
+ break;
+ case UC_READER:
+ printf("Reader\n");
+ break;
+ default:
+ /* should not happen */
+ printf("Illegal part(%d)\n", UC_opts.launch);
+ };
+ printf("number of iterations=%d (not used yet)\n", UC_opts.iterations);
+ printf("===Parameters shown===\n");
+}
+
+/* Create the skeleton use case file for testing.
+ * It has one 3d dataset using chunked storage.
+ * The dataset is (unlimited, chunksize, chunksize).
+ * Dataset type is 2 bytes integer.
+ * It starts out "empty", i.e., first dimension is 0.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+int create_uc_file(void)
+{
+ hsize_t dims[3]; /* Dataset starting dimensions */
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t sid; /* Dataspace ID */
+ hid_t dsid; /* Dataset ID */
+ hid_t fapl; /* File access property list */
+ H5D_chunk_index_t idx_type; /* Chunk index type */
+
+ /* Create the file */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ if((fid = H5Fcreate(UC_opts.filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ return -1;
+
+ /* Set up dimension sizes */
+ dims[0] = 0;
+ dims[1] = dims[2] = UC_opts.max_dims[1];
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(3, dims, UC_opts.max_dims)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 3, UC_opts.chunkdims) < 0)
+ return -1;
+
+ /* create dataset of progname */
+ if((dsid = H5Dcreate2(fid, progname_g, UC_DATATYPE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Check that the chunk index type is not version 1 B-tree.
+ * Version 1 B-trees are not supported under SWMR.
+ */
+ if(H5D__layout_idx_type_test(dsid, &idx_type) < 0)
+ return -1;
+ if(idx_type == H5D_CHUNK_IDX_BTREE) {
+ fprintf(stderr, "ERROR: Chunk index is version 1 B-tree: aborting.\n");
+ return -1;
+ }
+
+ /* Close everything */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+ if(H5Pclose(fapl) < 0)
+ return -1;
+ if(H5Pclose(dcpl) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ return 0;
+}
+
+/* Append planes, each of (1,2*chunksize,2*chunksize) to the dataset.
+ * In other words, 4 chunks are appended to the dataset at a time.
+ * Fill each plane with the plane number and then write it at the nth plane.
+ * Increase the plane number and repeat till the end of the dataset, when it
+ * reaches 2*chunksize long. End product is a (2*chunksize)^3 cube.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+int write_uc_file(hbool_t tosend)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dsid; /* dataset ID */
+ hid_t fapl; /* File access property list */
+ hid_t dcpl; /* Dataset creation property list */
+ char *name;
+ UC_CTYPE *buffer, *bufptr; /* data buffer */
+ hsize_t cz=UC_opts.chunksize; /* Chunk size */
+ hid_t f_sid; /* dataset file space id */
+ hid_t m_sid; /* memory space id */
+ int rank; /* rank */
+ hsize_t chunk_dims[3]; /* Chunk dimensions */
+ hsize_t dims[3]; /* Dataspace dimensions */
+ hsize_t memdims[3]; /* Memory space dimensions */
+ hsize_t start[3] = {0,0,0}, count[3]; /* Hyperslab selection values */
+ hsize_t i, j, k;
+
+ name = UC_opts.filename;
+
+ /* Open the file */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+ if(UC_opts.use_swmr)
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ if((fid = H5Fopen(name, H5F_ACC_RDWR | (UC_opts.use_swmr ? H5F_ACC_SWMR_WRITE : 0), fapl)) < 0){
+ fprintf(stderr, "H5Fopen failed\n");
+ return -1;
+ }
+
+ if(tosend)
+ /* Send a message that H5Fopen is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE);
+
+ /* Open the dataset of the program name */
+ if((dsid = H5Dopen2(fid, progname_g, H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Dopen2 failed\n");
+ return -1;
+ }
+
+ /* Find chunksize used */
+ if ((dcpl = H5Dget_create_plist(dsid)) < 0){
+ fprintf(stderr, "H5Dget_create_plist failed\n");
+ return -1;
+ }
+ if (H5D_CHUNKED != H5Pget_layout(dcpl)){
+ fprintf(stderr, "storage layout is not chunked\n");
+ return -1;
+ }
+ if ((rank = H5Pget_chunk(dcpl, 3, chunk_dims)) != 3){
+ fprintf(stderr, "storage rank is not 3\n");
+ return -1;
+ }
+
+    /* verify chunk_dims against set parameters */
+ if (chunk_dims[0]!=UC_opts.chunkdims[0] || chunk_dims[1] != cz || chunk_dims[2] != cz){
+ fprintf(stderr, "chunk size is not as expected. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)chunk_dims[0], (unsigned long long)chunk_dims[1],
+ (unsigned long long)chunk_dims[2]);
+ return -1;
+ }
+
+ /* allocate space for data buffer 1 X dims[1] X dims[2] of UC_CTYPE */
+ memdims[0]=1;
+ memdims[1] = UC_opts.dims[1];
+ memdims[2] = UC_opts.dims[2];
+ if ((buffer=(UC_CTYPE*)HDmalloc((size_t)memdims[1]*(size_t)memdims[2]*sizeof(UC_CTYPE)))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ return -1;
+ };
+
+ /*
+ * Get dataset rank and dimension.
+ */
+ f_sid = H5Dget_space(dsid); /* Get filespace handle first. */
+ rank = H5Sget_simple_extent_ndims(f_sid);
+ if (rank != UC_RANK){
+ fprintf(stderr, "rank(%d) of dataset does not match\n", rank);
+ return -1;
+ }
+ if (H5Sget_simple_extent_dims(f_sid, dims, NULL) < 0){
+ fprintf(stderr, "H5Sget_simple_extent_dims got error\n");
+ return -1;
+ }
+ printf("dataset rank %d, dimensions %llu x %llu x %llu\n",
+ rank, (unsigned long long)(dims[0]), (unsigned long long)(dims[1]),
+ (unsigned long long)(dims[2]));
+ /* verify that file space dims are as expected and are consistent with memory space dims */
+ if (dims[0] != 0 || dims[1] != memdims[1] || dims[2] != memdims[2]){
+ fprintf(stderr, "dataset is not empty. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)dims[0], (unsigned long long)dims[1],
+ (unsigned long long)dims[2]);
+ return -1;
+ }
+
+ /* setup mem-space for buffer */
+ if ((m_sid=H5Screate_simple(rank, memdims, NULL))<0){
+ fprintf(stderr, "H5Screate_simple for memory failed\n");
+ return -1;
+ };
+
+ /* write planes */
+ count[0]=1;
+ count[1]=dims[1];
+ count[2]=dims[2];
+ for (i=0; i<UC_opts.nplanes; i++){
+	/* fill buffer with value i (the plane number) */
+ bufptr = buffer;
+ for (j=0; j<dims[1]; j++)
+ for (k=0; k<dims[2]; k++)
+ *bufptr++ = i;
+
+ /* extend the dataset by one for new plane */
+ dims[0]=i+1;
+ if(H5Dset_extent(dsid, dims) < 0){
+ fprintf(stderr, "H5Dset_extent failed\n");
+ return -1;
+ }
+
+ /* Get the dataset's dataspace */
+ if((f_sid = H5Dget_space(dsid)) < 0){
+ fprintf(stderr, "H5Dset_extent failed\n");
+ return -1;
+ }
+
+ start[0]=i;
+ /* Choose the next plane to write */
+ if(H5Sselect_hyperslab(f_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0){
+ fprintf(stderr, "Failed H5Sselect_hyperslab\n");
+ return -1;
+ }
+
+ /* Write plane to the dataset */
+ if(H5Dwrite(dsid, UC_DATATYPE, m_sid, f_sid, H5P_DEFAULT, buffer) < 0){
+ fprintf(stderr, "Failed H5Dwrite\n");
+ return -1;
+ }
+ /* flush file to make the just written plane available. */
+#if 0
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+#else
+ if(H5Dflush(dsid) < 0)
+#endif
+ {
+ fprintf(stderr, "Failed to H5Fflush file\n");
+ return -1;
+ }
+ }
+
+ /* Done writing. Free/Close all resources including data file */
+ HDfree(buffer);
+ if (H5Dclose(dsid) < 0){
+ fprintf(stderr, "Failed to close datasete\n");
+ return -1;
+ }
+ if (H5Sclose(m_sid) < 0){
+ fprintf(stderr, "Failed to close memory space\n");
+ return -1;
+ }
+ if (H5Sclose(f_sid) < 0){
+ fprintf(stderr, "Failed to close file space\n");
+ return -1;
+ }
+ if (H5Pclose(fapl) < 0){
+ fprintf(stderr, "Failed to property list\n");
+ return -1;
+ }
+ if (H5Fclose(fid) < 0){
+ fprintf(stderr, "Failed to close file id\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/* Read planes from the dataset.
+ * It expects the dataset is being changed (growing).
+ * It checks the unlimited dimension (1st one). When it increases,
+ * it will read in the new planes, one by one, and verify the data correctness.
+ * (The nth plane should contain all "n".)
+ * When the unlimited dimension grows to 2*chunksize (it becomes a cube),
+ * that is the expected end of data, and the reader exits.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+int read_uc_file(hbool_t towait)
+{
+ hid_t fapl; /* file access property list ID */
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dsid; /* dataset ID */
+ char *name;
+ UC_CTYPE *buffer, *bufptr; /* read data buffer */
+ hid_t f_sid; /* dataset file space id */
+ hid_t m_sid; /* memory space id */
+ int rank; /* rank */
+ hsize_t dims[3]; /* Dataspace dimensions */
+ hsize_t memdims[3]; /* Memory space dimensions */
+ hsize_t nplane=0, nplane_old=0; /* nth plane, last nth plane */
+ hsize_t start[3] = {0,0,0}, count[3]; /* Hyperslab selection values */
+ hsize_t j, k;
+ int nreadererr=0;
+ int nerrs;
+ int nonewplane;
+
+ /* Before reading, wait for the message that H5Fopen is complete--file lock is released */
+ if(towait && h5_wait_message(WRITER_MESSAGE) < 0) {
+ fprintf(stderr, "Cannot find writer message file...failed\n");
+ return -1;
+ }
+
+ name = UC_opts.filename;
+
+ /* Open the file */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+ if((fid = H5Fopen(name, H5F_ACC_RDONLY | (UC_opts.use_swmr ? H5F_ACC_SWMR_READ : 0), fapl)) < 0){
+ fprintf(stderr, "H5Fopen failed\n");
+ return -1;
+ }
+ if (H5Pclose(fapl) < 0){
+ fprintf(stderr, "Failed to property list\n");
+ return -1;
+ }
+
+
+ /* Open the dataset of the program name */
+ if((dsid = H5Dopen2(fid, progname_g, H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Dopen2 failed\n");
+ return -1;
+ }
+
+ /* allocate space for data buffer 1 X dims[1] X dims[2] of UC_CTYPE */
+ memdims[0]=1;
+ memdims[1] = UC_opts.dims[1];
+ memdims[2] = UC_opts.dims[2];
+ if ((buffer=(UC_CTYPE*)HDmalloc((size_t)memdims[1]*(size_t)memdims[2]*sizeof(UC_CTYPE)))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ return -1;
+ };
+
+ /*
+ * Get dataset rank and dimension.
+ * Verify dimension is as expected (unlimited,2*chunksize,2*chunksize).
+ */
+ f_sid = H5Dget_space(dsid); /* Get filespace handle first. */
+ rank = H5Sget_simple_extent_ndims(f_sid);
+ if (rank != UC_RANK){
+ fprintf(stderr, "rank(%d) of dataset does not match\n", rank);
+ return -1;
+ }
+ if (H5Sget_simple_extent_dims(f_sid, dims, NULL) < 0){
+ fprintf(stderr, "H5Sget_simple_extent_dims got error\n");
+ return -1;
+ }
+ printf("dataset rank %d, dimensions %llu x %llu x %llu\n",
+ rank, (unsigned long long)(dims[0]), (unsigned long long)(dims[1]),
+ (unsigned long long)(dims[2]));
+ /* verify that file space dims are as expected and are consistent with memory space dims */
+ if (dims[1] != memdims[1] || dims[2] != memdims[2]){
+ fprintf(stderr, "dataset dimension is not as expected. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)dims[0], (unsigned long long)dims[1],
+ (unsigned long long)dims[2]);
+ fprintf(stderr, "But memdims=(%llu,%llu,%llu)\n",
+ (unsigned long long)memdims[0], (unsigned long long)memdims[1],
+ (unsigned long long)memdims[2]);
+ return -1;
+ }
+
+ /* setup mem-space for buffer */
+ if ((m_sid=H5Screate_simple(rank, memdims, NULL))<0){
+ fprintf(stderr, "H5Screate_simple for memory failed\n");
+ return -1;
+ };
+
+ /* Read 1 plane at a time whenever the dataset grows larger
+ * (along dim[0]) */
+ count[0]=1;
+ count[1]=dims[1];
+ count[2]=dims[2];
+ /* quit when all nplanes have been read */
+ nonewplane=0;
+ while (nplane_old < UC_opts.nplanes ){
+	/* print progress message according to whether new planes are available */
+ if (nplane_old < dims[0]) {
+ if (nonewplane){
+ /* end the previous message */
+ printf("\n");
+ nonewplane=0;
+ }
+ printf("reading planes %llu to %llu\n", (unsigned long long)nplane_old,
+ (unsigned long long)dims[0]);
+ }else{
+ if (nonewplane){
+ printf(".");
+ if (nonewplane>=30){
+ fprintf(stderr, "waited too long for new plane, quit.\n");
+ return -1;
+ }
+ }else{
+		/* print message only the first time; dots indicate still no new plane */
+ printf("no new planes to read ");
+ }
+ nonewplane++;
+ /* pause for a second */
+ sleep(1);
+ }
+ for (nplane=nplane_old; nplane < dims[0]; nplane++){
+ /* read planes between last old nplanes and current extent */
+ /* Get the dataset's dataspace */
+ if((f_sid = H5Dget_space(dsid)) < 0){
+ fprintf(stderr, "H5Dget_space failed\n");
+ return -1;
+ }
+
+ start[0]=nplane;
+ /* Choose the next plane to read */
+ if(H5Sselect_hyperslab(f_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0){
+ fprintf(stderr, "H5Sselect_hyperslab failed\n");
+ return -1;
+ }
+
+ /* Read the plane from the dataset */
+ if(H5Dread(dsid, UC_DATATYPE, m_sid, f_sid, H5P_DEFAULT, buffer) < 0){
+ fprintf(stderr, "H5Dread failed\n");
+ return -1;
+ }
+
+ /* compare read data with expected data value which is nplane */
+ bufptr = buffer;
+ nerrs=0;
+ for (j=0; j<dims[1]; j++){
+ for (k=0; k<dims[2]; k++){
+ if ((hsize_t)*bufptr++ != nplane){
+ if (++nerrs < ErrorReportMax){
+ fprintf(stderr,
+ "found error %llu plane(%llu,%llu), expected %llu, got %d\n",
+ (unsigned long long)nplane, (unsigned long long)j,
+ (unsigned long long)k, (unsigned long long)nplane, (int)*(bufptr-1));
+ }
+ }
+ }
+ }
+ if (nerrs){
+ nreadererr++;
+ fprintf(stderr, "found %d unexpected values in plane %llu\n", nerrs,
+ (unsigned long long)nplane);
+ }
+ }
+ /* Have read all current planes */
+ nplane_old=dims[0];
+
+ /* check if dataset has grown since last time */
+#if 0
+ /* close dsid and file, then reopen them */
+ if (H5Dclose(dsid) < 0){
+ fprintf(stderr, "H5Dclose failed\n");
+ return -1;
+ }
+ if (H5Fclose(fid) < 0){
+ fprintf(stderr, "H5Fclose failed\n");
+ return -1;
+ }
+ if((fid = H5Fopen(name, H5F_ACC_RDONLY | (UC_opts.use_swmr ? H5F_ACC_SWMR_READ : 0), H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Fopen failed\n");
+ return -1;
+ }
+ if((dsid = H5Dopen2(fid, progname_g, H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Dopen2 failed\n");
+ return -1;
+ }
+#else
+ H5Drefresh(dsid);
+#endif
+ f_sid = H5Dget_space(dsid); /* Get filespace handle first. */
+ if (H5Sget_simple_extent_dims(f_sid, dims, NULL) < 0){
+ fprintf(stderr, "H5Sget_simple_extent_dims got error\n");
+ return -1;
+ }
+ }
+
+ if (nreadererr)
+ return -1;
+ else
+ return 0;
+}
+
diff --git a/test/use_disable_mdc_flushes.c b/test/use_disable_mdc_flushes.c
new file mode 100644
index 0000000..5fd013f
--- /dev/null
+++ b/test/use_disable_mdc_flushes.c
@@ -0,0 +1,531 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This is copied from use_append_chunk.c with modifications to show
+ * the usage of H5Odisable_mdc_flushes/H5Oenable_mdc_flushes/H5Oare_mdc_flushes_disabled public routines.
+ */
+
+#include "h5test.h"
+
+#define H5D_FRIEND /*suppress error about including H5Dpkg */
+#define H5D_TESTING
+#include "H5Dpkg.h"
+
+/* Global Variable definitions */
+const char *progname_g="use_disable_mdc_flushes"; /* program name */
+
+/* these two definitions must match each other */
+#define UC_DATATYPE H5T_NATIVE_SHORT /* use case HDF5 data type */
+#define UC_CTYPE short /* use case C data type */
+#define UC_RANK 3 /* use case dataset rank */
+#define Chunksize_DFT 256 /* chunksize default */
+#define Hgoto_error(val) {ret_value=val; goto done;}
+
+
+char *filename_g; /* test data file name: -f optarg, or malloc'd "<progname>.h5" default */
+hsize_t nplanes_g; /* number of planes to write (-n); 0 means "use chunksize_g" (see setup_parameters) */
+int use_swmr_g; /* open the file with SWMR access (-s); non-zero means yes */
+int chunkplanes_g; /* number of planes per chunk (-y) */
+int chunksize_g; /* chunk edge size for dims 1 and 2 (-z) */
+hsize_t dims_g[UC_RANK]; /* dataset initial dimensions */
+hsize_t max_dims_g[UC_RANK]; /* dataset maximum dimensions (dim 0 unlimited) */
+hsize_t chunkdims_g[UC_RANK]; /* chunk dimensions */
+
+/* Forward declarations (write_file() is defined before its only caller, so it has no prototype here) */
+static void usage(const char *prog);
+static int parse_option(int argc, char * const argv[]);
+static void show_parameters(void);
+static int create_file(void);
+static int setup_parameters(int argc, char * const argv[]);
+
+/*
+ * Note: Long options are not yet implemented.
+ *
+ * usage: use_disable_mdc_flushes [OPTIONS]
+ * OPTIONS
+ * -h, --help Print a usage message and exit
+ * -f FN Test file name [default: use_disable_mdc_flushes.h5]
+ * -n N, --nplanes=N Number of planes to write. [default: 1000]
+ * -s N, --swmr=N Use SWMR mode (0: no, non-0: yes) default is yes
+ * -z N, --chunksize=N Chunk size [default: 256]
+ * -y N, --chunkplanes=N Number of planes per chunk [default: 1]
+ */
+/* Print a usage/help message for this program to stderr.
+ * NOTE(review): the "-n" line advertises a default of 1000, but
+ * setup_parameters() actually defaults nplanes_g to the chunk size when -n
+ * is not given — the help text (a runtime string, unchanged here) is stale. */
+static void
+usage(const char *prog)
+{
+    fprintf(stderr, "usage: %s [OPTIONS]\n", prog);
+    fprintf(stderr, "  OPTIONS\n");
+    fprintf(stderr, "     -h    Print a usage message and exit\n");
+    fprintf(stderr, "     -f FN    Test file name [default: %s.h5]\n", prog);
+    fprintf(stderr, "     -n N    Number of planes to write. [default: 1000]\n");
+    fprintf(stderr, "     -s N    Use SWMR mode (0: no, non-0: yes) default is yes\n");
+    fprintf(stderr, "     -z N    Chunk size [default: %d]\n", Chunksize_DFT);
+    fprintf(stderr, "     -y N    Number of planes per chunk [default: 1]\n");
+    fprintf(stderr, "\n");
+} /* usage() */
+
+
+/*
+ * Setup Use Case parameters by parsing command line options.
+ * Setup default values if not set by options.
+ *
+ * Recognized options: -h (help/exit), -f FN, -n N, -s N, -y N, -z N.
+ * Return: 0 succeed; -1 on a bad option value or malloc failure.
+ */
+static int
+parse_option(int argc, char * const argv[])
+{
+    int ret_value=0;
+    int c;
+    /* command line options: See function usage for a description */
+    const char *cmd_options = "f:hn:s:y:z:";
+
+    /* suppress getopt from printing error */
+    opterr = 0;
+
+    while (1){
+	c = getopt (argc, argv, cmd_options);
+	if (-1 == c)
+	    break;
+	switch (c) {
+	    case 'h':
+		usage(progname_g);
+		exit(0);
+		break;
+	    case 'f': /* usecase data file name */
+		/* points directly at optarg (argv storage); not malloc'd */
+		filename_g = optarg;
+		break;
+	    case 'n': /* number of planes to write/read */
+		/* NOTE(review): nplanes_g is hsize_t (unsigned), so "<= 0"
+		 * only rejects 0 here; a negative argument wraps to a huge
+		 * positive plane count — confirm this is acceptable. */
+		if ((nplanes_g = atoi(optarg)) <= 0){
+		    fprintf(stderr, "bad number of planes %s, must be a positive integer\n", optarg);
+		    usage(progname_g);
+		    Hgoto_error(-1);
+		};
+		break;
+	    case 's': /* use swmr file open mode */
+		if ((use_swmr_g = atoi(optarg)) < 0){
+		    fprintf(stderr, "swmr value should be 0(no) or 1(yes)\n");
+		    usage(progname_g);
+		    Hgoto_error(-1);
+		};
+		break;
+	    case 'y': /* Number of planes per chunk */
+		if ((chunkplanes_g = atoi(optarg)) <= 0){
+		    fprintf(stderr, "bad number of planes per chunk %s, must be a positive integer\n", optarg);
+		    usage(progname_g);
+		    Hgoto_error(-1);
+		};
+		break;
+	    case 'z': /* size of chunk=(z,z) */
+		if ((chunksize_g = atoi(optarg)) <= 0){
+		    fprintf(stderr, "bad chunksize %s, must be a positive integer\n", optarg);
+		    usage(progname_g);
+		    Hgoto_error(-1);
+		};
+		break;
+	    case '?':
+		fprintf(stderr, "getopt returned '%c'.\n", c);
+		Hgoto_error(-1);
+	    default:
+		fprintf(stderr, "getopt returned unexpected value.\n");
+		fprintf(stderr, "Unexpected value is %d\n", c);
+		Hgoto_error(-1);
+	}
+    }
+
+    /* set test file name if not given */
+    if (!filename_g){
+	/* default data file name is <progname>.h5 */
+	/* strlen + 4 covers the ".h5" suffix plus the terminating NUL;
+	 * the buffer is intentionally never freed (lives for program lifetime) */
+	if ((filename_g = (char*)HDmalloc(HDstrlen(progname_g)+4))==NULL) {
+	    fprintf(stderr, "malloc: failed\n");
+	    Hgoto_error(-1);
+	};
+	HDstrcpy(filename_g, progname_g);
+	HDstrcat(filename_g, ".h5");
+    }
+
+done:
+    /* All done. */
+    return(ret_value);
+} /* parse_option() */
+
+/* Show parameters used for this use case.
+ * Prints the globals established by setup_parameters()/parse_option()
+ * (chunk dims, max dims, plane count, SWMR flag, file name) to stdout. */
+static void
+show_parameters(void)
+{
+    printf("===Parameters used:===\n");
+    printf("chunk dims=(%llu, %llu, %llu)\n", (unsigned long long)chunkdims_g[0],
+        (unsigned long long)chunkdims_g[1], (unsigned long long)chunkdims_g[2]);
+    printf("dataset max dims=(%llu, %llu, %llu)\n", (unsigned long long)max_dims_g[0],
+        (unsigned long long)max_dims_g[1], (unsigned long long)max_dims_g[2]);
+    printf("number of planes to write=%llu\n", (unsigned long long)nplanes_g);
+    printf("using SWMR mode=%s\n", use_swmr_g ? "yes(1)" : "no(0)");
+    printf("data filename=%s\n", filename_g);
+    printf("===Parameters shown===\n");
+} /* show_parameters() */
+
+/*
+ * Setup parameters for the use case:
+ * apply defaults (chunksize=Chunksize_DFT, SWMR on, 1 plane/chunk), parse
+ * the command line, then derive chunk dims, dataset initial/max dims, and
+ * the plane count from the results.  Prints the final parameters.
+ * Return: 0 succeed; -1 fail.
+ */
+static int
+setup_parameters(int argc, char * const argv[])
+{
+    /* use case defaults */
+    chunksize_g = Chunksize_DFT;
+    use_swmr_g = 1; /* use swmr open */
+    chunkplanes_g = 1;
+
+    /* parse options */
+    if (parse_option(argc, argv) < 0){
+	return(-1);
+    }
+    /* set chunk dims: (chunkplanes, chunksize, chunksize) */
+    chunkdims_g[0] = chunkplanes_g;
+    chunkdims_g[1]= chunkdims_g[2] = chunksize_g;
+
+    /* set dataset initial and max dims: dim 0 starts empty and is unlimited */
+    dims_g[0] = 0;
+    max_dims_g[0] = H5S_UNLIMITED;
+    dims_g[1] = dims_g[2] = max_dims_g[1] = max_dims_g[2] = chunksize_g;
+
+    /* set nplanes: when -n was not given (still 0), default to the chunk size */
+    if (nplanes_g == 0)
+        nplanes_g = chunksize_g;
+
+    /* show parameters and return */
+    show_parameters();
+    return(0);
+} /* setup_parameters() */
+
+/*
+ * Create the skeleton use case file for testing.
+ * It has one 3d dataset using chunked storage.
+ * The dataset is (unlimited, chunksize, chunksize).
+ * Dataset type is 2 bytes integer.
+ * It starts out "empty", i.e., first dimension is 0.
+ *
+ * NOTE(review): on the early error returns below, already-opened handles
+ * (fapl, fid, sid, dcpl) are not closed; acceptable for a test program that
+ * exits on failure, but worth confirming that is the intent.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+static int
+create_file(void)
+{
+    hsize_t dims[3]; /* Dataset starting dimensions */
+    hid_t fid; /* File ID for new HDF5 file */
+    hid_t dcpl; /* Dataset creation property list */
+    hid_t sid; /* Dataspace ID */
+    hid_t dsid; /* Dataset ID */
+    hid_t fapl; /* File access property list */
+    H5D_chunk_index_t idx_type; /* Chunk index type */
+
+    /* Create the file using the latest file format (required so the chunked
+     * dataset gets a SWMR-capable chunk index; see the B-tree check below) */
+    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+	return -1;
+    if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+	return -1;
+    if((fid = H5Fcreate(filename_g, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+	return -1;
+
+    /* Set up dimension sizes: dim 0 starts at 0 ("empty") */
+    dims[0] = 0;
+    dims[1] = dims[2] = max_dims_g[1];
+
+    /* Create dataspace for creating datasets */
+    if((sid = H5Screate_simple(3, dims, max_dims_g)) < 0)
+	return -1;
+
+    /* Create dataset creation property list */
+    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+	return -1;
+    if(H5Pset_chunk(dcpl, 3, chunkdims_g) < 0)
+	return -1;
+
+    /* create dataset of progname */
+    if((dsid = H5Dcreate2(fid, progname_g, UC_DATATYPE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+	return -1;
+
+    /* Check that the chunk index type is not version 1 B-tree.
+     * Version 1 B-trees are not supported under SWMR.
+     * (H5D__layout_idx_type_test is an internal test-only routine from H5Dpkg.h.)
+     */
+    if(H5D__layout_idx_type_test(dsid, &idx_type) < 0)
+	return -1;
+    if(idx_type == H5D_CHUNK_IDX_BTREE) {
+	fprintf(stderr, "ERROR: Chunk index is version 1 B-tree: aborting.\n");
+	return -1;
+    }
+
+    /* Close everything */
+    if(H5Dclose(dsid) < 0)
+	return -1;
+    if(H5Pclose(fapl) < 0)
+	return -1;
+    if(H5Pclose(dcpl) < 0)
+	return -1;
+    if(H5Sclose(sid) < 0)
+	return -1;
+    if(H5Fclose(fid) < 0)
+	return -1;
+
+    return 0;
+} /* create_file() */
+
+/*
+ * Append nplanes_g planes, each of size (1, chunksize, chunksize), to the
+ * dataset while metadata cache flushes for the dataset object are disabled
+ * via H5Odisable_mdc_flushes().  Each plane is filled with its plane index
+ * and written after extending the first (unlimited) dimension by one; the
+ * dataset is flushed after every chunkplanes_g planes and once at the end.
+ *
+ * NOTE(review): the previous version of this header comment (copied from
+ * the append-chunk use case) described planes of (1, 2*chunksize,
+ * 2*chunksize) and a (2*chunksize)^3 end product, which does not match
+ * this code.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+static int
+write_file(void)
+{
+    hid_t fid; /* File ID for new HDF5 file */
+    hid_t dsid; /* dataset ID */
+    hid_t fapl; /* File access property list */
+    hid_t dcpl; /* Dataset creation property list */
+    char *name;
+    UC_CTYPE *buffer, *bufptr; /* data buffer */
+    hsize_t cz=chunksize_g; /* Chunk size */
+    hid_t f_sid; /* dataset file space id */
+    hid_t m_sid; /* memory space id */
+    int rank; /* rank */
+    hsize_t chunk_dims[3]; /* Chunk dimensions */
+    hsize_t dims[3]; /* Dataspace dimensions */
+    hsize_t memdims[3]; /* Memory space dimensions */
+    hsize_t start[3] = {0,0,0}, count[3]; /* Hyperslab selection values */
+    hbool_t disabled; /* Object's disabled status */
+    hsize_t i, j, k;
+
+    name = filename_g;
+
+    /* Open the file (SWMR-write access when requested; SWMR needs the
+     * latest file format, hence the libver bounds) */
+    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+	return -1;
+    if(use_swmr_g)
+	if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+	    return -1;
+    if((fid = H5Fopen(name, H5F_ACC_RDWR | (use_swmr_g ? H5F_ACC_SWMR_WRITE : 0), fapl)) < 0){
+	fprintf(stderr, "H5Fopen failed\n");
+	return -1;
+    }
+
+    /* Open the dataset of the program name */
+    if((dsid = H5Dopen2(fid, progname_g, H5P_DEFAULT)) < 0){
+	fprintf(stderr, "H5Dopen2 failed\n");
+	return -1;
+    }
+
+    /* Disable mdc flushes for the dataset object */
+    if(H5Odisable_mdc_flushes(dsid) < 0) {
+	fprintf(stderr, "H5Odisable_mdc_flushes failed\n");
+	return -1;
+    }
+
+    /* Get mdc disabled status of the dataset */
+    if(H5Oare_mdc_flushes_disabled(dsid, &disabled) < 0) {
+	fprintf(stderr, "H5Oare_mdc_flushes_disabled failed\n");
+	return -1;
+    } else if(disabled)
+	printf("Dataset has disabled mdc flushes.\n");
+    else
+	printf("Dataset should have disabled its mdc flushes.\n");
+
+    /* Find chunksize used.
+     * NOTE(review): dcpl is never closed in this function (leaked until
+     * process exit). */
+    if ((dcpl = H5Dget_create_plist(dsid)) < 0){
+	fprintf(stderr, "H5Dget_create_plist failed\n");
+	return -1;
+    }
+    if (H5D_CHUNKED != H5Pget_layout(dcpl)){
+	fprintf(stderr, "storage layout is not chunked\n");
+	return -1;
+    }
+    if ((rank = H5Pget_chunk(dcpl, 3, chunk_dims)) != 3){
+	fprintf(stderr, "storage rank is not 3\n");
+	return -1;
+    }
+
+    /* verify chunk_dims against set paramenters */
+    if (chunk_dims[0]!= chunkdims_g[0] || chunk_dims[1] != cz || chunk_dims[2] != cz){
+	fprintf(stderr, "chunk size is not as expected. Got dims=(%llu,%llu,%llu)\n",
+	    (unsigned long long)chunk_dims[0], (unsigned long long)chunk_dims[1],
+	    (unsigned long long)chunk_dims[2]);
+	return -1;
+    }
+
+    /* allocate space for data buffer 1 X dims[1] X dims[2] of UC_CTYPE */
+    memdims[0]=1;
+    memdims[1] = dims_g[1];
+    memdims[2] = dims_g[2];
+    if ((buffer=(UC_CTYPE*)HDmalloc((size_t)memdims[1]*(size_t)memdims[2]*sizeof(UC_CTYPE)))==NULL) {
+	fprintf(stderr, "malloc: failed\n");
+	return -1;
+    };
+
+    /*
+     * Get dataset rank and dimension.
+     */
+    f_sid = H5Dget_space(dsid); /* Get filespace handle first. */
+    rank = H5Sget_simple_extent_ndims(f_sid);
+    if (rank != UC_RANK){
+	fprintf(stderr, "rank(%d) of dataset does not match\n", rank);
+	return -1;
+    }
+    if (H5Sget_simple_extent_dims(f_sid, dims, NULL) < 0){
+	fprintf(stderr, "H5Sget_simple_extent_dims got error\n");
+	return -1;
+    }
+    printf("dataset rank %d, dimensions %llu x %llu x %llu\n",
+	rank, (unsigned long long)(dims[0]), (unsigned long long)(dims[1]),
+	(unsigned long long)(dims[2]));
+    /* verify that file space dims are as expected and are consistent with memory space dims */
+    if (dims[0] != 0 || dims[1] != memdims[1] || dims[2] != memdims[2]){
+	fprintf(stderr, "dataset is not empty. Got dims=(%llu,%llu,%llu)\n",
+	    (unsigned long long)dims[0], (unsigned long long)dims[1],
+	    (unsigned long long)dims[2]);
+	return -1;
+    }
+
+    /* setup mem-space for buffer */
+    if ((m_sid=H5Screate_simple(rank, memdims, NULL))<0){
+	fprintf(stderr, "H5Screate_simple for memory failed\n");
+	return -1;
+    };
+
+    /* write planes */
+    count[0]=1;
+    count[1]=dims[1];
+    count[2]=dims[2];
+    for (i=0; i<nplanes_g; i++){
+	/* fill the plane with its plane index i (narrowed to UC_CTYPE, i.e. short) */
+	bufptr = buffer;
+	for (j=0; j<dims[1]; j++)
+	    for (k=0; k<dims[2]; k++)
+		*bufptr++ = i;
+
+	/* extend the dataset by one for new plane */
+	dims[0]=i+1;
+	if(H5Dset_extent(dsid, dims) < 0){
+	    fprintf(stderr, "H5Dset_extent failed\n");
+	    return -1;
+	}
+
+	/* Get the dataset's dataspace.
+	 * NOTE(review): the previous f_sid handle is overwritten here each
+	 * iteration without H5Sclose (one dataspace leaked per plane), and
+	 * the error message below says "H5Dset_extent failed" — a
+	 * copy/paste of the message above; it should name H5Dget_space.
+	 * (Runtime strings left unchanged in this comment-only pass.) */
+	if((f_sid = H5Dget_space(dsid)) < 0){
+	    fprintf(stderr, "H5Dset_extent failed\n");
+	    return -1;
+	}
+
+	start[0]=i;
+	/* Choose the next plane to write */
+	if(H5Sselect_hyperslab(f_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0){
+	    fprintf(stderr, "Failed H5Sselect_hyperslab\n");
+	    return -1;
+	}
+
+	/* Write plane to the dataset */
+	if(H5Dwrite(dsid, UC_DATATYPE, m_sid, f_sid, H5P_DEFAULT, buffer) < 0){
+	    fprintf(stderr, "Failed H5Dwrite\n");
+	    return -1;
+	}
+
+	/* Flush the dataset for every "chunkplanes_g" planes */
+	if(!((i + 1) % (hsize_t)chunkplanes_g)) {
+	    if(H5Dflush(dsid) < 0) {
+		fprintf(stderr, "Failed to H5Dflush dataset\n");
+		return -1;
+	    }
+	}
+    }
+
+    /* Final flush to cover a last partial group of planes */
+    if(H5Dflush(dsid) < 0) {
+	fprintf(stderr, "Failed to H5Dflush dataset\n");
+	return -1;
+    }
+
+    /* Enable mdc flushes for the dataset */
+    /* Closing the dataset later will enable mdc flushes automatically if this is not done */
+    if(disabled)
+	if(H5Oenable_mdc_flushes(dsid) < 0) {
+	    fprintf(stderr, "Failed to H5Oenable_mdc_flushes\n");
+	    return -1;
+	}
+
+    /* Done writing. Free/Close all resources including data file */
+    /* NOTE(review): typos in the two error strings below ("datasete",
+     * "Failed to property list") — runtime strings, left as-is here. */
+    HDfree(buffer);
+
+    if(H5Dclose(dsid) < 0){
+	fprintf(stderr, "Failed to close datasete\n");
+	return -1;
+    }
+    if(H5Sclose(m_sid) < 0){
+	fprintf(stderr, "Failed to close memory space\n");
+	return -1;
+    }
+    if(H5Sclose(f_sid) < 0){
+	fprintf(stderr, "Failed to close file space\n");
+	return -1;
+    }
+    if(H5Pclose(fapl) < 0){
+	fprintf(stderr, "Failed to property list\n");
+	return -1;
+    }
+    if(H5Fclose(fid) < 0){
+	fprintf(stderr, "Failed to close file id\n");
+	return -1;
+    }
+
+    return 0;
+} /* write_file() */
+
+
+
+/* Overall Algorithm:
+ * Parse options from user;
+ * Generate/pre-created test files needed and close it;
+ * Write to the file.
+ *
+ * Exit status: 0 when all steps succeed, 1 on any error.
+ */
+int
+main(int argc, char *argv[])
+{
+    int ret_value = 0;
+
+    /* initialization: defaults + command-line options */
+    if (setup_parameters(argc, argv) < 0){
+	Hgoto_error(1);
+    }
+
+    /* ============*/
+    /* Create file */
+    /* ============*/
+    printf("Creating skeleton data file for testing H5Odisable_mdc_flushes()...\n");
+    if (create_file() < 0){
+	fprintf(stderr, "***encounter error\n");
+	Hgoto_error(1);
+    }else
+	printf("File created.\n");
+
+    /* Append the planes with mdc flushes disabled */
+    printf("writing to the file\n");
+    if (write_file() < 0){
+	fprintf(stderr, "write_file encountered error\n");
+	Hgoto_error(1);
+    }
+
+done:
+    /* Print result and exit */
+    if (ret_value != 0){
+	printf("Error(s) encountered\n");
+    }else{
+	printf("All passed\n");
+    }
+
+    return(ret_value);
+}
diff --git a/test/vds_swmr.h b/test/vds_swmr.h
new file mode 100644
index 0000000..c043fd6
--- /dev/null
+++ b/test/vds_swmr.h
@@ -0,0 +1,165 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef VDS_SWMR_H
+#define VDS_SWMR_H
+
+#include <hdf5.h>
+
+/* virtual dataset <---> source dataset mapping and sizes
+
+ ***************** --+
+ * A * K
+ ***************** --+
+ * * |
+ * B * N
+ * * |
+ ***************** --+
+ * C *
+ *****************
+ * *
+ * D *
+ * *
+ *****************
+ * E *
+ *****************
+ * *
+ * F *
+ * *
+ *****************
+
+ | |
+ +-------M-------+
+
+
+ dim[0]
+ /
+ /
+ /
+ -----> dim[2]
+ |
+ |
+ |
+ dim[1]
+
+
+ NOTE: This use case also checks for varying numbers of written planes.
+ Dataset A contains the full number of planes and each successive
+ dataset contains one fewer plane, down to the last dataset, which
+ contains zero planes. Each dataset is set to have an (unlimited
+ dimension) extent equal to the number of planes written, so the
+ "empty" regions will contain the VDS fill value.
+*/
+
+
+/* All datasets are 3D */
+#define RANK 3
+
+/* Lengths of string identifiers (file, dataset names, etc.) */
+#define NAME_LEN 32
+
+/* Compression level */
+#define COMPRESSION_LEVEL 7
+
+/* Number of source files */
+#define N_SOURCES 6
+
+/* Dataset dimensions */
+#define SM_HEIGHT 2 /* K */
+#define LG_HEIGHT 4 /* N */
+#define SM_LG_HEIGHT 6 /* SM_HEIGHT + LG_HEIGHT */
+#define FULL_HEIGHT 18 /* (3 * K) + (3 * N) — 3 small + 3 large sources stacked */
+#define HALF_HEIGHT 9 /* FULL_HEIGHT / 2 */
+#define WIDTH 8 /* M */
+#define HALF_WIDTH 4 /* WIDTH / 2 */
+
+/* Max number of planes in the dataset */
+#define N_MAX_PLANES H5S_UNLIMITED
+
+/* Number of planes each writer will write */
+#define N_PLANES_TO_WRITE 25
+
+/* Dataset datatypes (source and VDS use the same type) */
+#define SOURCE_DATATYPE H5T_STD_I32LE
+#define VDS_DATATYPE H5T_STD_I32LE
+
+/* NOTE(review): the tables below are `static` definitions in a header, so
+ * every translation unit that includes vds_swmr.h gets its own private copy
+ * (and may draw unused-variable warnings).  Acceptable for these
+ * single-file test programs; not a pattern to copy into library code. */
+
+/* Starting size of datasets, both source and VDS */
+static hsize_t DIMS[N_SOURCES][RANK] = {
+    {0, SM_HEIGHT, WIDTH},
+    {0, LG_HEIGHT, WIDTH},
+    {0, SM_HEIGHT, WIDTH},
+    {0, LG_HEIGHT, WIDTH},
+    {0, SM_HEIGHT, WIDTH},
+    {0, LG_HEIGHT, WIDTH}
+};
+static hsize_t VDS_DIMS[RANK] = {0, FULL_HEIGHT, WIDTH};
+
+/* Maximum size of datasets, both source and VDS.
+ * NOTE: Theoretical (i.e.: H5S_UNLIMITED), not the actual max
+ * number of planes written out by the writers before they stop.
+ * That number is specified separately.
+ */
+static hsize_t MAX_DIMS[N_SOURCES][RANK] = {
+    {N_MAX_PLANES, SM_HEIGHT, WIDTH},
+    {N_MAX_PLANES, LG_HEIGHT, WIDTH},
+    {N_MAX_PLANES, SM_HEIGHT, WIDTH},
+    {N_MAX_PLANES, LG_HEIGHT, WIDTH},
+    {N_MAX_PLANES, SM_HEIGHT, WIDTH},
+    {N_MAX_PLANES, LG_HEIGHT, WIDTH}
+};
+static hsize_t VDS_MAX_DIMS[RANK] = {N_MAX_PLANES, FULL_HEIGHT, WIDTH};
+
+/* Planes (one-plane hyperslab shapes; also used as chunk dims by the generator) */
+static hsize_t PLANES[N_SOURCES][RANK] = {
+    {1, SM_HEIGHT, WIDTH},
+    {1, LG_HEIGHT, WIDTH},
+    {1, SM_HEIGHT, WIDTH},
+    {1, LG_HEIGHT, WIDTH},
+    {1, SM_HEIGHT, WIDTH},
+    {1, LG_HEIGHT, WIDTH}
+};
+static hsize_t VDS_PLANE[RANK] = {1, FULL_HEIGHT, WIDTH};
+
+/* File names for source datasets */
+static char FILE_NAMES[N_SOURCES][NAME_LEN] = {
+    {"vds_swmr_src_a.h5"},
+    {"vds_swmr_src_b.h5"},
+    {"vds_swmr_src_c.h5"},
+    {"vds_swmr_src_d.h5"},
+    {"vds_swmr_src_e.h5"},
+    {"vds_swmr_src_f.h5"}
+};
+
+/* VDS file name */
+static char VDS_FILE_NAME[NAME_LEN] = "vds_swmr.h5";
+
+/* Dataset names */
+static char SOURCE_DSET_NAME[NAME_LEN] = "source_dset";
+static char SOURCE_DSET_PATH[NAME_LEN] = "/source_dset";
+static char VDS_DSET_NAME[NAME_LEN] = "vds_dset";
+
+/* Fill values (one distinct value per source, plus the VDS fill value) */
+static int32_t FILL_VALUES[N_SOURCES] = {
+    -1,
+    -2,
+    -3,
+    -4,
+    -5,
+    -6
+};
+static int32_t VDS_FILL_VALUE = -9;
+
+#endif /* VDS_SWMR_H */
+
diff --git a/test/vds_swmr_gen.c b/test/vds_swmr_gen.c
new file mode 100644
index 0000000..1706844
--- /dev/null
+++ b/test/vds_swmr_gen.c
@@ -0,0 +1,178 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "h5test.h"
+#include "vds_swmr.h"
+
+/* Generate the VDS SWMR test files: one source file+dataset per entry in
+ * FILE_NAMES (mapped into the virtual dataset), then the VDS file itself.
+ * Exit status: EXIT_SUCCESS / EXIT_FAILURE.
+ * (argc/argv are unused; the signature is kept for a standard main.) */
+int
+main(int argc, char *argv[])
+{
+    hid_t faplid = -1; /* file access property list ID (all files) */
+
+    hid_t src_sid = -1; /* source dataset's dataspace ID */
+    hid_t src_dcplid = -1; /* source dataset property list ID */
+
+    hid_t vds_sid = -1; /* VDS dataspace ID */
+    hid_t vds_dcplid = -1; /* VDS dataset property list ID */
+
+    hid_t fid = -1; /* HDF5 file ID */
+    hid_t did = -1; /* dataset ID */
+
+    hsize_t start[RANK]; /* starting point for hyperslab */
+    int map_start = -1; /* starting point in the VDS map */
+
+    int i; /* iterator */
+
+
+    /* Start by creating the virtual dataset (VDS) dataspace and creation
+     * property list. The individual source datasets are then created
+     * and the VDS map (stored in the VDS property list) is updated.
+     */
+
+    /* Create VDS dcpl */
+    if((vds_dcplid = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+        TEST_ERROR
+    if(H5Pset_fill_value(vds_dcplid, VDS_DATATYPE,
+                &VDS_FILL_VALUE) < 0)
+        TEST_ERROR
+
+    /* Create VDS dataspace */
+    if((vds_sid = H5Screate_simple(RANK, VDS_DIMS,
+                VDS_MAX_DIMS)) < 0)
+        TEST_ERROR
+
+    /************************************
+     * Create source files and datasets *
+     ************************************/
+
+    start[0] = 0;
+    start[1] = 0;
+    start[2] = 0;
+    map_start = 0;
+
+    /* All SWMR files need to use the latest file format */
+    if((faplid = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+        TEST_ERROR
+    if(H5Pset_libver_bounds(faplid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+        TEST_ERROR
+
+    for(i = 0; i < N_SOURCES; i++) {
+
+        /* source dataset dcpl: chunked by single planes, per-source fill value */
+        if((src_dcplid = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+            TEST_ERROR
+        if(H5Pset_chunk(src_dcplid, RANK, PLANES[i]) < 0)
+            TEST_ERROR
+        if(H5Pset_fill_value(src_dcplid, SOURCE_DATATYPE,
+                    &FILL_VALUES[i]) < 0)
+            TEST_ERROR
+
+        /* Use a mix of compressed and uncompressed datasets */
+        if(0 != i % 2)
+            if(H5Pset_deflate(src_dcplid, COMPRESSION_LEVEL) < 0)
+                TEST_ERROR
+
+        /* Create source file, dataspace, and dataset */
+        if((fid = H5Fcreate(FILE_NAMES[i], H5F_ACC_TRUNC,
+                H5P_DEFAULT, faplid)) < 0)
+            TEST_ERROR
+        if((src_sid = H5Screate_simple(RANK, DIMS[i],
+                MAX_DIMS[i])) < 0)
+            TEST_ERROR
+        if((did = H5Dcreate2(fid, SOURCE_DSET_NAME,
+                SOURCE_DATATYPE, src_sid,
+                H5P_DEFAULT, src_dcplid, H5P_DEFAULT)) < 0)
+            TEST_ERROR
+
+        /* set up hyperslabs for source and destination datasets.
+         * NOTE(review): the count used is MAX_DIMS[i], whose dim 0 is
+         * H5S_UNLIMITED — presumably an unlimited selection so the mapping
+         * grows as the source dataset is extended; confirm against the
+         * H5Pset_virtual documentation. */
+        start[1] = 0;
+        if(H5Sselect_hyperslab(src_sid, H5S_SELECT_SET, start, NULL,
+                    MAX_DIMS[i], NULL) < 0)
+            TEST_ERROR
+        start[1] = map_start;
+        if(H5Sselect_hyperslab(vds_sid, H5S_SELECT_SET, start, NULL,
+                    MAX_DIMS[i], NULL) < 0)
+            TEST_ERROR
+        /* stack the sources vertically (dim 1) in the VDS */
+        map_start += PLANES[i][1];
+
+        /* Add VDS mapping */
+        if(H5Pset_virtual(vds_dcplid, vds_sid, FILE_NAMES[i],
+                    SOURCE_DSET_PATH, src_sid) < 0)
+            TEST_ERROR
+
+        /* close */
+        if(H5Sclose(src_sid) < 0)
+            TEST_ERROR
+        if(H5Pclose(src_dcplid) < 0)
+            TEST_ERROR
+        if(H5Dclose(did) < 0)
+            TEST_ERROR
+        if(H5Fclose(fid) < 0)
+            TEST_ERROR
+
+    } /* end for */
+
+
+    /*******************
+     * Create VDS file *
+     *******************/
+
+    /* file */
+    if((fid = H5Fcreate(VDS_FILE_NAME, H5F_ACC_TRUNC,
+            H5P_DEFAULT, faplid)) < 0)
+        TEST_ERROR
+
+    /* dataset */
+    if((did = H5Dcreate2(fid, VDS_DSET_NAME, VDS_DATATYPE, vds_sid,
+            H5P_DEFAULT, vds_dcplid, H5P_DEFAULT)) < 0)
+        TEST_ERROR
+
+    /* close */
+    if(H5Pclose(faplid) < 0)
+        TEST_ERROR
+    if(H5Pclose(vds_dcplid) < 0)
+        TEST_ERROR
+    if(H5Sclose(vds_sid) < 0)
+        TEST_ERROR
+    if(H5Dclose(did) < 0)
+        TEST_ERROR
+    if(H5Fclose(fid) < 0)
+        TEST_ERROR
+
+    return EXIT_SUCCESS;
+
+error:
+
+    /* best-effort cleanup of whichever handles were opened */
+    H5E_BEGIN_TRY {
+        if(faplid >= 0)
+            (void)H5Pclose(faplid);
+        if(src_sid >= 0)
+            (void)H5Sclose(src_sid);
+        if(src_dcplid >= 0)
+            (void)H5Pclose(src_dcplid);
+        if(vds_sid >= 0)
+            (void)H5Sclose(vds_sid);
+        if(vds_dcplid >= 0)
+            (void)H5Pclose(vds_dcplid);
+        if(fid >= 0)
+            (void)H5Fclose(fid);
+        if(did >= 0)
+            (void)H5Dclose(did);
+    } H5E_END_TRY
+
+    return EXIT_FAILURE;
+
+} /* end main */
+
diff --git a/test/vds_swmr_reader.c b/test/vds_swmr_reader.c
new file mode 100644
index 0000000..34cb3eb
--- /dev/null
+++ b/test/vds_swmr_reader.c
@@ -0,0 +1,144 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "h5test.h"
+#include "vds_swmr.h"
+
+/* SWMR reader for the VDS test: polls the virtual dataset, refreshing its
+ * metadata each pass, validating the reported extents, and reading the
+ * newest plane, until the writers have produced N_PLANES_TO_WRITE planes.
+ * Exit status: EXIT_SUCCESS / EXIT_FAILURE.
+ * (argc/argv are unused; the signature is kept for a standard main.) */
+int
+main(int argc, char *argv[])
+{
+    hid_t fid = -1; /* HDF5 file ID */
+    hid_t did = -1; /* dataset ID */
+    hid_t msid = -1; /* memory dataspace ID */
+    hid_t fsid = -1; /* file dataspace ID */
+
+    hsize_t start[RANK]; /* hyperslab start point */
+
+    int n_elements = 0; /* size of buffer (elements) */
+    size_t size = 0; /* size of buffer (bytes) */
+    int *buffer = NULL; /* data buffer */
+
+    int n_dims = -1; /* # dimensions in dataset */
+    hsize_t dims[RANK]; /* current size of dataset */
+    hsize_t max_dims[RANK]; /* max size of dataset */
+
+    hbool_t has_errors = FALSE;/* if the read data contains errors */
+
+    int i; /* iterator (NOTE(review): unused in this function) */
+
+
+    /* Open the VDS file and dataset */
+    if((fid = H5Fopen(VDS_FILE_NAME, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, H5P_DEFAULT)) < 0)
+        TEST_ERROR
+    if((did = H5Dopen2(fid, VDS_DSET_NAME, H5P_DEFAULT)) < 0)
+        TEST_ERROR
+
+    /* Create the read buffer (one full-height plane of ints) */
+    n_elements = VDS_PLANE[1] * VDS_PLANE[2];
+    size = n_elements * sizeof(int);
+    if(NULL == (buffer = (int *)HDmalloc(size)))
+        TEST_ERROR
+
+    /* Create memory dataspace */
+    if((msid = H5Screate_simple(RANK, VDS_PLANE, NULL)) < 0)
+        TEST_ERROR
+
+    /* Read data until the dataset is full (via the writer).
+     * NOTE(review): this is a busy-wait — there is no sleep between polls,
+     * so the reader spins at full speed until the writers catch up. */
+    do {
+
+        /* Refresh metadata so extent changes made by the writers are seen */
+        if(H5Drefresh(did) < 0)
+            TEST_ERROR
+
+        /* Get the dataset dimensions */
+        if((fsid = H5Dget_space(did)) < 0)
+            TEST_ERROR
+        if(H5Sget_simple_extent_dims(fsid, dims, max_dims) < 0)
+            TEST_ERROR
+
+        /* Check the reported size of the VDS.
+         * NOTE(review): this repeats the H5Sget_simple_extent_dims call
+         * just above — the first call is redundant. */
+        if((n_dims = H5Sget_simple_extent_ndims(fsid)) < 0)
+            TEST_ERROR
+        if(n_dims != RANK)
+            TEST_ERROR
+        if(H5Sget_simple_extent_dims(fsid, dims, max_dims) < 0)
+            TEST_ERROR
+        /* NOTE: Don't care what dims[0] is. */
+        if(dims[1] != FULL_HEIGHT)
+            TEST_ERROR
+        if(dims[2] != WIDTH)
+            TEST_ERROR
+        if(max_dims[0] != H5S_UNLIMITED)
+            TEST_ERROR
+        if(max_dims[1] != FULL_HEIGHT)
+            TEST_ERROR
+        if(max_dims[2] != WIDTH)
+            TEST_ERROR
+
+        /* Continue if there's nothing to read */
+        if(0 == dims[0]) {
+            if(H5Sclose(fsid) < 0)
+                TEST_ERROR
+            continue;
+        }
+
+        /* Read a plane from the VDS */
+        /* At this time, we just make sure we can read planes without errors. */
+        start[0] = dims[0] - 1; /* newest plane */
+        start[1] = 0;
+        start[2] = 0;
+        if(H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, VDS_PLANE, NULL) < 0)
+            TEST_ERROR
+        if(H5Dread(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, buffer) < 0)
+            TEST_ERROR
+
+        if(H5Sclose(fsid) < 0)
+            TEST_ERROR
+
+    } while (dims[0] < N_PLANES_TO_WRITE);
+
+    /* Close file and dataset */
+    if(H5Sclose(msid) < 0)
+        TEST_ERROR
+    if(H5Dclose(did) < 0)
+        TEST_ERROR
+    if(H5Fclose(fid) < 0)
+        TEST_ERROR
+
+    HDfree(buffer);
+
+    HDfprintf(stderr, "SWMR reader exited successfully\n");
+    return EXIT_SUCCESS;
+
+error:
+
+    /* best-effort cleanup; NOTE(review): uses bare free() here vs HDfree()
+     * on the success path — inconsistent with the file's HD* convention */
+    H5E_BEGIN_TRY {
+        if(fid >= 0)
+            (void)H5Fclose(fid);
+        if(did >= 0)
+            (void)H5Dclose(did);
+        if(msid >= 0)
+            (void)H5Sclose(msid);
+        if(fsid >= 0)
+            (void)H5Sclose(fsid);
+        if(buffer != NULL)
+            free(buffer);
+    } H5E_END_TRY
+
+    HDfprintf(stderr, "ERROR: SWMR reader exited with errors\n");
+    return EXIT_FAILURE;
+
+} /* end main */
+
diff --git a/test/vds_swmr_writer.c b/test/vds_swmr_writer.c
new file mode 100644
index 0000000..fa1d1a0
--- /dev/null
+++ b/test/vds_swmr_writer.c
@@ -0,0 +1,159 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+
+#include "h5test.h"
+#include "vds_swmr.h"
+
+int
+main(int argc, char *argv[])
+{
+ int file_number = -1; /* Source file number */
+
+ hid_t fid = -1; /* HDF5 file ID */
+ hid_t faplid = -1; /* file access property list ID */
+ hid_t did = -1; /* dataset ID */
+ hid_t msid = -1; /* memory dataspace ID */
+ hid_t fsid = -1; /* file dataspace ID */
+
+ hsize_t extent[RANK]; /* dataset extents */
+ hsize_t start[RANK]; /* hyperslab start point */
+
+ int *buffer = NULL; /* data buffer */
+ int value = -1; /* value written to datasets */
+
+ hsize_t n_elements = 0; /* number of elements in a plane */
+
+ hsize_t i; /* iterator */
+ hsize_t j; /* iterator */
+
+
+ /******************************
+ * Fill a source dataset file *
+ ******************************/
+
+ /* The file number is passed on the command line.
+ * This is an integer index into the FILE_NAMES array.
+ */
+ if(argc != 2) {
+ HDfprintf(stderr, "ERROR: Must pass the source file number on the command line.\n");
+ return EXIT_FAILURE;
+ }
+
+ file_number = atoi(argv[1]);
+ if(file_number < 0 || file_number >= N_SOURCES)
+ TEST_ERROR
+
+ /* Open the source file and dataset */
+ /* All SWMR files need to use the latest file format */
+ if((faplid = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR
+ if(H5Pset_libver_bounds(faplid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ TEST_ERROR
+ if((fid = H5Fopen(FILE_NAMES[file_number], H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, faplid)) < 0)
+ TEST_ERROR
+ if((did = H5Dopen2(fid, SOURCE_DSET_PATH, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+
+ /* Create a data buffer that represents a plane */
+ n_elements = PLANES[file_number][1] * PLANES[file_number][2];
+ if(NULL == (buffer = (int *)HDmalloc(n_elements * sizeof(int))))
+ TEST_ERROR
+
+ /* Create the memory dataspace */
+ if((msid = H5Screate_simple(RANK, PLANES[file_number], NULL)) < 0)
+ TEST_ERROR
+
+ /* Write planes to the dataset */
+ for(i = 0; i < N_PLANES_TO_WRITE; i++) {
+
+ unsigned delay; /* Time interval between plane writes */
+
+ /* Set the dataset's extent. This is inefficient but that's ok here. */
+ extent[0] = i + 1;
+ extent[1] = PLANES[file_number][1];
+ extent[2] = PLANES[file_number][2];
+ if(H5Dset_extent(did, extent) < 0)
+ TEST_ERROR
+
+ /* Get the file dataspace */
+ if((fsid = H5Dget_space(did)) < 0)
+ TEST_ERROR
+
+ /* Each plane is filled with the plane number as a data value. */
+ value = (((int)i + 1) * 10) + (int)i;
+ for(j = 0; j < n_elements; j++)
+ buffer[j] = value;
+
+ /* Set up the hyperslab for writing. */
+ start[0] = i;
+ start[1] = 0;
+ start[2] = 0;
+ if(H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, PLANES[file_number], NULL) < 0)
+ TEST_ERROR
+
+ /* Write the plane to the dataset. */
+ if(H5Dwrite(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, buffer) < 0)
+ TEST_ERROR
+
+ /* Wait one second between writing planes */
+ delay = time(0) + 1;
+ while(time(0) < delay)
+ ;
+
+ /* Flush */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ TEST_ERROR
+
+ } /* end for */
+
+ if(H5Pclose(faplid) < 0)
+ TEST_ERROR
+ if(H5Sclose(msid) < 0)
+ TEST_ERROR
+ if(H5Sclose(fsid) < 0)
+ TEST_ERROR
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+ HDfree(buffer);
+
+ HDfprintf(stderr, "SWMR writer exited successfully\n");
+ return EXIT_SUCCESS;
+
+error:
+
+ H5E_BEGIN_TRY {
+ if(fid >= 0)
+ (void)H5Fclose(fid);
+ if(faplid >= 0)
+ (void)H5Pclose(faplid);
+ if(did >= 0)
+ (void)H5Dclose(did);
+ if(msid >= 0)
+ (void)H5Sclose(msid);
+ if(fsid >= 0)
+ (void)H5Sclose(fsid);
+ if(buffer != NULL)
+ HDfree(buffer);
+ } H5E_END_TRY
+
+ HDfprintf(stderr, "ERROR: SWMR writer exited with errors\n");
+ return EXIT_FAILURE;
+
+} /* end main */
+
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index b848827..8965cb4 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -389,8 +389,10 @@ static hbool_t serve_rw_count_reset_request(struct mssg_t * mssg_ptr);
/* call back functions & related data structures */
-static herr_t datum_get_load_size(const void * udata_ptr,
- size_t *image_len_ptr);
+static herr_t datum_get_load_size(const void *image_ptr,
+ const void *udata_ptr,
+ size_t *image_len_ptr,
+ size_t *actual_len_ptr);
static void * datum_deserialize(const void * image_ptr,
size_t len,
@@ -440,6 +442,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
/* mem_type */ H5FD_MEM_DEFAULT,
/* flags */ H5AC__CLASS_SKIP_READS | H5AC__CLASS_SKIP_WRITES,
/* get_load_size */ (H5AC_get_load_size_func_t)datum_get_load_size,
+ /* verify_chksum */ NULL,
/* deserialize */ (H5AC_deserialize_func_t)datum_deserialize,
/* image_len */ (H5AC_image_len_func_t)datum_image_len,
/* pre_serialize */ (H5AC_pre_serialize_func_t)NULL,
@@ -482,7 +485,7 @@ static hbool_t setup_cache_for_test(hid_t * fid_ptr,
H5C_t ** cache_ptr_ptr,
int metadata_write_strategy);
static void setup_rand(void);
-static hbool_t take_down_cache(hid_t fid);
+static hbool_t take_down_cache(hid_t fid, H5C_t * cache_ptr);
static hbool_t verify_entry_reads(haddr_t addr, int expected_entry_reads);
static hbool_t verify_entry_writes(haddr_t addr, int expected_entry_writes);
static hbool_t verify_total_reads(int expected_total_reads);
@@ -2333,8 +2336,8 @@ serve_rw_count_reset_request(struct mssg_t * mssg_ptr)
*-------------------------------------------------------------------------
*/
static herr_t
-datum_get_load_size(const void * udata_ptr,
- size_t *image_len_ptr)
+datum_get_load_size(const void *image_ptr, const void *udata_ptr,
+ size_t *image_len_ptr, size_t *actual_len_ptr)
{
haddr_t addr = *(haddr_t *)udata_ptr;
int idx;
@@ -3052,7 +3055,7 @@ expunge_entry(H5F_t * file_ptr,
HDassert( ! ((entry_ptr->header).is_dirty) );
result = H5C_get_entry_status(file_ptr, entry_ptr->base_addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL);
+ NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -3124,7 +3127,7 @@ insert_entry(H5C_t * cache_ptr,
entry_ptr->dirty = TRUE;
result = H5AC_insert_entry(file_ptr, H5P_DATASET_XFER_DEFAULT, &(types[0]),
- entry_ptr->base_addr, (void *)(&(entry_ptr->header)), flags);
+ entry_ptr->base_addr, (void *)(&(entry_ptr->header)), flags);
if ( ( result < 0 ) ||
( entry_ptr->header.type != &(types[0]) ) ||
@@ -4489,35 +4492,77 @@ setup_rand(void)
*
*****************************************************************************/
static hbool_t
-take_down_cache(hid_t fid)
+take_down_cache(hid_t fid, H5C_t * cache_ptr)
{
- hbool_t success = FALSE; /* will set to TRUE if appropriate. */
+ hbool_t success = TRUE; /* will set to FALSE if appropriate. */
- /* close the file and delete it */
- if ( H5Fclose(fid) < 0 ) {
+ /* flush the file -- this should write out any remaining test
+ * entries in the cache.
+ */
+ if ( ( success ) && ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) ) {
+ success = FALSE;
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fclose() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
world_mpi_rank, FUNC);
}
+ }
- } else if ( world_mpi_rank == world_server_mpi_rank ) {
+ /* Now reset the sync point done callback. Must do this as with
+ * the SWMR mods, the cache will do additional I/O on file close
+ * un-related to the test entries, and thereby corrupt our counts
+ * of entry writes.
+ */
+ if ( success ) {
- if ( HDremove(filenames[0]) < 0 ) {
+ if ( H5AC__set_sync_point_done_callback(cache_ptr, NULL) != SUCCEED ) {
+ success = FALSE;
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: HDremove() failed.\n",
+ HDfprintf(stdout,
+ "%d:%s: H5AC__set_sync_point_done_callback failed.\n",
world_mpi_rank, FUNC);
}
- } else {
+ }
+
+
+ }
- success = TRUE;
+ /* close the file */
+ if ( ( success ) && ( H5Fclose(fid) < 0 ) ) {
+
+ success = FALSE;
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Fclose() failed.\n",
+ world_mpi_rank, FUNC);
}
- } else {
- success = TRUE;
+ }
+
+ if ( success ) {
+
+ if ( world_mpi_rank == world_server_mpi_rank ) {
+
+ if ( HDremove(filenames[0]) < 0 ) {
+
+ success = FALSE;
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: HDremove() failed.\n",
+ world_mpi_rank, FUNC);
+ }
+ }
+ } else {
+
+ /* verify that there have been no further writes of test
+ * entries during the close
+ */
+ success = verify_total_writes(0);
+
+ }
}
return(success);
@@ -5574,7 +5619,7 @@ smoke_check_1(int metadata_write_strategy)
if ( fid >= 0 ) {
- if ( ! take_down_cache(fid) ) {
+ if ( ! take_down_cache(fid, cache_ptr) ) {
nerrors++;
if ( verbose ) {
@@ -5796,7 +5841,7 @@ smoke_check_2(int metadata_write_strategy)
if ( fid >= 0 ) {
- if ( ! take_down_cache(fid) ) {
+ if ( ! take_down_cache(fid, cache_ptr) ) {
nerrors++;
if ( verbose ) {
@@ -6119,7 +6164,7 @@ smoke_check_3(int metadata_write_strategy)
if ( fid >= 0 ) {
- if ( ! take_down_cache(fid) ) {
+ if ( ! take_down_cache(fid, cache_ptr) ) {
nerrors++;
if ( verbose ) {
@@ -6436,7 +6481,7 @@ smoke_check_4(int metadata_write_strategy)
if ( fid >= 0 ) {
- if ( ! take_down_cache(fid) ) {
+ if ( ! take_down_cache(fid, cache_ptr) ) {
nerrors++;
if ( verbose ) {
@@ -6646,7 +6691,7 @@ smoke_check_5(int metadata_write_strategy)
if ( fid >= 0 ) {
- if ( ! take_down_cache(fid) ) {
+ if ( ! take_down_cache(fid, cache_ptr) ) {
nerrors++;
if ( verbose ) {
@@ -6995,7 +7040,7 @@ trace_file_check(int metadata_write_strategy)
if ( fid >= 0 ) {
- if ( ! take_down_cache(fid) ) {
+ if ( ! take_down_cache(fid, cache_ptr) ) {
nerrors++;
if ( verbose ) {
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index ae022fb..d7d27b6 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -2515,6 +2515,8 @@ compress_readAll(void)
int rank=1; /* Dataspace rank */
hsize_t dim=dim0; /* Dataspace dimensions */
unsigned u; /* Local index variable */
+ unsigned chunk_opts; /* Chunk options */
+ unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
DATATYPE *data_read = NULL; /* data buffer */
DATATYPE *data_orig = NULL; /* expected data buffer */
const char *filename;
@@ -2541,116 +2543,132 @@ compress_readAll(void)
for(u=0; u<dim;u++)
data_orig[u]=u;
- /* Process zero creates the file with a compressed, chunked dataset */
- if(mpi_rank==0) {
- hsize_t chunk_dim; /* Chunk dimensions */
-
- /* Create the file */
- fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((fid > 0), "H5Fcreate succeeded");
-
- /* Create property list for chunking and compression */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl > 0), "H5Pcreate succeeded");
-
- ret = H5Pset_layout(dcpl, H5D_CHUNKED);
- VRFY((ret >= 0), "H5Pset_layout succeeded");
-
- /* Use eight chunks */
- chunk_dim = dim / 8;
- ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
- VRFY((ret >= 0), "H5Pset_chunk succeeded");
+ /* Run test both with and without filters disabled on partial chunks */
+ for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
+ /* Process zero creates the file with a compressed, chunked dataset */
+ if(mpi_rank==0) {
+ hsize_t chunk_dim; /* Chunk dimensions */
+
+ /* Create the file */
+ fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((fid > 0), "H5Fcreate succeeded");
+
+ /* Create property list for chunking and compression */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl > 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_layout(dcpl, H5D_CHUNKED);
+ VRFY((ret >= 0), "H5Pset_layout succeeded");
+
+ /* Use eight chunks */
+ chunk_dim = dim / 8;
+ ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* Set chunk options appropriately */
+ if(disable_partial_chunk_filters) {
+ ret = H5Pget_chunk_opts(dcpl, &chunk_opts);
+ VRFY((ret>=0),"H5Pget_chunk_opts succeeded");
+
+ chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+
+ ret = H5Pset_chunk_opts(dcpl, chunk_opts);
+ VRFY((ret>=0),"H5Pset_chunk_opts succeeded");
+ } /* end if */
+
+ ret = H5Pset_deflate(dcpl, 9);
+ VRFY((ret >= 0), "H5Pset_deflate succeeded");
+
+ /* Create dataspace */
+ dataspace = H5Screate_simple(rank, &dim, NULL);
+ VRFY((dataspace > 0), "H5Screate_simple succeeded");
+
+ /* Create dataset */
+ dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset > 0), "H5Dcreate2 succeeded");
+
+ /* Write compressed data */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* Close objects */
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Sclose(dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
- ret = H5Pset_deflate(dcpl, 9);
- VRFY((ret >= 0), "H5Pset_deflate succeeded");
+ /* Wait for file to be created */
+ MPI_Barrier(comm);
- /* Create dataspace */
- dataspace = H5Screate_simple(rank, &dim, NULL);
- VRFY((dataspace > 0), "H5Screate_simple succeeded");
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
- /* Create dataset */
- dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset > 0), "H5Dcreate2 succeeded");
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
- /* Write compressed data */
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
- VRFY((ret >= 0), "H5Dwrite succeeded");
+ /* open the file collectively */
+ fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ VRFY((fid > 0), "H5Fopen succeeded");
- /* Close objects */
- ret = H5Pclose(dcpl);
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Sclose(dataspace);
- VRFY((ret >= 0), "H5Sclose succeeded");
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
- }
-
- /* Wait for file to be created */
- MPI_Barrier(comm);
-
- /* -------------------
- * OPEN AN HDF5 FILE
- * -------------------*/
-
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
- VRFY((fid > 0), "H5Fopen succeeded");
-
- /* Release file-access template */
- ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "H5Pclose succeeded");
- /* Open dataset with compressed chunks */
- dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
- VRFY((dataset > 0), "H5Dopen2 succeeded");
+ /* Open dataset with compressed chunks */
+ dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2 succeeded");
- /* Try reading & writing data */
- if(dataset>0) {
- /* Create dataset transfer property list */
- xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist > 0), "H5Pcreate succeeded");
+ /* Try reading & writing data */
+ if(dataset>=0) {
+ /* Create dataset transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist > 0), "H5Pcreate succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
- }
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
- /* Try reading the data */
- ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ /* Try reading the data */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- /* Verify data read */
- for(u=0; u<dim; u++)
- if(data_orig[u]!=data_read[u]) {
- printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
- (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
- nerrors++;
- }
+ /* Verify data read */
+ for(u=0; u<dim; u++)
+ if(data_orig[u]!=data_read[u]) {
+ printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
+ (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
+ nerrors++;
+ }
- /* Writing to the compressed, chunked dataset in parallel should fail */
- H5E_BEGIN_TRY {
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- } H5E_END_TRY;
- VRFY((ret < 0), "H5Dwrite failed");
+ /* Writing to the compressed, chunked dataset in parallel should fail */
+ H5E_BEGIN_TRY {
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ } H5E_END_TRY;
+ VRFY((ret < 0), "H5Dwrite failed");
- ret = H5Pclose(xfer_plist);
- VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose succeeded");
- } /* end if */
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ } /* end if */
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
+ /* Close file */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ } /* end for */
/* release data buffers */
if(data_read) HDfree(data_read);
@@ -4134,16 +4152,20 @@ dataset_atomicity(void)
MPI_Barrier (comm);
/* make sure setting atomicity fails on a serial file ID */
- /* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT);
- VRFY((fid >= 0), "H5Fopen succeeed");
+ /* file locking allows only one file open (serial) for writing */
+ if(MAINPROCESS){
+ fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT);
+ VRFY((fid >= 0), "H5Fopen succeeed");
+ }
/* should fail */
ret = H5Fset_mpi_atomicity (fid , TRUE);
VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
+ if(MAINPROCESS){
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
MPI_Barrier (comm);
diff --git a/testpar/t_file_image.c b/testpar/t_file_image.c
index a2246b6..96e65d5 100644
--- a/testpar/t_file_image.c
+++ b/testpar/t_file_image.c
@@ -240,10 +240,10 @@ file_image_daisy_chain_test(void)
if(vector_ptr[i] != i)
vector_ok = FALSE;
VRFY((vector_ok), "verified received vector.");
-
- HDfree(vector_ptr);
- vector_ptr = NULL;
+ HDfree(vector_ptr);
+ vector_ptr = NULL;
+
/* 7) closes the core file and exit. */
err = H5Sclose(space_id);
diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c
index 5e1cd04..14f37be 100644
--- a/testpar/t_filter_read.c
+++ b/testpar/t_filter_read.c
@@ -213,6 +213,8 @@ test_filter_read(void)
hid_t dc; /* HDF5 IDs */
const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */
hsize_t null_size; /* Size of dataset without filters */
+ unsigned chunk_opts; /* Chunk options */
+ unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
herr_t hrc;
const char *filename;
hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
@@ -254,81 +256,111 @@ test_filter_read(void)
hrc = H5Pclose (dc);
VRFY(hrc>=0,"H5Pclose");
- /*----------------------------------------------------------
- * STEP 1: Test Fletcher32 Checksum by itself.
- *----------------------------------------------------------
- */
-
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0,"H5Pset_filter");
+ /* Run steps 1-3 both with and without filters disabled on partial chunks */
+ for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
+ /* Set chunk options appropriately */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc>=0,"H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0,"H5Pset_filter");
+ hrc = H5Pset_chunk (dc, 2, chunk_size);
+ VRFY(hrc>=0,"H5Pset_filter");
- hrc = H5Pset_filter (dc,H5Z_FILTER_FLETCHER32,0,0,NULL);
- VRFY(hrc>=0,"H5Pset_filter");
+ hrc = H5Pget_chunk_opts(dc, &chunk_opts);
+ VRFY(hrc>=0,"H5Pget_chunk_opts");
- filter_read_internal(filename,dc,&fletcher32_size);
- VRFY(fletcher32_size > null_size,"Size after checksumming is incorrect.");
+ if(disable_partial_chunk_filters)
+ chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
- /* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+ hrc = H5Pclose (dc);
+ VRFY(hrc>=0,"H5Pclose");
+ /*----------------------------------------------------------
+ * STEP 1: Test Fletcher32 Checksum by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_FLETCHER32
- /*----------------------------------------------------------
- * STEP 2: Test deflation by itself.
- *----------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_DEFLATE
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc>=0,"H5Pset_filter");
- dc = H5Pcreate(H5P_DATASET_CREATE);
- VRFY(dc>=0, "H5Pcreate");
+ hrc = H5Pset_chunk (dc, 2, chunk_size);
+ VRFY(hrc>=0,"H5Pset_filter");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
- VRFY(hrc>=0, "H5Pset_chunk");
+ hrc = H5Pset_chunk_opts (dc, chunk_opts);
+ VRFY(hrc>=0,"H5Pset_chunk_opts");
- hrc = H5Pset_deflate (dc, 6);
- VRFY(hrc>=0, "H5Pset_deflate");
+ hrc = H5Pset_filter (dc,H5Z_FILTER_FLETCHER32,0,0,NULL);
+ VRFY(hrc>=0,"H5Pset_filter");
- filter_read_internal(filename,dc,&deflate_size);
+ filter_read_internal(filename,dc,&fletcher32_size);
+ VRFY(fletcher32_size > null_size,"Size after checksumming is incorrect.");
- /* Clean up objects used for this test */
- hrc = H5Pclose (dc);
- VRFY(hrc>=0, "H5Pclose");
+ /* Clean up objects used for this test */
+ hrc = H5Pclose (dc);
+ VRFY(hrc>=0, "H5Pclose");
-#endif /* H5_HAVE_FILTER_DEFLATE */
+#endif /* H5_HAVE_FILTER_FLETCHER32 */
+ /*----------------------------------------------------------
+ * STEP 2: Test deflation by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_DEFLATE
- /*----------------------------------------------------------
- * STEP 3: Test szip compression by itself.
- *----------------------------------------------------------
- */
-#ifdef H5_HAVE_FILTER_SZIP
- if(h5_szip_can_encode() == 1) {
dc = H5Pcreate(H5P_DATASET_CREATE);
VRFY(dc>=0, "H5Pcreate");
hrc = H5Pset_chunk (dc, 2, chunk_size);
VRFY(hrc>=0, "H5Pset_chunk");
- hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
- VRFY(hrc>=0, "H5Pset_szip");
+ hrc = H5Pset_chunk_opts (dc, chunk_opts);
+ VRFY(hrc>=0,"H5Pset_chunk_opts");
- filter_read_internal(filename,dc,&szip_size);
+ hrc = H5Pset_deflate (dc, 6);
+ VRFY(hrc>=0, "H5Pset_deflate");
+
+ filter_read_internal(filename,dc,&deflate_size);
/* Clean up objects used for this test */
hrc = H5Pclose (dc);
VRFY(hrc>=0, "H5Pclose");
- }
+
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ /*----------------------------------------------------------
+ * STEP 3: Test szip compression by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_SZIP
+ if(h5_szip_can_encode() == 1) {
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc>=0, "H5Pcreate");
+
+ hrc = H5Pset_chunk (dc, 2, chunk_size);
+ VRFY(hrc>=0, "H5Pset_chunk");
+
+ hrc = H5Pset_chunk_opts (dc, chunk_opts);
+ VRFY(hrc>=0,"H5Pset_chunk_opts");
+
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ VRFY(hrc>=0, "H5Pset_szip");
+
+ filter_read_internal(filename,dc,&szip_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose (dc);
+ VRFY(hrc>=0, "H5Pclose");
+ }
#endif /* H5_HAVE_FILTER_SZIP */
+ } /* end for */
/*----------------------------------------------------------
* STEP 4: Test shuffling by itself.
*----------------------------------------------------------
*/
-
+#ifdef H5_HAVE_FILTER_SHUFFLE
dc = H5Pcreate(H5P_DATASET_CREATE);
VRFY(dc>=0, "H5Pcreate");
@@ -345,12 +377,13 @@ test_filter_read(void)
hrc = H5Pclose (dc);
VRFY(hrc>=0, "H5Pclose");
+#endif /* H5_HAVE_FILTER_SHUFFLE */
/*----------------------------------------------------------
* STEP 5: Test shuffle + deflate + checksum in any order.
*----------------------------------------------------------
*/
-#ifdef H5_HAVE_FILTER_DEFLATE
+#if defined H5_HAVE_FILTER_DEFLATE && defined H5_HAVE_FILTER_SHUFFLE && defined H5_HAVE_FILTER_FLETCHER32
/* Testing shuffle+deflate+checksum filters (checksum first) */
dc = H5Pcreate(H5P_DATASET_CREATE);
VRFY(dc>=0, "H5Pcreate");
@@ -395,13 +428,13 @@ test_filter_read(void)
hrc = H5Pclose (dc);
VRFY(hrc>=0, "H5Pclose");
-#endif /* H5_HAVE_FILTER_DEFLATE */
+#endif /* H5_HAVE_FILTER_DEFLATE && H5_HAVE_FILTER_SHUFFLE && H5_HAVE_FILTER_FLETCHER32 */
/*----------------------------------------------------------
* STEP 6: Test shuffle + szip + checksum in any order.
*----------------------------------------------------------
*/
-#ifdef H5_HAVE_FILTER_SZIP
+#if defined H5_HAVE_FILTER_SZIP && defined H5_HAVE_FILTER_SHUFFLE && defined H5_HAVE_FILTER_FLETCHER32
/* Testing shuffle+szip(with encoder)+checksum filters(checksum first) */
dc = H5Pcreate(H5P_DATASET_CREATE);
@@ -453,6 +486,6 @@ test_filter_read(void)
VRFY(hrc>=0, "H5Pclose");
}
-#endif /* H5_HAVE_FILTER_SZIP */
+#endif /* H5_HAVE_FILTER_SZIP && H5_HAVE_FILTER_SHUFFLE && H5_HAVE_FILTER_FLETCHER32 */
}
diff --git a/tools/Makefile.am b/tools/Makefile.am
index 095cc30..bffc14d 100644
--- a/tools/Makefile.am
+++ b/tools/Makefile.am
@@ -24,7 +24,6 @@ include $(top_srcdir)/config/commence.am
CONFIG=ordered
# All subdirectories
-SUBDIRS=lib h5diff h5ls h5dump misc h5import h5repack h5jam h5copy h5stat \
- perform
+SUBDIRS=lib h5diff h5ls h5dump misc h5import h5repack h5jam h5copy h5stat h5format_convert perform
include $(top_srcdir)/config/conclude.am
diff --git a/tools/h5copy/h5copygentest.c b/tools/h5copy/h5copygentest.c
index 49204f5..45d4e16 100644
--- a/tools/h5copy/h5copygentest.c
+++ b/tools/h5copy/h5copygentest.c
@@ -22,6 +22,7 @@
/* HDF file names */
#define HDF_FILE1 "h5copytst.h5"
+#define HDF_FILE1_NEW "h5copytst_new.h5"
#define HDF_FILE2 "h5copy_ref.h5"
#define HDF_EXT_SRC_FILE "h5copy_extlinks_src.h5"
#define HDF_EXT_TRG_FILE "h5copy_extlinks_trg.h5"
@@ -644,20 +645,41 @@ out:
*------------------------------------------------------------------------*/
static void Test_Obj_Copy(void)
{
- hid_t fid=0;
+ hid_t fid = (-1); /* File id */
+ hid_t fapl_new = (-1); /* File access property id */
+ unsigned new_format; /* New format or old format */
- /* Create source file */
- fid = H5Fcreate(HDF_FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- if (fid < 0)
- {
- fprintf(stderr, "Error: %s> H5Fcreate failed.\n", HDF_FILE1);
+ if((fapl_new = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
+ fprintf(stderr, "Error: H5Pcreate failed.\n");
goto out;
}
+ if(H5Pset_libver_bounds(fapl_new, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) {
+ fprintf(stderr, "Error: H5Pset_libver_bounds failed.\n");
+ goto out;
+ }
+
+ /* Test with old & new format groups */
+ for(new_format = FALSE; new_format <= TRUE; new_format++) {
+
+ /* Set the FAPL for the type of format */
+ /* Create source file */
+ if(new_format)
+ fid = H5Fcreate(HDF_FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_new);
+ else
+ fid = H5Fcreate(HDF_FILE1_NEW, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ if(fid < 0) {
+ fprintf(stderr, "Error: H5Fcreate failed.\n");
+ goto out;
+ }
+
+ gent_datasets(fid);
+ gent_empty_group(fid);
+ gent_nested_datasets(fid);
+ gent_nested_group(fid);
- gent_datasets(fid);
- gent_empty_group(fid);
- gent_nested_datasets(fid);
- gent_nested_group(fid);
+ H5Fclose(fid);
+ fid = (-1);
+ } /* end for */
out:
/*-----------------------------------------------------------------------
@@ -665,6 +687,8 @@ out:
*------------------------------------------------------------------------*/
if(fid > 0)
H5Fclose(fid);
+ if(fapl_new > 0)
+ H5Pclose(fapl_new);
}
/*-------------------------------------------------------------------------
diff --git a/tools/h5copy/testfiles/h5copytst.h5 b/tools/h5copy/testfiles/h5copytst.h5
index f407f82..0f10410 100644
--- a/tools/h5copy/testfiles/h5copytst.h5
+++ b/tools/h5copy/testfiles/h5copytst.h5
Binary files differ
diff --git a/tools/h5copy/testfiles/h5copytst_new.h5 b/tools/h5copy/testfiles/h5copytst_new.h5
new file mode 100644
index 0000000..57e1805
--- /dev/null
+++ b/tools/h5copy/testfiles/h5copytst_new.h5
Binary files differ
diff --git a/tools/h5copy/testfiles/h5copytst_new.out.ls b/tools/h5copy/testfiles/h5copytst_new.out.ls
new file mode 100644
index 0000000..9df6b2e
--- /dev/null
+++ b/tools/h5copy/testfiles/h5copytst_new.out.ls
@@ -0,0 +1,502 @@
+#############################
+Expected output for 'h5ls ../testfiles/h5copytst_new.out.h5'
+#############################
+Opened "../testfiles/h5copytst_new.out.h5" with sec2 driver.
+/ Group
+ Location: 1:96
+ Links: 1
+/A Group
+ Location: 1:65602
+ Links: 1
+/A/B1 Group
+ Location: 1:66306
+ Links: 1
+/A/B1/simple Dataset {6/6}
+ Location: 1:65509
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/A/B2 Group
+ Location: 1:69807
+ Links: 1
+/A/B2/simple2 Dataset {6/6}
+ Location: 1:69714
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/C Group
+ Location: 1:72980
+ Links: 1
+/C/D Group
+ Location: 1:73684
+ Links: 1
+/C/D/simple Dataset {6/6}
+ Location: 1:72887
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/E Group
+ Location: 1:76217
+ Links: 1
+/E/F Group
+ Location: 1:76921
+ Links: 1
+/E/F/grp_dsets Group
+ Location: 1:75044
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+/E/F/grp_dsets/chunk Dataset {6/6}
+ Location: 1:76014
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/E/F/grp_dsets/compact Dataset {6/6}
+ Location: 1:75367
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/E/F/grp_dsets/compound Dataset {2/2}
+ Location: 1:75470
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: struct {
+ "str1" +0 20-byte null-terminated ASCII string
+ "str2" +20 20-byte null-terminated ASCII string
+ } 40 bytes
+/E/F/grp_dsets/compressed Dataset {6/6}
+ Location: 1:75683
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Filter-0: deflate-1 OPT {1}
+ Type: 32-bit little-endian integer
+/E/F/grp_dsets/named_vl Dataset {2/2}
+ Location: 1:75853
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: shared-1:75793 variable length of
+ 32-bit little-endian integer
+/E/F/grp_dsets/nested_vl Dataset {2/2}
+ Location: 1:76108
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: variable length of
+ variable length of
+ 32-bit little-endian integer
+/E/F/grp_dsets/simple Dataset {6/6}
+ Location: 1:75274
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/E/F/grp_dsets/vl Type
+ Location: 1:75793
+ Links: 2
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Type: shared-1:75793 variable length of
+ 32-bit little-endian integer
+/G Group
+ Location: 1:85688
+ Links: 1
+/G/H Group
+ Location: 1:86392
+ Links: 1
+/G/H/grp_nested Group
+ Location: 1:84436
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+/G/H/grp_nested/grp_dsets Group
+ Location: 1:84515
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+/G/H/grp_nested/grp_dsets/chunk Dataset {6/6}
+ Location: 1:85485
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/G/H/grp_nested/grp_dsets/compact Dataset {6/6}
+ Location: 1:84838
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/G/H/grp_nested/grp_dsets/compound Dataset {2/2}
+ Location: 1:84941
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: struct {
+ "str1" +0 20-byte null-terminated ASCII string
+ "str2" +20 20-byte null-terminated ASCII string
+ } 40 bytes
+/G/H/grp_nested/grp_dsets/compressed Dataset {6/6}
+ Location: 1:85154
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Filter-0: deflate-1 OPT {1}
+ Type: 32-bit little-endian integer
+/G/H/grp_nested/grp_dsets/named_vl Dataset {2/2}
+ Location: 1:85324
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: shared-1:85264 variable length of
+ 32-bit little-endian integer
+/G/H/grp_nested/grp_dsets/nested_vl Dataset {2/2}
+ Location: 1:85579
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: variable length of
+ variable length of
+ 32-bit little-endian integer
+/G/H/grp_nested/grp_dsets/simple Dataset {6/6}
+ Location: 1:84745
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/G/H/grp_nested/grp_dsets/vl Type
+ Location: 1:85264
+ Links: 2
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Type: shared-1:85264 variable length of
+ 32-bit little-endian integer
+/chunk Dataset {6/6}
+ Location: 1:2238
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/compact Dataset {6/6}
+ Location: 1:4240
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/compound Dataset {2/2}
+ Location: 1:6391
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: struct {
+ "str1" +0 20-byte null-terminated ASCII string
+ "str2" +20 20-byte null-terminated ASCII string
+ } 40 bytes
+/compressed Dataset {6/6}
+ Location: 1:6604
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Filter-0: deflate-1 OPT {1}
+ Type: 32-bit little-endian integer
+/grp_dsets Group
+ Location: 1:27748
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+/grp_dsets/chunk Dataset {6/6}
+ Location: 1:28718
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_dsets/compact Dataset {6/6}
+ Location: 1:28071
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_dsets/compound Dataset {2/2}
+ Location: 1:28174
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: struct {
+ "str1" +0 20-byte null-terminated ASCII string
+ "str2" +20 20-byte null-terminated ASCII string
+ } 40 bytes
+/grp_dsets/compressed Dataset {6/6}
+ Location: 1:28387
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Filter-0: deflate-1 OPT {1}
+ Type: 32-bit little-endian integer
+/grp_dsets/named_vl Dataset {2/2}
+ Location: 1:28557
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: shared-1:28497 variable length of
+ 32-bit little-endian integer
+/grp_dsets/nested_vl Dataset {2/2}
+ Location: 1:28812
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: variable length of
+ variable length of
+ 32-bit little-endian integer
+/grp_dsets/simple Dataset {6/6}
+ Location: 1:27978
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_dsets/simple_group Dataset {6/6}
+ Location: 1:46180
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_dsets/vl Type
+ Location: 1:28497
+ Links: 2
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Type: shared-1:28497 variable length of
+ 32-bit little-endian integer
+/grp_empty Group
+ Location: 1:27693
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+/grp_nested Group
+ Location: 1:35940
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+/grp_nested/grp_dsets Group
+ Location: 1:36019
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+/grp_nested/grp_dsets/chunk Dataset {6/6}
+ Location: 1:36989
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_nested/grp_dsets/compact Dataset {6/6}
+ Location: 1:36342
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_nested/grp_dsets/compound Dataset {2/2}
+ Location: 1:36445
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: struct {
+ "str1" +0 20-byte null-terminated ASCII string
+ "str2" +20 20-byte null-terminated ASCII string
+ } 40 bytes
+/grp_nested/grp_dsets/compressed Dataset {6/6}
+ Location: 1:36658
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Filter-0: deflate-1 OPT {1}
+ Type: 32-bit little-endian integer
+/grp_nested/grp_dsets/named_vl Dataset {2/2}
+ Location: 1:36828
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: shared-1:36768 variable length of
+ 32-bit little-endian integer
+/grp_nested/grp_dsets/nested_vl Dataset {2/2}
+ Location: 1:37083
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: variable length of
+ variable length of
+ 32-bit little-endian integer
+/grp_nested/grp_dsets/simple Dataset {6/6}
+ Location: 1:36249
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_nested/grp_dsets/vl Type
+ Location: 1:36768
+ Links: 2
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Type: shared-1:36768 variable length of
+ 32-bit little-endian integer
+/grp_rename Group
+ Location: 1:47077
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+/grp_rename/chunk Dataset {6/6}
+ Location: 1:48047
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_rename/compact Dataset {6/6}
+ Location: 1:47400
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_rename/compound Dataset {2/2}
+ Location: 1:47503
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: struct {
+ "str1" +0 20-byte null-terminated ASCII string
+ "str2" +20 20-byte null-terminated ASCII string
+ } 40 bytes
+/grp_rename/compressed Dataset {6/6}
+ Location: 1:47716
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Filter-0: deflate-1 OPT {1}
+ Type: 32-bit little-endian integer
+/grp_rename/grp_dsets Group
+ Location: 1:55269
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+/grp_rename/grp_dsets/chunk Dataset {6/6}
+ Location: 1:56239
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_rename/grp_dsets/compact Dataset {6/6}
+ Location: 1:55592
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_rename/grp_dsets/compound Dataset {2/2}
+ Location: 1:55695
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: struct {
+ "str1" +0 20-byte null-terminated ASCII string
+ "str2" +20 20-byte null-terminated ASCII string
+ } 40 bytes
+/grp_rename/grp_dsets/compressed Dataset {6/6}
+ Location: 1:55908
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Chunks: {2} 8 bytes
+ Storage: <details removed for portability>
+ Filter-0: deflate-1 OPT {1}
+ Type: 32-bit little-endian integer
+/grp_rename/grp_dsets/named_vl Dataset {2/2}
+ Location: 1:56078
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: shared-1:56018 variable length of
+ 32-bit little-endian integer
+/grp_rename/grp_dsets/nested_vl Dataset {2/2}
+ Location: 1:56333
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: variable length of
+ variable length of
+ 32-bit little-endian integer
+/grp_rename/grp_dsets/simple Dataset {6/6}
+ Location: 1:55499
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_rename/grp_dsets/vl Type
+ Location: 1:56018
+ Links: 2
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Type: shared-1:56018 variable length of
+ 32-bit little-endian integer
+/grp_rename/named_vl Dataset {2/2}
+ Location: 1:47886
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: shared-1:47826 variable length of
+ 32-bit little-endian integer
+/grp_rename/nested_vl Dataset {2/2}
+ Location: 1:48141
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: variable length of
+ variable length of
+ 32-bit little-endian integer
+/grp_rename/simple Dataset {6/6}
+ Location: 1:47307
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/grp_rename/vl Type
+ Location: 1:47826
+ Links: 2
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Type: shared-1:47826 variable length of
+ 32-bit little-endian integer
+/named_vl Dataset {2/2}
+ Location: 1:8657
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: shared-1:8606 variable length of
+ 32-bit little-endian integer
+/nested_vl Dataset {2/2}
+ Location: 1:22942
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: variable length of
+ variable length of
+ 32-bit little-endian integer
+/rename Dataset {2/2}
+ Location: 1:27240
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: struct {
+ "str1" +0 20-byte null-terminated ASCII string
+ "str2" +20 20-byte null-terminated ASCII string
+ } 40 bytes
+/simple Dataset {6/6}
+ Location: 1:800
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
+/simple_top Dataset {6/6}
+ Location: 1:25099
+ Links: 1
+ Modified: XXXX-XX-XX XX:XX:XX XXX
+ Storage: <details removed for portability>
+ Type: 32-bit little-endian integer
diff --git a/tools/h5diff/h5diffgentest.c b/tools/h5diff/h5diffgentest.c
index 8f92660..937bd32 100644
--- a/tools/h5diff/h5diffgentest.c
+++ b/tools/h5diff/h5diffgentest.c
@@ -62,6 +62,8 @@ hsize_t H5TOOLS_MALLOCSIZE = (128 * 1024 * 1024);
#define FILE18 "h5diff_ext2softlink_trg.h5"
#define FILE19 "h5diff_dset_zero_dim_size1.h5"
#define FILE20 "h5diff_dset_zero_dim_size2.h5"
+#define FILE21 "h5diff_dset_idx1.h5"
+#define FILE22 "h5diff_dset_idx2.h5"
#define DANGLE_LINK_FILE1 "h5diff_danglelinks1.h5"
#define DANGLE_LINK_FILE2 "h5diff_danglelinks2.h5"
#define GRP_RECURSE_FILE1 "h5diff_grp_recurse1.h5"
@@ -165,6 +167,7 @@ static void gen_datareg(hid_t fid,int make_diffs);
/* utilities */
static int write_attr(hid_t loc_id,int rank,hsize_t *dims,const char *name,hid_t tid,void *buf);
static int write_dset(hid_t loc_id,int rank,hsize_t *dims,const char *name,hid_t tid,void *buf);
+static int gen_dataset_idx(const char *file, int format);
/*-------------------------------------------------------------------------
@@ -211,6 +214,15 @@ int main(void)
test_special_datasets(FILE19,0);
test_special_datasets(FILE20,1);
+ /*
+ * Generate 2 files: FILE21 with old format; FILE22 with new format
+ * Create 2 datasets in each file:
+ * One dataset: chunked layout, w/o filters, fixed dimension
+ * One dataset: chunked layout, w/ filters, fixed dimension
+ */
+ gen_dataset_idx(FILE21, 0);
+ gen_dataset_idx(FILE22, 1);
+
test_dangle_links(DANGLE_LINK_FILE1, DANGLE_LINK_FILE2);
test_group_recurse(GRP_RECURSE_FILE1, GRP_RECURSE_FILE2);
@@ -2105,6 +2117,95 @@ out:
}
/*-------------------------------------------------------------------------
+* Function: gen_dataset_idx
+*
+* Purpose: Create a file with either the new or old format
+* Create two datasets in the file:
+* one dataset: fixed dimension, chunked layout, w/o filters
+* one dataset: fixed dimension, chunked layout, w/ filters
+*
+*-------------------------------------------------------------------------
+*/
+static
+int gen_dataset_idx(const char *file, int format)
+{
+ hid_t fid; /* file id */
+ hid_t did, did2; /* dataset id */
+ hid_t sid; /* space id */
+ hid_t fapl; /* file access property id */
+ hid_t dcpl; /* dataset creation property id */
+ hsize_t dims[1] = {10}; /* dataset dimension */
+ hsize_t c_dims[1] = {2}; /* chunk dimension */
+ herr_t status; /* return status */
+ int buf[10]; /* data buffer */
+ int i; /* local index variable */
+
+ /* Get a copy of the file aaccess property */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+
+ /* Set the "use the latest format" bounds for creating objects in the file */
+ if(format) {
+ status = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ assert(status >= 0);
+ }
+
+ /* Create a file */
+ if((fid = H5Fcreate(file, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ return -1;
+
+ /* Create data */
+ for(i = 0; i < 10; i++)
+ buf[i] = i;
+
+ /* Set chunk */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ status = H5Pset_chunk(dcpl, 1, c_dims);
+ assert(status >= 0);
+
+ /* Create a 1D dataset */
+ sid = H5Screate_simple(1, dims, NULL);
+ did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+
+ /* Write to the dataset */
+ status = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
+ assert(status >= 0);
+
+#if defined (H5_HAVE_FILTER_DEFLATE)
+ /* set deflate data */
+ status = H5Pset_deflate(dcpl, 9);
+ assert(status >= 0);
+
+ /* Create and write the dataset */
+ did2 = H5Dcreate2(fid, "dset_filter", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ status = H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
+ assert(status >= 0);
+
+ /* Close the dataset */
+ status = H5Dclose(did2);
+ assert(status >= 0);
+
+#endif
+
+ /* closing: dataspace, dataset, file */
+ status = H5Sclose(sid);
+ assert(status >= 0);
+
+ status = H5Dclose(did);
+ assert(status >= 0);
+
+ status = H5Fclose(fid);
+ assert(status >= 0);
+
+ status = H5Pclose(dcpl);
+ assert(status >= 0);
+
+ status = H5Pclose(fapl);
+ assert(status >= 0);
+
+ return status;
+}
+
+/*-------------------------------------------------------------------------
*
* Purpose: Create test files to compare dangling links in various way
*
diff --git a/tools/h5diff/testfiles/h5diff_dset_idx1.h5 b/tools/h5diff/testfiles/h5diff_dset_idx1.h5
new file mode 100644
index 0000000..3252303
--- /dev/null
+++ b/tools/h5diff/testfiles/h5diff_dset_idx1.h5
Binary files differ
diff --git a/tools/h5diff/testfiles/h5diff_dset_idx2.h5 b/tools/h5diff/testfiles/h5diff_dset_idx2.h5
new file mode 100644
index 0000000..db7584d
--- /dev/null
+++ b/tools/h5diff/testfiles/h5diff_dset_idx2.h5
Binary files differ
diff --git a/tools/h5diff/testfiles/h5diff_hyper1.h5 b/tools/h5diff/testfiles/h5diff_hyper1.h5
index ceeff80..1fd47e1 100644
--- a/tools/h5diff/testfiles/h5diff_hyper1.h5
+++ b/tools/h5diff/testfiles/h5diff_hyper1.h5
Binary files differ
diff --git a/tools/h5diff/testfiles/h5diff_hyper2.h5 b/tools/h5diff/testfiles/h5diff_hyper2.h5
index 05a2eb1..ad2f468 100644
--- a/tools/h5diff/testfiles/h5diff_hyper2.h5
+++ b/tools/h5diff/testfiles/h5diff_hyper2.h5
Binary files differ
diff --git a/tools/h5diff/testfiles/h5diff_idx.txt b/tools/h5diff/testfiles/h5diff_idx.txt
new file mode 100644
index 0000000..754d3ea
--- /dev/null
+++ b/tools/h5diff/testfiles/h5diff_idx.txt
@@ -0,0 +1,14 @@
+
+file1 file2
+---------------------------------------
+ x x /
+ x x /dset
+ x x /dset_filter
+
+group : </> and </>
+0 differences found
+dataset: </dset> and </dset>
+0 differences found
+dataset: </dset_filter> and </dset_filter>
+0 differences found
+EXIT CODE: 0
diff --git a/tools/h5diff/testfiles/tmptest.he5 b/tools/h5diff/testfiles/tmptest.he5
index edcfcd2..7dedfc1 100644
--- a/tools/h5diff/testfiles/tmptest.he5
+++ b/tools/h5diff/testfiles/tmptest.he5
Binary files differ
diff --git a/tools/h5diff/testfiles/tmptest2.he5 b/tools/h5diff/testfiles/tmptest2.he5
index a6ab02b..0ce63dc 100644
--- a/tools/h5diff/testfiles/tmptest2.he5
+++ b/tools/h5diff/testfiles/tmptest2.he5
Binary files differ
diff --git a/tools/h5dump/h5dump_xml.c b/tools/h5dump/h5dump_xml.c
index 5bc43eb..00f894b 100644
--- a/tools/h5dump/h5dump_xml.c
+++ b/tools/h5dump/h5dump_xml.c
@@ -69,6 +69,7 @@ static h5tool_format_t xml_dataformat = {
"", /*cmpd_pre */
"", /*cmpd_suf */
"", /*cmpd_end */
+ "", /*cmpd_listv */
" ", /*vlen_sep */
" ", /*vlen_pre */
diff --git a/tools/h5dump/h5dumpgentest.c b/tools/h5dump/h5dumpgentest.c
index 250f96f..059fee5 100644
--- a/tools/h5dump/h5dumpgentest.c
+++ b/tools/h5dump/h5dumpgentest.c
@@ -98,6 +98,7 @@
#define FILE66 "packedbits.h5"
#define FILE67 "zerodim.h5"
#define FILE68 "charsets.h5"
+#define FILE68a "tdset_idx.h5"
#define FILE69 "tattrintsize.h5"
#define FILE70 "tcmpdintsize.h5"
#define FILE71 "tcmpdattrintsize.h5"
@@ -286,6 +287,16 @@ typedef struct s1_t {
#define F66_YDIM64 64
#define F66_DUMMYDBL "DummyDBL"
+/* Declarations for gent_dataset_idx() for "FILE68a" */
+#define F68a_DSET_FIXED "dset_fixed"
+#define F68a_DSET_FIXED_FILTER "dset_filter"
+#define F68a_DSET_BTREE "dset_btree"
+#define F68a_DIM200 200
+#define F68a_DIM100 100
+#define F68a_DIM20 20
+#define F68a_DIM10 10
+#define F68a_CHUNK 5
+
/* "FILE70" macros and for FILE71 */
/* Name of dataset to create in datafile */
#define F70_DATASETNAME "CompoundIntSize"
@@ -7012,6 +7023,90 @@ gent_fs_strategy_threshold(void)
H5Pclose(fcpl);
}
+/*
+ * Create a file with new format:
+ * Create one dataset with (set_chunk, fixed dims, null max. dims)
+ * so that Fixed Array indexing will be used.
+ * Create one dataset with (set_chunk, fixed dims, null max. dims, filter)
+ * so that Fixed Array indexing will be used.
+ * Create one dataset with (set_chunk, fixed dims, fixed max. dims)
+ * so that Fixed Array indexing will be used.
+ *
+ * Modifications:
+ * Fixed Array indexing will be used for chunked dataset
+ * with fixed max. dims setting.
+ *
+ */
+static void
+gent_dataset_idx(void)
+{
+ hid_t fid, space, dcpl, fapl;
+ hsize_t dims[2];
+ hsize_t maxdims[2];
+ int buf[20][10];
+ int i, j, ret;
+
+ /* Get a copy of the file aaccess property */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+
+ /* Set the "use the latest version of the format" bounds for creating objects in the file */
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ assert(ret >= 0);
+
+ fid = H5Fcreate(FILE68a, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+
+ dims[0] = F68a_CHUNK;
+ dims[1] = F68a_CHUNK;
+
+ /* set chunk */
+ ret = H5Pset_chunk(dcpl, RANK, dims);
+ assert(ret >= 0);
+
+ /* dataset with fixed dimensions */
+ dims[0] = F68a_DIM20;
+ dims[1] = F68a_DIM10;
+ space = H5Screate_simple(RANK, dims, NULL);
+
+ for(i = 0; i < F68a_DIM20; i++)
+ for(j = 0; j < F68a_DIM10; j++)
+ buf[i][j] = j;
+
+ ret = make_dset(fid, F68a_DSET_FIXED, space, H5T_NATIVE_INT, dcpl, buf);
+ assert(ret >= 0);
+ H5Sclose(space);
+
+ /* dataset with non-fixed dimensions */
+ maxdims[0] = F68a_DIM200;
+ maxdims[1] = F68a_DIM100;
+ space = H5Screate_simple(RANK, dims, maxdims);
+
+ ret = make_dset(fid, F68a_DSET_BTREE, space, H5T_NATIVE_INT, dcpl, buf);
+ assert(ret >= 0);
+ H5Sclose(space);
+
+#if defined (H5_HAVE_FILTER_DEFLATE)
+
+ /* dataset with fixed dimensions and filters */
+ /* remove the filters from the dcpl */
+ ret = H5Premove_filter(dcpl, H5Z_FILTER_ALL);
+ assert(ret >= 0);
+
+ /* set deflate data */
+ ret = H5Pset_deflate(dcpl, 9);
+ assert(ret >= 0);
+
+ space = H5Screate_simple(RANK, dims, NULL);
+ ret = make_dset(fid, F68a_DSET_FIXED_FILTER, space, H5T_NATIVE_INT, dcpl, buf);
+ assert(ret >= 0);
+
+ H5Sclose(space);
+#endif
+
+ H5Pclose(dcpl);
+ H5Fclose(fid);
+}
+
/*-------------------------------------------------------------------------
* Function: gent_packedbits
*
@@ -7050,9 +7145,8 @@ gent_packedbits(void)
valu8bits = (uint8_t) ~0u; /* all 1s */
for(i = 0; i < dims[0]; i++){
dsetu8[i][0] = valu8bits;
- for(j = 1; j < dims[1]; j++) {
+ for(j = 1; j < dims[1]; j++)
dsetu8[i][j] = dsetu8[i][j-1] << 1;
- }
valu8bits <<= 1;
}
@@ -7068,9 +7162,8 @@ gent_packedbits(void)
valu16bits = (uint16_t) ~0u; /* all 1s */
for(i = 0; i < dims[0]; i++){
dsetu16[i][0] = valu16bits;
- for(j = 1; j < dims[1]; j++) {
+ for(j = 1; j < dims[1]; j++)
dsetu16[i][j] = dsetu16[i][j-1] << 1;
- }
valu16bits <<= 1;
}
@@ -7086,9 +7179,8 @@ gent_packedbits(void)
valu32bits = (uint32_t) ~0u; /* all 1s */
for(i = 0; i < dims[0]; i++){
dsetu32[i][0] = valu32bits;
- for(j = 1; j < dims[1]; j++) {
+ for(j = 1; j < dims[1]; j++)
dsetu32[i][j] = dsetu32[i][j-1] << 1;
- }
valu32bits <<= 1;
}
@@ -7104,9 +7196,8 @@ gent_packedbits(void)
valu64bits = (uint64_t) ~0Lu; /* all 1s */
for(i = 0; i < dims[0]; i++){
dsetu64[i][0] = valu64bits;
- for(j = 1; j < dims[1]; j++) {
+ for(j = 1; j < dims[1]; j++)
dsetu64[i][j] = dsetu64[i][j-1] << 1;
- }
valu64bits <<= 1;
}
@@ -7122,9 +7213,8 @@ gent_packedbits(void)
val8bits = (int8_t) ~0; /* all 1s */
for(i = 0; i < dims[0]; i++){
dset8[i][0] = val8bits;
- for(j = 1; j < dims[1]; j++) {
+ for(j = 1; j < dims[1]; j++)
dset8[i][j] = dset8[i][j-1] << 1;
- }
val8bits <<= 1;
}
@@ -7140,9 +7230,8 @@ gent_packedbits(void)
val16bits = (int16_t) ~0; /* all 1s */
for(i = 0; i < dims[0]; i++){
dset16[i][0] = val16bits;
- for(j = 1; j < dims[1]; j++) {
+ for(j = 1; j < dims[1]; j++)
dset16[i][j] = dset16[i][j-1] << 1;
- }
val16bits <<= 1;
}
@@ -7158,9 +7247,8 @@ gent_packedbits(void)
val32bits = (int32_t) ~0; /* all 1s */
for(i = 0; i < dims[0]; i++){
dset32[i][0] = val32bits;
- for(j = 1; j < dims[1]; j++) {
+ for(j = 1; j < dims[1]; j++)
dset32[i][j] = dset32[i][j-1] << 1;
- }
val32bits <<= 1;
}
@@ -7176,9 +7264,8 @@ gent_packedbits(void)
val64bits = (int64_t) ~0L; /* all 1s */
for(i = 0; i < dims[0]; i++){
dset64[i][0] = val64bits;
- for(j = 1; j < dims[1]; j++) {
+ for(j = 1; j < dims[1]; j++)
dset64[i][j] = dset64[i][j-1] << 1;
- }
val64bits <<= 1;
}
@@ -7415,17 +7502,22 @@ gent_charsets(void)
const char *utf8_p_;
} CharSetInfo;
- hid_t charset_dtid = H5Tcreate( H5T_COMPOUND, sizeof( CharSetInfo ) );
- hid_t ascii_dtid = H5Tcreate( H5T_STRING, H5T_VARIABLE );
+ hid_t charset_dtid;
+ hid_t ascii_dtid;
hid_t utf8_dtid = H5Tcreate( H5T_STRING, H5T_VARIABLE );
const char * writeData[] = { "ascii", "utf8", };
sid = H5Screate_simple( 1, dim, NULL );
fid = H5Fcreate( FILE68, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT );
+
+ charset_dtid = H5Tcreate( H5T_COMPOUND, sizeof( CharSetInfo ) );
+
+ ascii_dtid = H5Tcreate( H5T_STRING, H5T_VARIABLE );
status = H5Tset_cset( ascii_dtid, H5T_CSET_ASCII );
HDassert(status >= 0);
H5Tinsert( charset_dtid, "ascii", HOFFSET(CharSetInfo, ascii_p_ ), ascii_dtid );
+ utf8_dtid = H5Tcreate( H5T_STRING, H5T_VARIABLE );
status = H5Tset_cset( utf8_dtid, H5T_CSET_UTF8 );
HDassert(status >= 0);
H5Tinsert( charset_dtid, "utf8", HOFFSET( CharSetInfo, utf8_p_ ), utf8_dtid );
@@ -9795,6 +9887,7 @@ int main(void)
gent_extlinks();
gent_fs_strategy_threshold();
gent_packedbits();
+ gent_dataset_idx();
gent_attr_intsize();
gent_charsets();
gent_compound_intsizes();
diff --git a/tools/h5format_convert/Makefile.am b/tools/h5format_convert/Makefile.am
new file mode 100644
index 0000000..d3aef7d
--- /dev/null
+++ b/tools/h5format_convert/Makefile.am
@@ -0,0 +1,49 @@
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+##
+## Makefile.am
+## Run automake to generate a Makefile.in from this file.
+#
+# HDF5 Library Makefile(.in)
+#
+
+include $(top_srcdir)/config/commence.am
+
+# Include src directory
+AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/tools/lib
+
+#test script and program
+TEST_PROG=h5fc_gentest
+TEST_SCRIPT=testh5fc.sh
+
+check_PROGRAMS=$(TEST_PROG) h5fc_chk_idx
+check_SCRIPTS=$(TEST_SCRIPT)
+SCRIPT_DEPEND=h5format_convert$(EXEEXT)
+
+# These are our main targets, the tools
+bin_PROGRAMS=h5format_convert
+
+# Add h5format_convert specific linker flags here
+h5format_convert_LDFLAGS = $(LT_STATIC_EXEC) $(AM_LDFLAGS)
+
+# Tell automake to clean h5redeploy script
+CHECK_CLEANFILES+=*.h5
+
+# These were generated by configure. Remove them only when distclean.
+DISTCLEANFILES=testh5fc.sh
+
+# All programs rely on hdf5 library and h5tools library
+LDADD=$(LIBH5TOOLS) $(LIBHDF5)
+
+include $(top_srcdir)/config/conclude.am
diff --git a/tools/h5format_convert/h5fc_chk_idx.c b/tools/h5format_convert/h5fc_chk_idx.c
new file mode 100644
index 0000000..3114379
--- /dev/null
+++ b/tools/h5format_convert/h5fc_chk_idx.c
@@ -0,0 +1,101 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * A program to verify that the chunk indexing type of a dataset in a file
+ * is version 1 B-tree.
+ * This is to support the testing of the tool "h5format_convert".
+ */
+
+#include "hdf5.h"
+#include "H5private.h"
+#include "h5tools.h"
+
+static void usage(void);
+
+static void
+usage(void)
+{
+ HDfprintf(stdout, "Usage: h5fc_chk_idx file_name dataset_pathname\n");
+} /* usage() */
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: To check that the chunk indexing type for the dataset in
+ * the file is version 1 B-tree.
+ *
+ * Return: 0 -- the indexing type is version 1 B-tree
+ * 1 -- otherwise
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(int argc, char *argv[])
+{
+ char *fname = NULL;
+ char *dname = NULL;
+ hid_t fid = -1;
+ hid_t did = -1;
+ H5D_chunk_index_t idx_type;
+
+ /* h5fc_chk_idx fname dname */
+ if(argc != 3) {
+ usage();
+ exit(EXIT_FAILURE);
+ }
+
+ /* Duplicate the file name & dataset name */
+ fname = strdup(argv[1]);
+ dname = strdup(argv[2]);
+
+ /* Try opening the file */
+ if((fid = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, NULL, NULL, (size_t)0)) < 0) {
+ HDfprintf(stderr, "h5fc_chk_idx: unable to open the file\n");
+ return EXIT_FAILURE;
+ }
+
+ /* Open the dataset */
+ if((did = H5Dopen2(fid, dname, H5P_DEFAULT)) < 0) {
+ HDfprintf(stderr, "h5fc_chk_idx: unable to open the dataset\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Get the dataset's chunk indexing type */
+ if(H5Dget_chunk_index_type(did, &idx_type) < 0) {
+ HDfprintf(stderr, "h5fc_chk_idx: unable to get chunk index type for the dataset\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0) {
+ HDfprintf(stderr, "h5fc_chk_idx: unable to close the dataset\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0) {
+ HDfprintf(stderr, "h5fc_chk_idx_type: cannot close the file\n");
+ return EXIT_FAILURE;
+ }
+
+ /* Return success when the chunk indexing type is version 1 B-tree */
+ if(idx_type == H5D_CHUNK_IDX_BTREE)
+ return(EXIT_SUCCESS);
+ else {
+ HDfprintf(stderr, "Error: chunk indexing type is %d\n", idx_type);
+ return(EXIT_FAILURE);
+ }
+} /* main() */
diff --git a/tools/h5format_convert/h5fc_gentest.c b/tools/h5format_convert/h5fc_gentest.c
new file mode 100644
index 0000000..4dcc286
--- /dev/null
+++ b/tools/h5format_convert/h5fc_gentest.c
@@ -0,0 +1,635 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Generate the binary hdf5 files for the h5format_convert tests.
+ * Usage: just execute the program without any arguments will
+ * generate all the binary hdf5 files
+ *
+ * If you regenerate the test files (e.g., changing some code,
+ * trying it on a new platform, ...), you need to verify the correctness
+ * of the expected output and update the corresponding *.ddl files.
+ */
+#include "hdf5.h"
+
+#define GROUP "GROUP"
+
+#define OLD_V1_FILE "h5fc_v1.h5"
+#define DSET_NON_CHUNKED "DSET_NON_CHUNKED"
+#define DSET_BT1 "DSET_BT1"
+#define DSET_NDATA_BT1 "DSET_NDATA_BT1"
+
+#define LATEST_V3_FILE "h5fc_latest_v3.h5"
+#define DSET_EA "DSET_EA"
+#define DSET_NDATA_EA "DSET_NDATA_EA"
+#define DSET_BT2 "DSET_BT2"
+#define DSET_NDATA_BT2 "DSET_NDATA_BT2"
+#define DSET_FA "DSET_FA"
+#define DSET_NDATA_FA "DSET_NDATA_FA"
+#define DSET_NONE "DSET_NONE"
+#define DSET_NDATA_NONE "DSET_NDATA_NONE"
+
+#define NON_V3_FILE "h5fc_non_v3.h5"
+
+#define EDGE_V3_FILE "h5fc_edge_v3.h5"
+#define DSET_EDGE "DSET_EDGE"
+
+/*
+ * Function: gen_old()
+ *
+ * Create an old format file with:
+ *    1) 1 non-chunked dataset
+ *    2) 2 chunked datasets with version 1 B-tree chunk indexing type: with/without data
+ */
+static void
+gen_old(const char *fname)
+{
+    hid_t fid = -1;             /* file id */
+    hid_t fcpl = -1;            /* file creation property list */
+    hid_t gid = -1;             /* group id */
+    hid_t sid = -1;             /* space id */
+    hid_t dcpl = -1;            /* dataset creation property id */
+    hid_t did1 = -1, did2 = -1; /* dataset id */
+    hsize_t dims1[1] = {10};    /* dataset dimension */
+    hsize_t dims2[2] = {4, 6};  /* dataset dimension */
+    hsize_t c_dims[2] = {2, 3}; /* chunk dimension */
+    int i;                      /* local index variable */
+    int buf[24];                /* data buffer */
+
+    if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+        goto error;
+
+    /* Use a non-default 'istore_k' (v1 B-tree internal node 1/2 rank) */
+    if(H5Pset_istore_k(fcpl, 64) < 0)
+        goto error;
+
+    /* Create file */
+    if((fid = H5Fcreate(fname, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Create a group */
+    if((gid = H5Gcreate2(fid, GROUP, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /*
+     * Create a non-chunked dataset
+     */
+
+    /* Create dataspace */
+    if((sid = H5Screate_simple(1, dims1, NULL)) < 0)
+        goto error;
+
+    /* Create the dataset */
+    if((did1 = H5Dcreate2(fid, DSET_NON_CHUNKED, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Closing */
+    if(H5Sclose(sid) < 0)
+        goto error;
+    if(H5Dclose(did1) < 0)
+        goto error;
+
+    /*
+     * Create two chunked datasets with version 1 B-tree chunk indexing type
+     * (one with data, one without data)
+     */
+
+    /* Create data */
+    for(i = 0; i < 24; i++)
+        buf[i] = i;
+
+    /* Set chunk */
+    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+        goto error;
+    if(H5Pset_chunk(dcpl, 2, c_dims) < 0)
+        goto error;
+
+    /* Create dataspace */
+    if((sid = H5Screate_simple(2, dims2, NULL)) < 0)
+        goto error;
+
+    /* Create the 2 datasets */
+    if((did1 = H5Dcreate2(fid, DSET_NDATA_BT1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    if((did2 = H5Dcreate2(gid, DSET_BT1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Write to one dataset */
+    if(H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+        goto error;
+
+    /* Closing (fcpl was previously leaked on the success path) */
+    if(H5Pclose(fcpl) < 0)
+        goto error;
+    if(H5Pclose(dcpl) < 0)
+        goto error;
+    if(H5Sclose(sid) < 0)
+        goto error;
+    if(H5Dclose(did1) < 0)
+        goto error;
+    if(H5Dclose(did2) < 0)
+        goto error;
+
+    if(H5Gclose(gid) < 0)
+        goto error;
+    if(H5Fclose(fid) < 0)
+        goto error;
+
+    /* Success: don't fall through into the error cleanup below */
+    return;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(fcpl);
+        H5Pclose(dcpl);
+        H5Sclose(sid);
+        H5Dclose(did1);
+        H5Dclose(did2);
+        H5Gclose(gid);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+
+} /* gen_old() */
+
+/*
+ * Function: gen_latest()
+ *
+ * Create a file with write+latest-format--this will result in v3 superblock+latest version support:
+ *    1) 2 chunked datasets with extensible array chunk indexing type (with/without data)
+ *    2) 2 chunked datasets with version 2 B-tree chunk indexing type (with/without data)
+ *    3) 2 chunked datasets with fixed array chunk indexing type (with/without data)
+ *    4) 2 chunked datasets with implicit chunk indexing type (with/without data)
+ */
+static void
+gen_latest(const char *fname)
+{
+    hid_t fid = -1;             /* file id */
+    hid_t fapl = -1;            /* file access property list */
+    hid_t gid = -1;             /* group id */
+    hid_t sid = -1;             /* space id */
+    hid_t dcpl = -1;            /* dataset creation property id */
+    hid_t did1 = -1, did2 = -1; /* dataset id */
+    hsize_t dims2[2] = {4, 6};  /* dataset dimension */
+    hsize_t max_dims[2];        /* maximum dataset dimension */
+    hsize_t c_dims[2] = {2, 3}; /* chunk dimension */
+    int i;                      /* local index variable */
+    int buf[24];                /* data buffer */
+
+    /* Create a new format file */
+    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+        goto error;
+    if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+        goto error;
+
+    if((fid = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+        goto error;
+
+    /* Create a group */
+    if((gid = H5Gcreate2(fid, GROUP, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Set chunk */
+    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+        goto error;
+    if(H5Pset_chunk(dcpl, 2, c_dims) < 0)
+        goto error;
+
+    /*
+     * Create 2 chunked datasets with extensible array chunk indexing type
+     * (one with data; one without data)
+     */
+
+    /* Create dataspace: one fixed maximum dimension, one unlimited */
+    max_dims[0] = 10;
+    max_dims[1] = H5S_UNLIMITED;
+    if((sid = H5Screate_simple(2, dims2, max_dims)) < 0)
+        goto error;
+
+    /* Create the 2 datasets */
+    if((did1 = H5Dcreate2(gid, DSET_NDATA_EA, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    if((did2 = H5Dcreate2(fid, DSET_EA, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Create data */
+    for(i = 0; i < 24; i++)
+        buf[i] = i;
+
+    /* Write to one dataset */
+    if(H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+        goto error;
+
+    /* Closing */
+    if(H5Sclose(sid) < 0)
+        goto error;
+    if(H5Dclose(did1) < 0)
+        goto error;
+    if(H5Dclose(did2) < 0)
+        goto error;
+
+    /*
+     * Create 2 chunked datasets with version 2 B-tree chunk indexing type
+     * (one with data; one without data)
+     */
+
+    /* Create dataspace: both maximum dimensions unlimited */
+    max_dims[0] = H5S_UNLIMITED;
+    max_dims[1] = H5S_UNLIMITED;
+    if((sid = H5Screate_simple(2, dims2, max_dims)) < 0)
+        goto error;
+
+    /* Create the 2 datasets */
+    if((did1 = H5Dcreate2(fid, DSET_NDATA_BT2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    if((did2 = H5Dcreate2(gid, DSET_BT2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Write to one dataset */
+    if(H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+        goto error;
+
+    /* Closing */
+    if(H5Sclose(sid) < 0)
+        goto error;
+    if(H5Dclose(did1) < 0)
+        goto error;
+    if(H5Dclose(did2) < 0)
+        goto error;
+
+    /*
+     * Create 2 chunked datasets with fixed array chunk indexing type
+     * (one with data; one without data)
+     */
+
+    /* Create dataspace: fixed maximum dimensions */
+    max_dims[0] = 20;
+    max_dims[1] = 10;
+    if((sid = H5Screate_simple(2, dims2, max_dims)) < 0)
+        goto error;
+
+    /* Create the datasets */
+    if((did1 = H5Dcreate2(fid, DSET_FA, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    if((did2 = H5Dcreate2(gid, DSET_NDATA_FA, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Write to the dataset */
+    if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+        goto error;
+
+    /* Closing */
+    if(H5Sclose(sid) < 0)
+        goto error;
+    if(H5Dclose(did1) < 0)
+        goto error;
+    if(H5Dclose(did2) < 0)
+        goto error;
+
+    /*
+     * Create 2 chunked datasets with implicit chunk indexing type
+     * (one with data; one without data)
+     */
+
+    /* Create dataspace (no maximum dimensions) */
+    if((sid = H5Screate_simple(2, dims2, NULL)) < 0)
+        goto error;
+
+    /* Set early allocation */
+    if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0)
+        goto error;
+
+    /* Create the 2 datasets */
+    if((did1 = H5Dcreate2(fid, DSET_NONE, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    if((did2 = H5Dcreate2(gid, DSET_NDATA_NONE, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Write to one dataset */
+    if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+        goto error;
+
+    /* Closing (fapl was previously leaked on the success path) */
+    if(H5Dclose(did1) < 0)
+        goto error;
+    if(H5Dclose(did2) < 0)
+        goto error;
+    if(H5Sclose(sid) < 0)
+        goto error;
+
+    if(H5Pclose(dcpl) < 0)
+        goto error;
+    if(H5Pclose(fapl) < 0)
+        goto error;
+    if(H5Gclose(gid) < 0)
+        goto error;
+    if(H5Fclose(fid) < 0)
+        goto error;
+
+    /* Success: don't fall through into the error cleanup below */
+    return;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(dcpl);
+        H5Sclose(sid);
+        H5Dclose(did1);
+        H5Dclose(did2);
+        H5Gclose(gid);
+        H5Fclose(fid);
+        H5Pclose(fapl);
+    } H5E_END_TRY;
+
+} /* gen_latest() */
+
+/*
+ * Function: gen_non()
+ *
+ * Create a file with SWMR write+non-latest-format--this will result in v3 superblock+latest version support:
+ *    1) 1 chunked dataset with extensible array chunk indexing type (without data)
+ *    2) 1 chunked dataset with version 2 B-tree chunk indexing type (with data)
+ * Re-open the file with write+non-latest-format and create:
+ *    3) 1 chunked dataset with version 2 B-tree chunk indexing type (without data)
+ *    4) 1 chunked dataset with extensible array indexing type (with data)
+ *    5) 1 non-chunked dataset
+ */
+static void
+gen_non(const char *fname)
+{
+    hid_t fid = -1;             /* file id */
+    hid_t gid = -1;             /* group id */
+    hid_t sid = -1;             /* space id */
+    hid_t dcpl = -1;            /* dataset creation property id */
+    hid_t did1 = -1, did2 = -1; /* dataset id */
+    hsize_t dims1[1] = {10};    /* dataset dimension */
+    hsize_t dims2[2] = {4, 6};  /* dataset dimension */
+    hsize_t max_dims[2];        /* maximum dataset dimension */
+    hsize_t c_dims[2] = {2, 3}; /* chunk dimension */
+    int i;                      /* local index variable */
+    int buf[24];                /* data buffer */
+
+    /* Create a new file with SWMR_WRITE + non-latest-format */
+    if((fid = H5Fcreate(fname, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Create a group */
+    if((gid = H5Gcreate2(fid, GROUP, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Create data */
+    for(i = 0; i < 24; i++)
+        buf[i] = i;
+
+    /* Set chunk */
+    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+        goto error;
+    if(H5Pset_chunk(dcpl, 2, c_dims) < 0)
+        goto error;
+
+    /*
+     * Create a chunked dataset with extensible array chunk indexing type (without data)
+     */
+
+    /* Create dataspace: one fixed maximum dimension, one unlimited */
+    max_dims[0] = 10;
+    max_dims[1] = H5S_UNLIMITED;
+    if((sid = H5Screate_simple(2, dims2, max_dims)) < 0)
+        goto error;
+
+    /* Create the dataset */
+    if((did1 = H5Dcreate2(fid, DSET_NDATA_EA, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Closing */
+    if(H5Sclose(sid) < 0)
+        goto error;
+    if(H5Dclose(did1) < 0)
+        goto error;
+
+    /*
+     * Create a chunked dataset with version 2 B-tree chunk indexing type (with data)
+     */
+
+    /* Create dataspace: both maximum dimensions unlimited */
+    max_dims[0] = H5S_UNLIMITED;
+    max_dims[1] = H5S_UNLIMITED;
+    if((sid = H5Screate_simple(2, dims2, max_dims)) < 0)
+        goto error;
+
+    /* Create the dataset */
+    if((did1 = H5Dcreate2(gid, DSET_BT2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Write to the dataset */
+    if(H5Dwrite(did1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+        goto error;
+
+    /* Closing */
+    if(H5Sclose(sid) < 0)
+        goto error;
+    if(H5Dclose(did1) < 0)
+        goto error;
+    if(H5Pclose(dcpl) < 0)
+        goto error;
+    if(H5Gclose(gid) < 0)
+        goto error;
+    if(H5Fclose(fid) < 0)
+        goto error;
+
+    /* Re-open the file with old format */
+    if((fid = H5Fopen(fname, H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Open the group */
+    if((gid = H5Gopen2(fid, GROUP, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /*
+     * Create a dataset with version 2 B-btree chunk indexing type (without data)
+     */
+
+    /* Set chunk */
+    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+        goto error;
+    if(H5Pset_chunk(dcpl, 2, c_dims) < 0)
+        goto error;
+
+    /* Create dataspace */
+    max_dims[0] = H5S_UNLIMITED;
+    max_dims[1] = H5S_UNLIMITED;
+    if((sid = H5Screate_simple(2, dims2, max_dims)) < 0)
+        goto error;
+
+    /* Create the dataset */
+    if((did1 = H5Dcreate2(fid, DSET_NDATA_BT2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Close the dataspace */
+    if(H5Sclose(sid) < 0)
+        goto error;
+
+    /*
+     * Create a dataset with version extensible array chunk indexing type (with data) in the group
+     */
+
+    /* Create dataspace */
+    max_dims[0] = 10;
+    max_dims[1] = H5S_UNLIMITED;
+    if((sid = H5Screate_simple(2, dims2, max_dims)) < 0)
+        goto error;
+
+    /* Create the dataset */
+    if((did2 = H5Dcreate2(gid, DSET_EA, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Write to the dataset */
+    if(H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+        goto error;
+
+    /* Closing */
+    if(H5Sclose(sid) < 0)
+        goto error;
+    if(H5Dclose(did1) < 0)
+        goto error;
+    if(H5Dclose(did2) < 0)
+        goto error;
+
+    /*
+     * Create a non-chunked dataset in the group
+     */
+
+    /* Create dataspace */
+    if((sid = H5Screate_simple(1, dims1, NULL)) < 0)
+        goto error;
+
+    /* Create the dataset */
+    if((did1 = H5Dcreate2(gid, DSET_NON_CHUNKED, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Closing */
+    if(H5Sclose(sid) < 0)
+        goto error;
+    if(H5Dclose(did1) < 0)
+        goto error;
+
+    if(H5Gclose(gid) < 0)
+        goto error;
+    if(H5Fclose(fid) < 0)
+        goto error;
+    if(H5Pclose(dcpl) < 0)
+        goto error;
+
+    /* Success: don't fall through into the error cleanup below */
+    return;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(dcpl);
+        H5Sclose(sid);
+        H5Dclose(did1);
+        H5Dclose(did2);
+        H5Gclose(gid);
+        H5Fclose(fid);
+    } H5E_END_TRY;
+
+} /* gen_non() */
+
+/*
+ * Function: gen_edge()
+ *
+ * Create a file with write+latest-format--this will result in v3 superblock+latest version support:
+ *    A dataset: chunked, filtered, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS enabled
+ *    (i.e. the dataset does not filter partial edge chunks)
+ */
+static void
+gen_edge(const char *fname)
+{
+    hid_t fid = -1;             /* file id */
+    hid_t fapl = -1;            /* file access property list */
+    hid_t sid = -1;             /* dataspace id */
+    hid_t dcpl = -1;            /* dataset creation property id */
+    hid_t did = -1;             /* dataset id */
+    hsize_t dims2[2] = {12, 6}; /* Dataset dimensions */
+    hsize_t c_dims[2] = {5, 5}; /* Chunk dimensions */
+    float buf[12][6];           /* Buffer for writing data */
+    int i, j;                   /* local index variable */
+
+    /* Create a new format file */
+    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+        goto error;
+    if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+        goto error;
+    if((fid = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+        goto error;
+
+    /* Set chunk, filter, no-filter-edge-chunk */
+    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+        goto error;
+    if(H5Pset_chunk(dcpl, 2, c_dims) < 0)
+        goto error;
+    if(H5Pset_deflate(dcpl, 9) < 0)
+        goto error;
+    if(H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0)
+        goto error;
+
+    /* Create dataspace */
+    if((sid = H5Screate_simple(2, dims2, NULL)) < 0)
+        goto error;
+
+    /* Create the dataset */
+    if((did = H5Dcreate2(fid, DSET_EDGE, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+        goto error;
+
+    /* Create data */
+    for(i = 0; i < 12; i++)
+        for(j = 0; j < 6; j++)
+            buf[i][j] = 100.0F;
+
+    /* Write to the dataset */
+    if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+        goto error;
+
+    /* Closing */
+    if(H5Pclose(dcpl) < 0)
+        goto error;
+    if(H5Sclose(sid) < 0)
+        goto error;
+    if(H5Dclose(did) < 0)
+        goto error;
+    if(H5Pclose(fapl) < 0)
+        goto error;
+    if(H5Fclose(fid) < 0)
+        goto error;
+
+    /* Success: don't fall through into the error cleanup below */
+    return;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(dcpl);
+        H5Sclose(sid);
+        H5Dclose(did);
+        H5Fclose(fid);
+        H5Pclose(fapl);
+    } H5E_END_TRY;
+
+} /* gen_edge() */
+
+/* Generate all the binary HDF5 test files used by the h5format_convert tests */
+int main(void)
+{
+    gen_old(OLD_V1_FILE);       /* old format file with v1 superblock */
+    gen_latest(LATEST_V3_FILE); /* latest-format file with v3 superblock */
+    gen_non(NON_V3_FILE);       /* non-latest-format file with v3 superblock */
+    gen_edge(EDGE_V3_FILE);     /* new format file with a no-filter-edge-chunk dataset */
+
+    return 0;
+}
diff --git a/tools/h5format_convert/h5format_convert.c b/tools/h5format_convert/h5format_convert.c
new file mode 100644
index 0000000..7686acc
--- /dev/null
+++ b/tools/h5format_convert/h5format_convert.c
@@ -0,0 +1,438 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: Vailin Choi; Feb 2015
+ */
+
+
+/*
+ * We include the private header file so we can get to the uniform
+ * programming environment it declares.
+ * HDF5 API functions (except for H5G_basename())
+ */
+#include "H5private.h"
+#include "h5tools.h"
+#include "h5tools_utils.h"
+#include "h5trav.h"
+
+/* Name of tool */
+#define PROGRAMNAME "h5format_convert"
+
+/* Tool state set by parse_command_line() */
+static char *fname_g = NULL;    /* name of the file to operate on */
+static char *dname_g = NULL;    /* pathname of the dataset (-d/--dname) */
+static int dset_g = FALSE;      /* TRUE if -d/--dname was given */
+static int noop_g = FALSE;      /* TRUE if -n/--noop was given */
+static int verbose_g = 0;       /* TRUE if -v/--verbose was given */
+
+/*
+ * Command-line options: The user can specify short or long-named
+ * parameters.  Long names may be abbreviated down to the shortest
+ * unambiguous prefix listed below.
+ */
+static const char *s_opts = "hVvd:n";
+static struct long_options l_opts[] = {
+    { "help", no_arg, 'h' },
+    { "hel", no_arg, 'h'},
+    { "he", no_arg, 'h'},
+    { "version", no_arg, 'V' },
+    { "versio", no_arg, 'V' },
+    { "versi", no_arg, 'V' },
+    { "vers", no_arg, 'V' },
+    { "verbose", no_arg, 'v' },
+    { "verbos", no_arg, 'v' },
+    { "verbo", no_arg, 'v' },
+    { "verb", no_arg, 'v' },
+    { "dname", require_arg, 'd' },
+    { "dnam", require_arg, 'd' },
+    { "dna", require_arg, 'd' },
+    { "dn", require_arg, 'd' },
+    { "noop", no_arg, 'n' },
+    { "noo", no_arg, 'n' },
+    { "no", no_arg, 'n' },
+    { NULL, 0, '\0' }
+};
+
+
+/*-------------------------------------------------------------------------
+ * Function: usage
+ *
+ * Purpose: print usage
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void usage(const char *prog)
+{
+    /* Header line needs the program name; the option list and the worked
+     * examples are emitted as two blocks of concatenated string literals. */
+    printf("usage: %s [OPTIONS] file_name\n", prog);
+    printf(" OPTIONS\n"
+           " -h, --help Print a usage message and exit\n"
+           " -V, --version Print version number and exit\n"
+           " -v, --verbose Turn on verbose mode\n"
+           " -d dname, --dname=dataset_name Pathname for the dataset\n"
+           " -n, --noop Perform all the steps except the actual conversion\n"
+           "\n");
+    printf("Examples of use:\n"
+           "\n"
+           "h5format_convert -d /group/dataset file_name\n"
+           " Convert the chunk indexing type to version 1 B-tree\n"
+           " for the chunked dataset </group/dataset> in the HDF5 file <file_name>.\n"
+           "\n"
+           "h5format_convert file_name\n"
+           " Convert the chunk indexing type to version 1 B-tree\n"
+           " for all the chunked datasets in the HDF5 file <file_name>.\n"
+           "\n"
+           "h5format_convert -n -d /group/dataset file_name\n"
+           " Go through all the steps except the actual conversion when \n"
+           " converting the chunked dataset </group/dataset> in the HDF5 file <file_name>.\n");
+} /* usage() */
+
+/*-------------------------------------------------------------------------
+ * Function: parse_command_line
+ *
+ * Purpose: parse command line input
+ *
+ * Return: Success: 0
+ *         Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+parse_command_line(int argc, const char **argv)
+{
+    int opt;    /* option character returned by get_option() */
+
+    /* No arguments at all: show usage and fail */
+    if (argc == 1) {
+        usage(h5tools_getprogname());
+        h5tools_setstatus(EXIT_FAILURE);
+        goto error;
+    }
+
+    /* parse command line options */
+    while ((opt = get_option(argc, argv, s_opts, l_opts)) != EOF) {
+        switch((char) opt) {
+            case 'h':
+                usage(h5tools_getprogname());
+                h5tools_setstatus(EXIT_SUCCESS);
+                goto error;
+
+            case 'V':
+                print_version(h5tools_getprogname());
+                h5tools_setstatus(EXIT_SUCCESS);
+                goto error;
+
+            case 'v':
+                verbose_g = TRUE;
+                break;
+
+            case 'd': /* -d dname */
+                /* Only accept a non-empty dataset name */
+                if(opt_arg != NULL && *opt_arg)
+                    dname_g = HDstrdup(opt_arg);
+                if(dname_g == NULL) {
+                    h5tools_setstatus(EXIT_FAILURE);
+                    error_msg("No dataset name\n");
+                    usage(h5tools_getprogname());
+                    goto error;
+                }
+                dset_g = TRUE;
+                break;
+
+            case 'n': /* -n */
+                noop_g = TRUE;
+                break;
+
+            default:
+                h5tools_setstatus(EXIT_FAILURE);
+                usage(h5tools_getprogname());
+                goto error;
+        } /* switch */
+    } /* while */
+
+    /* A file name must follow the options */
+    if (argc <= opt_ind) {
+        error_msg("missing file name\n");
+        usage(h5tools_getprogname());
+        h5tools_setstatus(EXIT_FAILURE);
+        goto error;
+    }
+
+    fname_g = HDstrdup(argv[opt_ind]);
+
+    return(0);
+
+error:
+    return(-1);
+} /* parse_command_line() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: leave
+ *
+ * Purpose: Shut down the h5tools library and terminate the process
+ *          with the given status.
+ *
+ * Return: Does not return
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+leave(int ret)
+{
+    /* Release resources held by the h5tools library before exiting */
+    h5tools_close();
+    HDexit(ret);
+} /* leave() */
+
+/*-------------------------------------------------------------------------
+ * Function: convert()
+ *
+ * Purpose: To change the chunk indexing type for the dataset to version 1 B-tree.
+ *          -- the dataset has to be chunked
+ *          -- the dataset's chunk indexing type is not already version 1 B-tree.
+ *          If the above conditions are not fulfilled, the tool will not perform
+ *          the conversion but will exit with success.
+ *
+ * Return: Success: 0
+ *         Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+convert(hid_t fid, const char *dname)
+{
+    hid_t dcpl = -1;                /* dataset creation property list */
+    hid_t did = -1;                 /* dataset id */
+    H5D_layout_t layout_type;       /* layout type of the dataset */
+    H5D_chunk_index_t idx_type;     /* chunk indexing type of the dataset */
+
+    /* Open the dataset */
+    if((did = H5Dopen2(fid, dname, H5P_DEFAULT)) < 0) {
+        error_msg("unable to open dataset \"%s\"\n", dname);
+        h5tools_setstatus(EXIT_FAILURE);
+        goto error;
+
+    } else if(verbose_g)
+        printf("Open the dataset\n");
+
+    /* Get the dataset's creation property list */
+    if((dcpl = H5Dget_create_plist(did)) < 0) {
+        error_msg("unable to get the dataset creation property list\n");
+        h5tools_setstatus(EXIT_FAILURE);
+        goto error;
+    }
+
+    /* Get the dataset's layout */
+    if((layout_type = H5Pget_layout(dcpl)) < 0) {
+        error_msg("unable to get the dataset layout type\n");
+        h5tools_setstatus(EXIT_FAILURE);
+        goto error;
+
+    } else if(verbose_g)
+        printf("Retrieve the dataset's layout\n");
+
+    /* No further action if not a chunked dataset: only chunked datasets
+     * carry a chunk index that could be converted */
+    if(layout_type != H5D_CHUNKED) {
+        if(verbose_g)
+            printf("Dataset is not chunked: no further action\n");
+        h5tools_setstatus(EXIT_SUCCESS);
+        goto done;
+
+    } else if(verbose_g)
+        printf("Verify the dataset is a chunked dataset\n");
+
+    /* Get the dataset's chunk indexing type */
+    if(H5Dget_chunk_index_type(did, &idx_type) < 0) {
+        error_msg("unable to get the chunk indexing type for \"%s\"\n", dname);
+        h5tools_setstatus(EXIT_FAILURE);
+        goto error;
+
+    } else if(verbose_g)
+        printf("Retrieve the dataset's chunk indexing type\n");
+
+    /* No further action if the chunk indexing type is already version 1 B-tree */
+    if(idx_type == H5D_CHUNK_IDX_BTREE) {
+        if(verbose_g)
+            printf("Chunk indexing type is already version 1 B-tree: no further action\n");
+        h5tools_setstatus(EXIT_SUCCESS);
+        goto done;
+
+    } else if (verbose_g)
+        printf("Verify the dataset's chunk indexing type is not version 1 B-tree\n");
+
+    /* No further action if it is a noop (-n): all the checks above still run,
+     * only the conversion itself is skipped */
+    if(noop_g) {
+        if(verbose_g)
+            printf("Not converting the dataset\n");
+        h5tools_setstatus(EXIT_SUCCESS);
+        goto done;
+    }
+
+    if(verbose_g)
+        printf("Converting the dataset...\n");
+
+    /* Convert the dataset's chunk indexing type to version 1 B-tree */
+    if(H5Dformat_convert(did) < 0) {
+        error_msg("unable to convert chunk indexing for \"%s\"\n", dname);
+        h5tools_setstatus(EXIT_FAILURE);
+        goto error;
+
+    } else if(verbose_g)
+        printf("Done\n");
+
+done:
+    /* Close the dataset */
+    if(H5Dclose(did) < 0) {
+        error_msg("unable to close dataset \"%s\"\n", dname);
+        h5tools_setstatus(EXIT_FAILURE);
+        goto error;
+    } else if(verbose_g)
+        printf("Close the dataset\n");
+
+    /* Close the dataset creation property list */
+    if(H5Pclose(dcpl) < 0) {
+        error_msg("unable to close dataset creation property list\n");
+        h5tools_setstatus(EXIT_FAILURE);
+        goto error;
+    } else if(verbose_g)
+        printf("Close the dataset creation property list\n");
+
+    return(0);
+
+error:
+    if(verbose_g)
+        printf("Error encountered\n");
+
+    /* Suppress error stack output while closing possibly-invalid IDs */
+    H5E_BEGIN_TRY {
+        H5Pclose(dcpl);
+        H5Dclose(did);
+    } H5E_END_TRY;
+
+    return(-1);
+
+} /* convert() */
+
+/*-------------------------------------------------------------------------
+ * Function: convert_dsets_cb()
+ *
+ * Purpose: The callback routine from the traversal to convert the
+ *          chunk indexing type of the dataset object.
+ *
+ * Return: Success: 0
+ *         Failure: -1
+ *-------------------------------------------------------------------------
+ */
+static int
+convert_dsets_cb(const char *path, const H5O_info_t *oi, const char *already_visited, void *_fid)
+{
+    hid_t fid = *(hid_t *)_fid;
+
+    /* Skip objects the traversal has already seen */
+    if(already_visited != NULL)
+        return 0;
+
+    /* Only dataset objects carry a chunk index to convert */
+    if(oi->type == H5O_TYPE_DATASET) {
+        if(verbose_g)
+            printf("Going to process dataset:%s...\n", path);
+        if(convert(fid, path) < 0)
+            return -1;
+    }
+
+    return 0;
+} /* end convert_dsets_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: To convert the chunk indexing type of a dataset in a file to
+ *          version 1 B-tree.
+ *
+ * Return: Success: 0
+ *         Failure: 1
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(int argc, const char *argv[])
+{
+    H5E_auto2_t func;       /* saved automatic error reporting callback */
+    void *edata;            /* saved client data for the saved callback */
+    hid_t fid = -1;         /* file id */
+
+    h5tools_setprogname(PROGRAMNAME);
+    h5tools_setstatus(EXIT_SUCCESS);
+
+    /* Disable error reporting; the saved callback is restored before leaving */
+    H5Eget_auto2(H5E_DEFAULT, &func, &edata);
+    H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+
+    /* Initialize h5tools lib */
+    h5tools_init();
+
+    /* Parse command line options; sets fname_g/dname_g/dset_g/noop_g/verbose_g */
+    if(parse_command_line(argc, argv) < 0)
+        goto done;
+    else if(verbose_g)
+        printf("Process command line options\n");
+
+    if(noop_g && verbose_g)
+        printf("It is noop...\n");
+
+    /* Open the HDF5 file */
+    if((fid = h5tools_fopen(fname_g, H5F_ACC_RDWR, H5P_DEFAULT, NULL, NULL, 0)) < 0) {
+        error_msg("unable to open file \"%s\"\n", fname_g);
+        h5tools_setstatus(EXIT_FAILURE);
+        goto done;
+    } else if(verbose_g)
+        printf("Open the file %s\n", fname_g);
+
+    if(dset_g) { /* Convert a specified dataset in the file */
+        if(verbose_g)
+            printf("Going to process dataset: %s...\n", dname_g);
+        if(convert(fid, dname_g) < 0)
+            goto done;
+    } else { /* Convert all datasets in the file via an object traversal */
+        if(verbose_g)
+            printf("Processing all datasets in the file...\n");
+        if(h5trav_visit(fid, "/", TRUE, TRUE, convert_dsets_cb, NULL, &fid) < 0)
+            goto done;
+    }
+
+done:
+    /* Close the file */
+    if(fid >= 0) {
+        if(H5Fclose(fid) < 0) {
+            error_msg("unable to close file \"%s\"\n", fname_g);
+            h5tools_setstatus(EXIT_FAILURE);
+        } else if(verbose_g)
+            printf("Close the file\n");
+    }
+
+    /* Free the duplicated file/dataset names */
+    if(fname_g)
+        HDfree(fname_g);
+    if(dname_g)
+        HDfree(dname_g);
+
+    /* Restore error reporting and exit with the tool's status */
+    H5Eset_auto2(H5E_DEFAULT, func, edata);
+    leave(h5tools_getstatus());
+
+} /* end main() */
diff --git a/tools/h5format_convert/testfiles/h5fc_d_file.ddl b/tools/h5format_convert/testfiles/h5fc_d_file.ddl
new file mode 100644
index 0000000..3641a4f
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_d_file.ddl
@@ -0,0 +1,22 @@
+usage: h5format_convert [OPTIONS] file_name
+ OPTIONS
+ -h, --help Print a usage message and exit
+ -V, --version Print version number and exit
+ -v, --verbose Turn on verbose mode
+ -d dname, --dname=dataset_name Pathname for the dataset
+ -n, --noop Perform all the steps except the actual conversion
+
+Examples of use:
+
+h5format_convert -d /group/dataset file_name
+ Convert the chunk indexing type to version 1 B-tree
+ for the chunked dataset </group/dataset> in the HDF5 file <file_name>.
+
+h5format_convert file_name
+ Convert the chunk indexing type to version 1 B-tree
+ for all the chunked datasets in the HDF5 file <file_name>.
+
+h5format_convert -n -d /group/dataset file_name
+ Go through all the steps except the actual conversion when
+ converting the chunked dataset </group/dataset> in the HDF5 file <file_name>.
+h5format_convert error: missing file name
diff --git a/tools/h5format_convert/testfiles/h5fc_dname.ddl b/tools/h5format_convert/testfiles/h5fc_dname.ddl
new file mode 100644
index 0000000..c391764
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_dname.ddl
@@ -0,0 +1,22 @@
+usage: h5format_convert [OPTIONS] file_name
+ OPTIONS
+ -h, --help Print a usage message and exit
+ -V, --version Print version number and exit
+ -v, --verbose Turn on verbose mode
+ -d dname, --dname=dataset_name Pathname for the dataset
+ -n, --noop Perform all the steps except the actual conversion
+
+Examples of use:
+
+h5format_convert -d /group/dataset file_name
+ Convert the chunk indexing type to version 1 B-tree
+ for the chunked dataset </group/dataset> in the HDF5 file <file_name>.
+
+h5format_convert file_name
+ Convert the chunk indexing type to version 1 B-tree
+ for all the chunked datasets in the HDF5 file <file_name>.
+
+h5format_convert -n -d /group/dataset file_name
+ Go through all the steps except the actual conversion when
+ converting the chunked dataset </group/dataset> in the HDF5 file <file_name>.
+h5format_convert error: No dataset name
diff --git a/tools/h5format_convert/testfiles/h5fc_edge_v3.h5 b/tools/h5format_convert/testfiles/h5fc_edge_v3.h5
new file mode 100644
index 0000000..debeda4
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_edge_v3.h5
Binary files differ
diff --git a/tools/h5format_convert/testfiles/h5fc_help.ddl b/tools/h5format_convert/testfiles/h5fc_help.ddl
new file mode 100644
index 0000000..9081ab8
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_help.ddl
@@ -0,0 +1,21 @@
+usage: h5format_convert [OPTIONS] file_name
+ OPTIONS
+ -h, --help Print a usage message and exit
+ -V, --version Print version number and exit
+ -v, --verbose Turn on verbose mode
+ -d dname, --dname=dataset_name Pathname for the dataset
+ -n, --noop Perform all the steps except the actual conversion
+
+Examples of use:
+
+h5format_convert -d /group/dataset file_name
+ Convert the chunk indexing type to version 1 B-tree
+ for the chunked dataset </group/dataset> in the HDF5 file <file_name>.
+
+h5format_convert file_name
+ Convert the chunk indexing type to version 1 B-tree
+ for all the chunked datasets in the HDF5 file <file_name>.
+
+h5format_convert -n -d /group/dataset file_name
+ Go through all the steps except the actual conversion when
+ converting the chunked dataset </group/dataset> in the HDF5 file <file_name>.
diff --git a/tools/h5format_convert/testfiles/h5fc_latest_v3.h5 b/tools/h5format_convert/testfiles/h5fc_latest_v3.h5
new file mode 100644
index 0000000..f7de743
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_latest_v3.h5
Binary files differ
diff --git a/tools/h5format_convert/testfiles/h5fc_non_v3.h5 b/tools/h5format_convert/testfiles/h5fc_non_v3.h5
new file mode 100644
index 0000000..b1bffa8
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_non_v3.h5
Binary files differ
diff --git a/tools/h5format_convert/testfiles/h5fc_nonexistdset_file.ddl b/tools/h5format_convert/testfiles/h5fc_nonexistdset_file.ddl
new file mode 100644
index 0000000..39450c0
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_nonexistdset_file.ddl
@@ -0,0 +1 @@
+h5format_convert error: unable to open dataset "nonexist"
diff --git a/tools/h5format_convert/testfiles/h5fc_nonexistfile.ddl b/tools/h5format_convert/testfiles/h5fc_nonexistfile.ddl
new file mode 100644
index 0000000..706ea9d
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_nonexistfile.ddl
@@ -0,0 +1 @@
+h5format_convert error: unable to open file "nonexist.h5"
diff --git a/tools/h5format_convert/testfiles/h5fc_nooption.ddl b/tools/h5format_convert/testfiles/h5fc_nooption.ddl
new file mode 100644
index 0000000..9081ab8
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_nooption.ddl
@@ -0,0 +1,21 @@
+usage: h5format_convert [OPTIONS] file_name
+ OPTIONS
+ -h, --help Print a usage message and exit
+ -V, --version Print version number and exit
+ -v, --verbose Turn on verbose mode
+ -d dname, --dname=dataset_name Pathname for the dataset
+ -n, --noop Perform all the steps except the actual conversion
+
+Examples of use:
+
+h5format_convert -d /group/dataset file_name
+ Convert the chunk indexing type to version 1 B-tree
+ for the chunked dataset </group/dataset> in the HDF5 file <file_name>.
+
+h5format_convert file_name
+ Convert the chunk indexing type to version 1 B-tree
+ for all the chunked datasets in the HDF5 file <file_name>.
+
+h5format_convert -n -d /group/dataset file_name
+ Go through all the steps except the actual conversion when
+ converting the chunked dataset </group/dataset> in the HDF5 file <file_name>.
diff --git a/tools/h5format_convert/testfiles/h5fc_v1.h5 b/tools/h5format_convert/testfiles/h5fc_v1.h5
new file mode 100644
index 0000000..d3d66f8
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_v1.h5
Binary files differ
diff --git a/tools/h5format_convert/testfiles/h5fc_v_all.ddl b/tools/h5format_convert/testfiles/h5fc_v_all.ddl
new file mode 100644
index 0000000..3f474fe
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_v_all.ddl
@@ -0,0 +1,26 @@
+Process command line options
+Open the file tmp.h5
+Processing all datasets in the file...
+Going to process dataset:/DSET_NDATA_BT1...
+Open the dataset
+Retrieve the dataset's layout
+Verify the dataset is a chunked dataset
+Retrieve the dataset's chunk indexing type
+Chunk indexing type is already version 1 B-tree: no further action
+Close the dataset
+Close the dataset creation property list
+Going to process dataset:/DSET_NON_CHUNKED...
+Open the dataset
+Retrieve the dataset's layout
+Dataset is not chunked: no further action
+Close the dataset
+Close the dataset creation property list
+Going to process dataset:/GROUP/DSET_BT1...
+Open the dataset
+Retrieve the dataset's layout
+Verify the dataset is a chunked dataset
+Retrieve the dataset's chunk indexing type
+Chunk indexing type is already version 1 B-tree: no further action
+Close the dataset
+Close the dataset creation property list
+Close the file
diff --git a/tools/h5format_convert/testfiles/h5fc_v_bt1.ddl b/tools/h5format_convert/testfiles/h5fc_v_bt1.ddl
new file mode 100644
index 0000000..abb0a89
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_v_bt1.ddl
@@ -0,0 +1,11 @@
+Process command line options
+Open the file tmp.h5
+Going to process dataset: /GROUP/DSET_BT1...
+Open the dataset
+Retrieve the dataset's layout
+Verify the dataset is a chunked dataset
+Retrieve the dataset's chunk indexing type
+Chunk indexing type is already version 1 B-tree: no further action
+Close the dataset
+Close the dataset creation property list
+Close the file
diff --git a/tools/h5format_convert/testfiles/h5fc_v_n_1d.ddl b/tools/h5format_convert/testfiles/h5fc_v_n_1d.ddl
new file mode 100644
index 0000000..a26dc66
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_v_n_1d.ddl
@@ -0,0 +1,13 @@
+Process command line options
+It is noop...
+Open the file tmp.h5
+Going to process dataset: /DSET_EA...
+Open the dataset
+Retrieve the dataset's layout
+Verify the dataset is a chunked dataset
+Retrieve the dataset's chunk indexing type
+Verify the dataset's chunk indexing type is not version 1 B-tree
+Not converting the dataset
+Close the dataset
+Close the dataset creation property list
+Close the file
diff --git a/tools/h5format_convert/testfiles/h5fc_v_n_all.ddl b/tools/h5format_convert/testfiles/h5fc_v_n_all.ddl
new file mode 100644
index 0000000..76c70ee
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_v_n_all.ddl
@@ -0,0 +1,47 @@
+Process command line options
+It is noop...
+Open the file tmp.h5
+Processing all datasets in the file...
+Going to process dataset:/DSET_NDATA_BT2...
+Open the dataset
+Retrieve the dataset's layout
+Verify the dataset is a chunked dataset
+Retrieve the dataset's chunk indexing type
+Verify the dataset's chunk indexing type is not version 1 B-tree
+Not converting the dataset
+Close the dataset
+Close the dataset creation property list
+Going to process dataset:/DSET_NDATA_EA...
+Open the dataset
+Retrieve the dataset's layout
+Verify the dataset is a chunked dataset
+Retrieve the dataset's chunk indexing type
+Verify the dataset's chunk indexing type is not version 1 B-tree
+Not converting the dataset
+Close the dataset
+Close the dataset creation property list
+Going to process dataset:/GROUP/DSET_BT2...
+Open the dataset
+Retrieve the dataset's layout
+Verify the dataset is a chunked dataset
+Retrieve the dataset's chunk indexing type
+Verify the dataset's chunk indexing type is not version 1 B-tree
+Not converting the dataset
+Close the dataset
+Close the dataset creation property list
+Going to process dataset:/GROUP/DSET_EA...
+Open the dataset
+Retrieve the dataset's layout
+Verify the dataset is a chunked dataset
+Retrieve the dataset's chunk indexing type
+Verify the dataset's chunk indexing type is not version 1 B-tree
+Not converting the dataset
+Close the dataset
+Close the dataset creation property list
+Going to process dataset:/GROUP/DSET_NON_CHUNKED...
+Open the dataset
+Retrieve the dataset's layout
+Dataset is not chunked: no further action
+Close the dataset
+Close the dataset creation property list
+Close the file
diff --git a/tools/h5format_convert/testfiles/h5fc_v_ndata_bt1.ddl b/tools/h5format_convert/testfiles/h5fc_v_ndata_bt1.ddl
new file mode 100644
index 0000000..86081f3
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_v_ndata_bt1.ddl
@@ -0,0 +1,12 @@
+Process command line options
+It is noop...
+Open the file tmp.h5
+Going to process dataset: /DSET_NDATA_BT1...
+Open the dataset
+Retrieve the dataset's layout
+Verify the dataset is a chunked dataset
+Retrieve the dataset's chunk indexing type
+Chunk indexing type is already version 1 B-tree: no further action
+Close the dataset
+Close the dataset creation property list
+Close the file
diff --git a/tools/h5format_convert/testfiles/h5fc_v_non_chunked.ddl b/tools/h5format_convert/testfiles/h5fc_v_non_chunked.ddl
new file mode 100644
index 0000000..baba0e4
--- /dev/null
+++ b/tools/h5format_convert/testfiles/h5fc_v_non_chunked.ddl
@@ -0,0 +1,9 @@
+Process command line options
+Open the file tmp.h5
+Going to process dataset: /DSET_NON_CHUNKED...
+Open the dataset
+Retrieve the dataset's layout
+Dataset is not chunked: no further action
+Close the dataset
+Close the dataset creation property list
+Close the file
diff --git a/tools/h5format_convert/testh5fc.sh.in b/tools/h5format_convert/testh5fc.sh.in
new file mode 100644
index 0000000..dc5aa48
--- /dev/null
+++ b/tools/h5format_convert/testh5fc.sh.in
@@ -0,0 +1,400 @@
+#! /bin/sh
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+#
+# Tests for the h5format_convert tool
+#
+#
+
+srcdir=@srcdir@
+
+# Determine which filters are available
+USE_FILTER_SZIP="@USE_FILTER_SZIP@"
+USE_FILTER_DEFLATE="@USE_FILTER_DEFLATE@"
+USE_FILTER_SHUFFLE="@USE_FILTER_SHUFFLE@"
+USE_FILTER_FLETCHER32="@USE_FILTER_FLETCHER32@"
+USE_FILTER_NBIT="@USE_FILTER_NBIT@"
+USE_FILTER_SCALEOFFSET="@USE_FILTER_SCALEOFFSET@"
+
+TESTNAME=h5format_convert
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+
+FORMCONV=h5format_convert # The tool name
+FORMCONV_BIN=`pwd`/$FORMCONV # The path of the tool binary
+
+CHK_IDX=h5fc_chk_idx # The program name
+CHK_IDX_BIN=`pwd`/$CHK_IDX # The program to verify the chunk indexing type is v1 B-tree
+
+RM='rm -rf'
+CMP='cmp -s'
+DIFF='diff -c'
+CP='cp'
+DIRNAME='dirname'
+LS='ls'
+AWK='awk'
+
+nerrors=0
+verbose=yes
+
+# source dirs
+SRC_TOOLS="$srcdir/.."
+SRC_TOOLS_TESTFILES="$SRC_TOOLS/testfiles"
+
+# testfiles source dirs for tools
+SRC_H5LS_TESTFILES="$SRC_TOOLS_TESTFILES"
+SRC_H5DUMP_TESTFILES="$SRC_TOOLS_TESTFILES"
+SRC_H5DIFF_TESTFILES="$SRC_TOOLS/h5diff/testfiles"
+SRC_H5COPY_TESTFILES="$SRC_TOOLS/h5copy/testfiles"
+SRC_H5REPACK_TESTFILES="$SRC_TOOLS/h5repack/testfiles"
+SRC_H5JAM_TESTFILES="$SRC_TOOLS/h5jam/testfiles"
+SRC_H5STAT_TESTFILES="$SRC_TOOLS/h5stat/testfiles"
+SRC_H5IMPORT_TESTFILES="$SRC_TOOLS/h5import/testfiles"
+SRC_H5FORMCONV_TESTFILES="$SRC_TOOLS/h5format_convert/testfiles"
+
+TESTDIR=./testfiles
+test -d $TESTDIR || mkdir $TESTDIR
+
+# Copy the testfile to a temporary file for testing as h5format_convert is changing the file in place
+TMPFILE=tmp.h5
+
+######################################################################
+# test files
+# --------------------------------------------------------------------
+# All the test files copy from source directory to test directory
+# NOTE: Keep this framework to add/remove test files.
+# Any test files from other tools can be used in this framework.
+# This list is also used for checking the existence of the files.
+# Comment '#' without space can be used.
+# --------------------------------------------------------------------
+LIST_HDF5_TEST_FILES="
+$SRC_H5FORMCONV_TESTFILES/h5fc_v1.h5
+$SRC_H5FORMCONV_TESTFILES/h5fc_latest_v3.h5
+$SRC_H5FORMCONV_TESTFILES/h5fc_non_v3.h5
+$SRC_H5FORMCONV_TESTFILES/h5fc_edge_v3.h5
+"
+
+LIST_OTHER_TEST_FILES="
+$SRC_H5FORMCONV_TESTFILES/h5fc_help.ddl
+$SRC_H5FORMCONV_TESTFILES/h5fc_nooption.ddl
+$SRC_H5FORMCONV_TESTFILES/h5fc_nonexistfile.ddl
+$SRC_H5FORMCONV_TESTFILES/h5fc_d_file.ddl
+$SRC_H5FORMCONV_TESTFILES/h5fc_dname.ddl
+$SRC_H5FORMCONV_TESTFILES/h5fc_nonexistdset_file.ddl
+$SRC_H5FORMCONV_TESTFILES/h5fc_v_non_chunked.ddl
+$SRC_H5FORMCONV_TESTFILES/h5fc_v_bt1.ddl
+$SRC_H5FORMCONV_TESTFILES/h5fc_v_ndata_bt1.ddl
+$SRC_H5FORMCONV_TESTFILES/h5fc_v_all.ddl
+$SRC_H5FORMCONV_TESTFILES/h5fc_v_n_1d.ddl
+$SRC_H5FORMCONV_TESTFILES/h5fc_v_n_all.ddl
+"
+
+#
+# copy test files and expected output files from source dirs to test dir
+#
+COPY_TESTFILES="$LIST_HDF5_TEST_FILES $LIST_OTHER_TEST_FILES"
+
+COPY_TESTFILES_TO_TESTDIR()
+{
+ # copy test files. Used -f to make sure get a new copy
+ for tstfile in $COPY_TESTFILES
+ do
+ # ignore '#' comment
+ echo $tstfile | tr -d ' ' | grep '^#' > /dev/null
+ RET=$?
+ if [ $RET -eq 1 ]; then
+ # skip cp if srcdir is same as destdir
+ # this occurs when build/test performed in source dir and
+ # make cp fail
+ SDIR=`$DIRNAME $tstfile`
+ INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
+ INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
+ if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
+ $CP -f $tstfile $TESTDIR
+ if [ $? -ne 0 ]; then
+ echo "Error: FAILED to copy $tstfile ."
+
+ # Comment out this to CREATE expected file
+ exit $EXIT_FAILURE
+ fi
+ fi
+ fi
+ done
+}
+
+CLEAN_TESTFILES_AND_TESTDIR()
+{
+ # skip rm if srcdir is same as destdir
+ # this occurs when build/test performed in source dir and
+ # make cp fail
+ SDIR=`$DIRNAME $tstfile`
+ INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
+ INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
+ if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
+ $RM $TESTDIR
+ else
+ $RM $TESTDIR/$TMPFILE
+ fi
+}
+
+# Print a one-line message left justified in a field of 80 characters
+# beginning with the word "Testing".
+#
+TESTING() {
+ SPACES=" "
+ echo "Testing $* $SPACES" | cut -c1-80 | tr -d '\012'
+}
+
+# Run a test and print PASS or *FAIL*. If a test fails then increment
+# the `nerrors' global variable and (if $verbose is set) display the
+# difference between the actual output and the expected output. The
+# expected output is given as the first argument to this function and
+# the actual output file is calculated by replacing the `.ddl' with
+# `.out'. The actual output is not removed if $HDF5_NOCLEANUP has a
+# non-zero value.
+#
+# $1: expected output
+# $2: the test file name
+# --fname might be empty or fname does not exist
+# --fname is copied to a temporary file for testing
+# $3 to at most $6--options to the tool such as:
+# -d dname or --dname=dname
+# -v or --verbose
+# -n or --noop
+TOOLTEST_OUT() {
+ # Prepare expected and actual output
+ expect="$TESTDIR/$1"
+ actual="$TESTDIR/`basename $1 .ddl`.out"
+ actual_err="$TESTDIR/`basename $1 .ddl`.err"
+ actual_sav=${actual}-sav
+ actual_err_sav=${actual_err}-sav
+
+ # Prepare the test file
+ $RM $TESTDIR/$TMPFILE
+ TFILE=$2
+ if [ ! -z "$2" ] && [ -e $TESTDIR/$2 ] ; then
+ $CP $TESTDIR/$2 $TESTDIR/$TMPFILE
+ TFILE=$TMPFILE
+ fi
+
+ # Run test.
+ TESTING $FORMCONV $3 $4 $5 $6 $2
+ (
+ cd $TESTDIR
+ $RUNSERIAL $FORMCONV_BIN $3 $4 $5 $6 $TFILE
+ ) >$actual 2>$actual_err
+ cp $actual $actual_sav
+ cp $actual_err $actual_err_sav
+ cat $actual_err >> $actual
+
+ # Compare output
+ if $CMP $expect $actual; then
+ echo " PASSED"
+ else
+ echo "*FAILED*"
+ echo " Expected result (*.ddl) differs from actual result (*.out)"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /'
+ fi
+
+ # Clean up output file
+ if test -z "$HDF5_NOCLEANUP"; then
+    # (the saved copies are removed together with the outputs below)
+ $RM $actual $actual_err $actual_sav $actual_err_sav
+ fi
+}
+
+# To check that the tool exits success, no output
+# Assume all short options
+# $1 is the test file name
+# --fname exists
+# --fname is copied to a temporary file for testing
+# $2 to at most $4--options to the tool such as:
+# -d dname
+# -n
+TOOLTEST() {
+ TESTING $FORMCONV $2 $3 $4 $1
+ $RM $TESTDIR/$TMPFILE
+ $CP $TESTDIR/$1 $TESTDIR/$TMPFILE
+ $RUNSERIAL $FORMCONV_BIN $2 $3 $4 $TESTDIR/$TMPFILE
+ exitcode=$?
+ if [ $exitcode -ne 0 ]; then
+ echo "*FAILED*"
+ echo " The tool exits failure"
+ nerrors="`expr $nerrors + 1`"
+ else
+ echo " PASSED"
+ fi
+}
+
+CHECKING() {
+ SPACES=" "
+ echo "Verifying $* $SPACES" | cut -c1-80 | tr -d '\012'
+}
+
+# $1 dataset name
+# Assume $TESTDIR/$TMPFILE is the converted test file
+IDX_CHECK() {
+ CHECKING $1
+ $RUNSERIAL $CHK_IDX_BIN $TESTDIR/$TMPFILE $1
+ ret=$?
+ if [ $ret -eq 0 ]; then
+ echo " PASSED"
+ else
+ echo "*FAILED*"
+ echo " The chunk indexing type is not correct"
+ nerrors="`expr $nerrors + 1`"
+ fi
+}
+
+# Print a "SKIP" message
+SKIP() {
+ TESTING $STAT $@
+ echo " -SKIP-"
+}
+
+
+
+##############################################################################
+##############################################################################
+### T H E T E S T S ###
+##############################################################################
+##############################################################################
+# prepare for test
+COPY_TESTFILES_TO_TESTDIR
+#
+#
+#
+# h5format_convert --help
+# h5format_convert (no options)
+# h5format_convert nonexist.h5 (no options, file does not exist)
+TOOLTEST_OUT h5fc_help.ddl '' --help
+TOOLTEST_OUT h5fc_nooption.ddl ''
+TOOLTEST_OUT h5fc_nonexistfile.ddl nonexist.h5
+#
+#
+# h5format_convert -d h5fc_v1.h5 (just -d option, file exists)
+# h5format_convert --dname h5fc_v1.h5 (just --dname option, file exists)
+# h5format_convert --dname (just --dname option)
+# h5format_convert --dname=nonexist h5fc_v1.h5 (dataset does not exist, file exists)
+TOOLTEST_OUT h5fc_d_file.ddl h5fc_v1.h5 -d
+TOOLTEST_OUT h5fc_d_file.ddl h5fc_v1.h5 --dname
+TOOLTEST_OUT h5fc_dname.ddl '' --dname
+TOOLTEST_OUT h5fc_nonexistdset_file.ddl h5fc_v1.h5 --dname=nonexist
+#
+#
+#
+# h5format_convert -d /DSET_NON_CHUNKED -v h5fc_v1.h5 (verbose, non-chunked dataset)
+# h5format_convert -d /GROUP/DSET_BT1 --verbose h5fc_v1.h5 (verbose, bt1 dataset)
+# h5format_convert -d /DSET_NDATA_BT1 -v -n h5fc_v1.h5 (verbose, noop, bt1+nodata dataset)
+# h5format_convert -v h5fc_v1.h5 (verbose, all datasets)
+TOOLTEST_OUT h5fc_v_non_chunked.ddl h5fc_v1.h5 -d /DSET_NON_CHUNKED -v
+TOOLTEST_OUT h5fc_v_bt1.ddl h5fc_v1.h5 -d /GROUP/DSET_BT1 --verbose
+TOOLTEST_OUT h5fc_v_ndata_bt1.ddl h5fc_v1.h5 -d /DSET_NDATA_BT1 -v -n
+TOOLTEST_OUT h5fc_v_all.ddl h5fc_v1.h5 -v
+#
+#
+#
+# h5format_convert -d /DSET_EA -v -n h5fc_latest_v3.h5 (verbose, noop, one ea dataset)
+# h5format_convert -v -n h5fc_non_v3.h5 (verbose, noop, all datasets)
+TOOLTEST_OUT h5fc_v_n_1d.ddl h5fc_latest_v3.h5 -d /DSET_EA -v -n
+TOOLTEST_OUT h5fc_v_n_all.ddl h5fc_non_v3.h5 -v -n
+#
+#
+#
+# No output from tests
+# 1) Use the tool to convert the dataset
+# 2) Verify the chunk indexing type is correct
+# h5format_convert -d /DSET_EA h5fc_latest_v3.h5
+# h5format_convert -d /GROUP/DSET_NDATA_EA h5fc_latest_v3.h5
+# h5format_convert -d /GROUP/DSET_BT2 h5fc_latest_v3.h5
+# h5format_convert -d /DSET_NDATA_BT2 h5fc_latest_v3.h5
+# h5format_convert -d /DSET_FA h5fc_latest_v3.h5
+# h5format_convert -d /GROUP/DSET_NDATA_FA h5fc_latest_v3.h5
+# h5format_convert -d /DSET_NONE h5fc_latest_v3.h5
+# h5format_convert -d /GROUP/DSET_NDATA_NONE h5fc_latest_v3.h5
+TOOLTEST h5fc_latest_v3.h5 -d /DSET_EA
+IDX_CHECK /DSET_EA
+#
+TOOLTEST h5fc_latest_v3.h5 -d /GROUP/DSET_NDATA_EA
+IDX_CHECK /GROUP/DSET_NDATA_EA
+#
+TOOLTEST h5fc_latest_v3.h5 -d /GROUP/DSET_BT2
+IDX_CHECK /GROUP/DSET_BT2
+#
+TOOLTEST h5fc_latest_v3.h5 -d /DSET_NDATA_BT2
+IDX_CHECK /DSET_NDATA_BT2
+#
+TOOLTEST h5fc_latest_v3.h5 -d /DSET_FA
+IDX_CHECK /DSET_FA
+#
+TOOLTEST h5fc_latest_v3.h5 -d /GROUP/DSET_NDATA_FA
+IDX_CHECK /GROUP/DSET_NDATA_FA
+#
+TOOLTEST h5fc_latest_v3.h5 -d /DSET_NONE
+IDX_CHECK /DSET_NONE
+#
+TOOLTEST h5fc_latest_v3.h5 -d /GROUP/DSET_NDATA_NONE
+IDX_CHECK /GROUP/DSET_NDATA_NONE
+#
+#
+#
+# No output from tests: just check exit code
+# h5format_convert -d /DSET_NDATA_BT1 h5fc_v1.h5 (v1-btree dataset)
+# h5format_convert -d /GROUP/DSET_NON_CHUNKED h5fc_non_v3.h5 (non-chunked dataset)
+TOOLTEST h5fc_v1.h5 -d /DSET_NDATA_BT1
+TOOLTEST h5fc_non_v3.h5 -d /GROUP/DSET_NON_CHUNKED
+#
+#
+#
+# No output from tests: just check exit code
+# h5format_convert -d /GROUP/DSET_BT2 -n h5fc_non_v3.h5 (noop, one dataset)
+# h5format_convert -n h5fc_non_v3.h5 (noop, all datasets)
+TOOLTEST h5fc_non_v3.h5 -d /GROUP/DSET_BT2 -n
+TOOLTEST h5fc_non_v3.h5 -n
+#
+#
+#
+# No output from tests: just check exit code
+# h5format_convert h5fc_non_v3.h5
+# 1) convert all datasets
+# 2) verify indexing types
+TOOLTEST h5fc_non_v3.h5
+IDX_CHECK /DSET_NDATA_EA
+IDX_CHECK /DSET_NDATA_BT2
+IDX_CHECK /GROUP/DSET_BT2
+IDX_CHECK /GROUP/DSET_EA
+#
+#
+#
+# No output from test: just check exit code
+# h5format_convert h5fc_edge_v3.h5
+# 1) convert the chunked dataset (filter, no-filter-edge-chunk)
+# 2) verify the indexing type
+TOOLTEST h5fc_edge_v3.h5
+IDX_CHECK /DSET_EDGE
+#
+#
+#
+# Clean up temporary files/directories
+CLEAN_TESTFILES_AND_TESTDIR
+
+if test $nerrors -eq 0 ; then
+ echo "All $TESTNAME tests passed."
+ exit $EXIT_SUCCESS
+else
+ echo "$TESTNAME tests failed with $nerrors errors."
+ exit $EXIT_FAILURE
+fi
+
diff --git a/tools/h5import/testfiles/binfp64.h5 b/tools/h5import/testfiles/binfp64.h5
index 80e3a8a..5426edb 100644
--- a/tools/h5import/testfiles/binfp64.h5
+++ b/tools/h5import/testfiles/binfp64.h5
Binary files differ
diff --git a/tools/h5import/testfiles/binin16.h5 b/tools/h5import/testfiles/binin16.h5
index 0825bbc..6d89c63 100644
--- a/tools/h5import/testfiles/binin16.h5
+++ b/tools/h5import/testfiles/binin16.h5
Binary files differ
diff --git a/tools/h5import/testfiles/binin32.h5 b/tools/h5import/testfiles/binin32.h5
index fd8faa9..61cc507 100644
--- a/tools/h5import/testfiles/binin32.h5
+++ b/tools/h5import/testfiles/binin32.h5
Binary files differ
diff --git a/tools/h5import/testfiles/binin8.h5 b/tools/h5import/testfiles/binin8.h5
index a1d1a37..efd3ae0 100644
--- a/tools/h5import/testfiles/binin8.h5
+++ b/tools/h5import/testfiles/binin8.h5
Binary files differ
diff --git a/tools/h5import/testfiles/binuin16.h5 b/tools/h5import/testfiles/binuin16.h5
index c486c89..1af804c 100644
--- a/tools/h5import/testfiles/binuin16.h5
+++ b/tools/h5import/testfiles/binuin16.h5
Binary files differ
diff --git a/tools/h5import/testfiles/binuin32.h5 b/tools/h5import/testfiles/binuin32.h5
index 41699d7..e7d12d9 100644
--- a/tools/h5import/testfiles/binuin32.h5
+++ b/tools/h5import/testfiles/binuin32.h5
Binary files differ
diff --git a/tools/h5import/testfiles/txtfp32.h5 b/tools/h5import/testfiles/txtfp32.h5
index f74e003..bd41f06 100644
--- a/tools/h5import/testfiles/txtfp32.h5
+++ b/tools/h5import/testfiles/txtfp32.h5
Binary files differ
diff --git a/tools/h5import/testfiles/txtfp64.h5 b/tools/h5import/testfiles/txtfp64.h5
index b6ba4f5..41c4e9e 100644
--- a/tools/h5import/testfiles/txtfp64.h5
+++ b/tools/h5import/testfiles/txtfp64.h5
Binary files differ
diff --git a/tools/h5import/testfiles/txtin16.h5 b/tools/h5import/testfiles/txtin16.h5
index dc6c1ea..5089c97 100644
--- a/tools/h5import/testfiles/txtin16.h5
+++ b/tools/h5import/testfiles/txtin16.h5
Binary files differ
diff --git a/tools/h5import/testfiles/txtin32.h5 b/tools/h5import/testfiles/txtin32.h5
index 350333c..8d547cf 100644
--- a/tools/h5import/testfiles/txtin32.h5
+++ b/tools/h5import/testfiles/txtin32.h5
Binary files differ
diff --git a/tools/h5import/testfiles/txtin8.h5 b/tools/h5import/testfiles/txtin8.h5
index 42e7727..61b7165 100644
--- a/tools/h5import/testfiles/txtin8.h5
+++ b/tools/h5import/testfiles/txtin8.h5
Binary files differ
diff --git a/tools/h5import/testfiles/txtuin16.h5 b/tools/h5import/testfiles/txtuin16.h5
index 9ee166a..c43b875 100644
--- a/tools/h5import/testfiles/txtuin16.h5
+++ b/tools/h5import/testfiles/txtuin16.h5
Binary files differ
diff --git a/tools/h5import/testfiles/txtuin32.h5 b/tools/h5import/testfiles/txtuin32.h5
index 1a4dda5..aec3b0f 100644
--- a/tools/h5import/testfiles/txtuin32.h5
+++ b/tools/h5import/testfiles/txtuin32.h5
Binary files differ
diff --git a/tools/h5ls/h5ls.c b/tools/h5ls/h5ls.c
index cce5f3d..c962e80 100644
--- a/tools/h5ls/h5ls.c
+++ b/tools/h5ls/h5ls.c
@@ -70,6 +70,7 @@ static h5tool_format_t ls_dataformat = {
"{", /*cmpd_pre */
"}", /*cmpd_suf */
"", /*cmpd_end */
+ NULL, /* cmpd_listv */
",", /*vlen_sep */
"(", /*vlen_pre */
diff --git a/tools/h5ls/testh5ls.sh.in b/tools/h5ls/testh5ls.sh.in
index bf40bfa..68b317c 100644
--- a/tools/h5ls/testh5ls.sh.in
+++ b/tools/h5ls/testh5ls.sh.in
@@ -88,6 +88,7 @@ $SRC_H5LS_TESTFILES/tsoftlinks.h5
$SRC_H5LS_TESTFILES/tstr.h5
$SRC_H5LS_TESTFILES/tudlink.h5
$SRC_H5LS_TESTFILES/tvldtypes1.h5
+$SRC_H5LS_TESTFILES/tdset_idx.h5
"
LIST_OTHER_TEST_FILES="
@@ -147,6 +148,7 @@ $SRC_H5LS_TESTFILES/tudlink-1.ls
$SRC_H5LS_TESTFILES/tvldtypes1.ls
$SRC_H5LS_TESTFILES/tvldtypes2le.ls
$SRC_H5LS_TESTFILES/tvldtypes2be.ls
+$SRC_H5LS_TESTFILES/tdset_idx.ls
"
@@ -418,6 +420,9 @@ else
TOOLTEST tdataregbe.ls 0 -v tdatareg.h5
fi
+# test for file with datasets that use Fixed Array chunk indices
+echo "***skip testing tdset_idx.h5"
+#TOOLTEST tdset_idx.ls 0 -w80 -d tdset_idx.h5
# Clean up temporary files/directories
CLEAN_TESTFILES_AND_TESTDIR
diff --git a/tools/h5repack/h5repack.c b/tools/h5repack/h5repack.c
index c1e63f1..286d7f0 100644
--- a/tools/h5repack/h5repack.c
+++ b/tools/h5repack/h5repack.c
@@ -77,14 +77,14 @@ int h5repack(const char* infile, const char* outfile, pack_opt_t *options) {
*
*-------------------------------------------------------------------------
*/
-
-int h5repack_init(pack_opt_t *options, int verbose,
+int h5repack_init(pack_opt_t *options, int verbose, hbool_t latest,
H5F_file_space_type_t strategy, hsize_t threshold) {
int k, n;
HDmemset(options, 0, sizeof(pack_opt_t));
options->min_comp = 0;
options->verbose = verbose;
+ options->latest = latest;
options->layout_g = H5D_LAYOUT_ERROR;
for (n = 0; n < H5_REPACK_MAX_NFILTERS; n++) {
diff --git a/tools/h5repack/h5repack.h b/tools/h5repack/h5repack.h
index 69e36fc..329be1f 100644
--- a/tools/h5repack/h5repack.h
+++ b/tools/h5repack/h5repack.h
@@ -105,7 +105,7 @@ typedef struct {
int verbose; /*verbose mode */
hsize_t min_comp; /*minimum size to compress, in bytes */
int use_native; /*use a native type in write */
- int latest; /*pack file with the latest file format */
+ hbool_t latest; /*pack file with the latest file format */
int grp_compact; /* Set the maximum number of links to store as header messages in the group */
int grp_indexed; /* Set the minimum number of links to store in the indexed format */
int msg_size[8]; /* Minimum size of shared messages: dataspace,
@@ -138,7 +138,8 @@ extern "C" {
int h5repack(const char* infile, const char* outfile, pack_opt_t *options);
int h5repack_addfilter(const char* str, pack_opt_t *options);
int h5repack_addlayout(const char* str, pack_opt_t *options);
-int h5repack_init(pack_opt_t *options, int verbose, H5F_file_space_type_t strategy, hsize_t threshold);
+int h5repack_init(pack_opt_t *options, int verbose, hbool_t latest,
+ H5F_file_space_type_t strategy, hsize_t threshold);
int h5repack_end(pack_opt_t *options);
int h5repack_verify(const char *in_fname, const char *out_fname, pack_opt_t *options);
int h5repack_cmp_pl(const char *fname1, const char *fname2);
diff --git a/tools/h5repack/h5repack_main.c b/tools/h5repack/h5repack_main.c
index c56e3ce..708c173 100644
--- a/tools/h5repack/h5repack_main.c
+++ b/tools/h5repack/h5repack_main.c
@@ -193,6 +193,7 @@ static void usage(const char *prog) {
printf("\n");
}
+
/*-------------------------------------------------------------------------
* Function: leave
*
@@ -360,7 +361,7 @@ int parse_command_line(int argc, const char **argv, pack_opt_t* options) {
has_i_o = 1;
break;
- /* -o for backward compability */
+    /* -o for backward compatibility */
case 'o':
outfile = opt_arg;
has_i_o = 1;
@@ -423,19 +424,19 @@ int parse_command_line(int argc, const char **argv, pack_opt_t* options) {
break;
case 'L':
- options->latest = 1;
+ options->latest = TRUE;
break;
case 'c':
options->grp_compact = HDatoi( opt_arg );
if (options->grp_compact > 0)
- options->latest = 1; /* must use latest format */
+ options->latest = TRUE; /* must use latest format */
break;
case 'd':
options->grp_indexed = HDatoi( opt_arg );
if (options->grp_indexed > 0)
- options->latest = 1; /* must use latest format */
+ options->latest = TRUE; /* must use latest format */
break;
case 's':
@@ -443,7 +444,7 @@ int parse_command_line(int argc, const char **argv, pack_opt_t* options) {
int idx = 0;
int ssize = 0;
char *msgPtr = HDstrchr( opt_arg, ':');
- options->latest = 1; /* must use latest format */
+ options->latest = TRUE; /* must use latest format */
if (msgPtr == NULL) {
ssize = HDatoi( opt_arg );
for (idx = 0; idx < 5; idx++)
@@ -579,7 +580,7 @@ int main(int argc, const char **argv) {
}
/* initialize options */
- h5repack_init(&options, 0, H5F_FILE_SPACE_DEFAULT, (hsize_t) 0);
+ h5repack_init(&options, 0, FALSE, H5F_FILE_SPACE_DEFAULT, (hsize_t) 0);
if (parse_command_line(argc, argv, &options) < 0)
goto done;
diff --git a/tools/h5repack/h5repacktst.c b/tools/h5repack/h5repacktst.c
index 3959300..030b052 100644
--- a/tools/h5repack/h5repacktst.c
+++ b/tools/h5repack/h5repacktst.c
@@ -214,7 +214,7 @@ int main (void)
TESTING(" copy of datasets (fill values)");
/* fs_type = 0; fs_size = 0 */
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack(FNAME0,FNAME0OUT,&pack_options) < 0)
GOERROR;
@@ -234,7 +234,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" copy of datasets (all datatypes)");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack(FNAME1,FNAME1OUT,&pack_options) < 0)
GOERROR;
@@ -254,7 +254,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" copy of datasets (attributes)");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack(FNAME2,FNAME2OUT,&pack_options) < 0)
GOERROR;
@@ -273,7 +273,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" copy of datasets (hardlinks)");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack(FNAME3,FNAME3OUT,&pack_options) < 0)
GOERROR;
@@ -293,7 +293,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" copy of allocation early file");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack(FNAME5,FNAME5OUT,&pack_options) < 0)
GOERROR;
@@ -314,7 +314,7 @@ int main (void)
* deflate
*-------------------------------------------------------------------------
*/
- TESTING(" adding deflate filter");
+ TESTING(" adding deflate filter (old_format)");
#ifdef H5_HAVE_FILTER_DEFLATE
@@ -323,7 +323,34 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ GOERROR;
+ if (h5repack_addfilter("dset1:GZIP=9",&pack_options) < 0)
+ GOERROR;
+ if (h5repack_addlayout("dset1:CHUNK=20x10",&pack_options) < 0)
+ GOERROR;
+ if (h5repack(FNAME4,FNAME4OUT,&pack_options) < 0)
+ GOERROR;
+ if (h5diff(FNAME4,FNAME4OUT,NULL,NULL,&diff_options) >0)
+ GOERROR;
+ if (h5repack_verify(FNAME4, FNAME4OUT, &pack_options) <= 0)
+ GOERROR;
+ if (h5repack_end (&pack_options) < 0)
+ GOERROR;
+ PASSED();
+#else
+ SKIPPED();
+#endif
+
+ TESTING(" adding deflate filter (new format)");
+#ifdef H5_HAVE_FILTER_DEFLATE
+ /*-------------------------------------------------------------------------
+ * test an individual object option
+ * For new format, "dset1" should be using Fixed Array indexing
+ *-------------------------------------------------------------------------
+ */
+
+ if (h5repack_init (&pack_options, 0, TRUE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset1:GZIP=9",&pack_options) < 0)
GOERROR;
@@ -351,7 +378,7 @@ int main (void)
#ifdef H5_HAVE_FILTER_DEFLATE
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("GZIP=1",&pack_options) < 0)
GOERROR;
@@ -389,7 +416,7 @@ int main (void)
*/
if (szip_can_encode) {
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset2:SZIP=8,EC",&pack_options) < 0)
GOERROR;
@@ -421,7 +448,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_SZIP)
if (szip_can_encode) {
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("SZIP=8,NN",&pack_options) < 0)
GOERROR;
@@ -450,7 +477,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset1:SHUF",&pack_options) < 0)
GOERROR;
@@ -475,7 +502,7 @@ int main (void)
TESTING(" addding shuffle filter to all");
/* fs_type = H5F_FILE_SPACE_ALL_PERSIST; fs_size = 1 */
- if (h5repack_init (&pack_options, 0, H5_INC_ENUM(H5F_file_space_type_t, fs_type), ++fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, H5_INC_ENUM(H5F_file_space_type_t, fs_type), ++fs_size) < 0)
GOERROR;
if (h5repack_addfilter("SHUF",&pack_options) < 0)
GOERROR;
@@ -500,7 +527,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset1:FLET",&pack_options) < 0)
GOERROR;
@@ -525,7 +552,7 @@ int main (void)
TESTING(" adding checksum filter to all");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("FLET",&pack_options) < 0)
GOERROR;
@@ -550,7 +577,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset1:CHUNK 20x10",&pack_options) < 0)
GOERROR;
@@ -583,14 +610,36 @@ int main (void)
PASSED();
- TESTING(" adding layout chunked");
+ TESTING(" adding layout chunked (old format)");
+
+ /*-------------------------------------------------------------------------
+ * test an individual object option
+ *-------------------------------------------------------------------------
+ */
+
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ GOERROR;
+ if (h5repack_addlayout("dset1:CHUNK=20x10",&pack_options) < 0)
+ GOERROR;
+ if (h5repack(FNAME4,FNAME4OUT,&pack_options) < 0)
+ GOERROR;
+ if (h5diff(FNAME4,FNAME4OUT,NULL,NULL,&diff_options) >0)
+ GOERROR;
+ if (h5repack_verify(FNAME4, FNAME4OUT, &pack_options) <= 0)
+ GOERROR;
+ if (h5repack_end (&pack_options) < 0)
+ GOERROR;
+ PASSED();
+
+ TESTING(" adding layout chunked (new format)");
/*-------------------------------------------------------------------------
* test an individual object option
+ * For new format, "dset1" should be using Fixed Array indexing
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, TRUE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset1:CHUNK=20x10",&pack_options) < 0)
GOERROR;
@@ -610,7 +659,7 @@ int main (void)
*/
TESTING(" adding layout chunked to all");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("CHUNK=20x10",&pack_options) < 0)
GOERROR;
@@ -631,7 +680,7 @@ int main (void)
* test an individual object option
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset1:CONTI",&pack_options) < 0)
GOERROR;
@@ -652,7 +701,7 @@ int main (void)
* test all objects option
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("CONTI",&pack_options) < 0)
GOERROR;
@@ -669,7 +718,7 @@ int main (void)
* do the same test for a file with filters (chunked)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("CONTI",&pack_options) < 0)
GOERROR;
@@ -691,7 +740,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset1:COMPA",&pack_options) < 0)
GOERROR;
@@ -713,7 +762,7 @@ int main (void)
*/
/* fs_type = H5F_FILE_SPACE_ALL; fs_size = 2 */
- if (h5repack_init (&pack_options, 0, H5_INC_ENUM(H5F_file_space_type_t, fs_type), ++fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, H5_INC_ENUM(H5F_file_space_type_t, fs_type), ++fs_size) < 0)
GOERROR;
if (h5repack_addlayout("COMPA",&pack_options) < 0)
GOERROR;
@@ -735,7 +784,7 @@ int main (void)
* layout compact to contiguous conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset_compact:CONTI",&pack_options) < 0)
GOERROR;
@@ -755,7 +804,7 @@ int main (void)
* layout compact to chunk conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset_compact:CHUNK=2x5",&pack_options) < 0)
GOERROR;
@@ -775,7 +824,7 @@ int main (void)
* layout compact to compact conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset_compact:COMPA",&pack_options) < 0)
GOERROR;
@@ -794,7 +843,7 @@ int main (void)
* layout contiguous to compact conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset_contiguous:COMPA",&pack_options) < 0)
GOERROR;
@@ -813,7 +862,7 @@ int main (void)
* layout contiguous to chunk conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset_contiguous:CHUNK=3x6",&pack_options) < 0)
GOERROR;
@@ -833,7 +882,7 @@ int main (void)
* layout contiguous to contiguous conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset_contiguous:CONTI",&pack_options) < 0)
GOERROR;
@@ -852,7 +901,7 @@ int main (void)
* layout chunked to compact conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset_chunk:COMPA",&pack_options) < 0)
GOERROR;
@@ -872,7 +921,7 @@ int main (void)
* layout chunked to contiguous conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset_chunk:CONTI",&pack_options) < 0)
GOERROR;
@@ -891,7 +940,7 @@ int main (void)
* layout chunked to chunked conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addlayout("dset_chunk:CHUNK=18x13",&pack_options) < 0)
GOERROR;
@@ -922,7 +971,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_SZIP)
if (szip_can_encode) {
/* fs_type = H5F_FILE_SPACE_AGGR_VFD; fs_size = 3 */
- if (h5repack_init (&pack_options, 0, H5_INC_ENUM(H5F_file_space_type_t, fs_type), ++fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, H5_INC_ENUM(H5F_file_space_type_t, fs_type), ++fs_size) < 0)
GOERROR;
if (h5repack(FNAME7,FNAME7OUT,&pack_options) < 0)
GOERROR;
@@ -947,7 +996,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_SZIP)
if (szip_can_encode) {
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset_szip:NONE",&pack_options) < 0)
GOERROR;
@@ -972,7 +1021,7 @@ int main (void)
TESTING(" copy of deflate filter");
#ifdef H5_HAVE_FILTER_DEFLATE
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack(FNAME8,FNAME8OUT,&pack_options) < 0)
GOERROR;
@@ -992,7 +1041,7 @@ int main (void)
TESTING(" removing deflate filter");
#ifdef H5_HAVE_FILTER_DEFLATE
- if (h5repack_init (&pack_options, 0, fs_type, ++fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, ++fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset_deflate:NONE",&pack_options) < 0)
GOERROR;
@@ -1014,7 +1063,7 @@ int main (void)
TESTING(" copy of shuffle filter");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack(FNAME9,FNAME9OUT,&pack_options) < 0)
GOERROR;
@@ -1030,7 +1079,7 @@ int main (void)
TESTING(" removing shuffle filter");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset_shuffle:NONE",&pack_options) < 0)
GOERROR;
@@ -1048,7 +1097,7 @@ int main (void)
TESTING(" copy of fletcher filter");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack(FNAME10,FNAME10OUT,&pack_options) < 0)
GOERROR;
@@ -1064,7 +1113,7 @@ int main (void)
TESTING(" removing fletcher filter");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset_fletcher32:NONE",&pack_options) < 0)
GOERROR;
@@ -1082,7 +1131,7 @@ int main (void)
TESTING(" copy of nbit filter");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack(FNAME12,FNAME12OUT,&pack_options) < 0)
GOERROR;
@@ -1098,7 +1147,7 @@ int main (void)
TESTING(" removing nbit filter");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset_nbit:NONE",&pack_options) < 0)
GOERROR;
@@ -1116,7 +1165,7 @@ int main (void)
TESTING(" adding nbit filter");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset_int31:NBIT",&pack_options) < 0)
GOERROR;
@@ -1134,7 +1183,7 @@ int main (void)
TESTING(" copy of scaleoffset filter");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack(FNAME13,FNAME13OUT,&pack_options) < 0)
GOERROR;
@@ -1150,7 +1199,7 @@ int main (void)
TESTING(" removing scaleoffset filter");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset_scaleoffset:NONE",&pack_options) < 0)
GOERROR;
@@ -1168,7 +1217,7 @@ int main (void)
TESTING(" adding scaleoffset filter");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset_none:SOFF=31,IN",&pack_options) < 0)
GOERROR;
@@ -1201,7 +1250,7 @@ int main (void)
if (szip_can_encode) {
/* fs_type = H5F_FILE_SPACE_VFD; fs_size = 4 */
- if (h5repack_init (&pack_options, 0, H5_INC_ENUM(H5F_file_space_type_t, fs_type), ++fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, H5_INC_ENUM(H5F_file_space_type_t, fs_type), ++fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset_deflate:SZIP=8,NN",&pack_options) < 0)
GOERROR;
@@ -1227,7 +1276,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_SZIP) && defined (H5_HAVE_FILTER_DEFLATE)
if (szip_can_encode) {
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("dset_szip:GZIP=1",&pack_options) < 0)
GOERROR;
@@ -1258,7 +1307,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_SZIP) && defined (H5_HAVE_FILTER_DEFLATE)
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("NONE",&pack_options) < 0)
GOERROR;
@@ -1282,7 +1331,7 @@ int main (void)
*/
TESTING(" big file");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack(FNAME14,FNAME14OUT,&pack_options) < 0)
GOERROR;
@@ -1299,7 +1348,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" external datasets");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack(FNAME15,FNAME15OUT,&pack_options) < 0)
GOERROR;
@@ -1316,7 +1365,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" file with userblock");
- if(h5repack_init(&pack_options, 0, fs_type, fs_size) < 0)
+ if(h5repack_init(&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if(h5repack(FNAME16, FNAME16OUT, &pack_options) < 0)
GOERROR;
@@ -1335,7 +1384,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" latest file format options");
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
pack_options.latest=1;
pack_options.grp_compact=10;
@@ -1365,7 +1414,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_DEFLATE)
- if (h5repack_init (&pack_options, 0, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if (h5repack_addfilter("GZIP=1",&pack_options) < 0)
GOERROR;
@@ -1394,7 +1443,7 @@ int main (void)
#ifdef H5_HAVE_FILTER_DEFLATE
- if(h5repack_init(&pack_options, 0, fs_type, fs_size) < 0)
+ if(h5repack_init(&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
/* add the options for a user block size and user block filename */
@@ -1427,7 +1476,7 @@ int main (void)
#ifdef H5_HAVE_FILTER_DEFLATE
- if(h5repack_init(&pack_options, 0, fs_type, fs_size) < 0)
+ if(h5repack_init(&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
/* add the options for aligment */
@@ -1481,7 +1530,7 @@ int main (void)
*/
TESTING(" file with committed datatypes");
- if(h5repack_init(&pack_options, 0, fs_type, fs_size) < 0)
+ if(h5repack_init(&pack_options, 0, FALSE, fs_type, fs_size) < 0)
GOERROR;
if(h5repack(FNAME17, FNAME17OUT, &pack_options) < 0)
@@ -1508,7 +1557,7 @@ int main (void)
/* First run without metadata option. No need to verify the correctness */
/* since this has been verified by earlier tests. Just record the file */
/* size of the output file. */
- if(h5repack_init(&pack_options, 0, H5F_FILE_SPACE_DEFAULT, (hsize_t)0) < 0)
+ if(h5repack_init(&pack_options, 0, FALSE, H5F_FILE_SPACE_DEFAULT, (hsize_t)0) < 0)
GOERROR;
if(h5repack(FNAME4, FNAME4OUT, &pack_options) < 0)
GOERROR;
@@ -1519,7 +1568,7 @@ int main (void)
GOERROR;
/* run it again with metadata option */
- if(h5repack_init(&pack_options, 0, H5F_FILE_SPACE_DEFAULT, (hsize_t)0) < 0)
+ if(h5repack_init(&pack_options, 0, FALSE, H5F_FILE_SPACE_DEFAULT, (hsize_t)0) < 0)
GOERROR;
pack_options.meta_block_size = 8192;
if(h5repack(FNAME4, FNAME4OUT, &pack_options) < 0)
diff --git a/tools/h5repack/testfiles/h5repack_attr.h5 b/tools/h5repack/testfiles/h5repack_attr.h5
index fe066db..3bc4906 100644
--- a/tools/h5repack/testfiles/h5repack_attr.h5
+++ b/tools/h5repack/testfiles/h5repack_attr.h5
Binary files differ
diff --git a/tools/h5repack/testfiles/h5repack_deflate.h5 b/tools/h5repack/testfiles/h5repack_deflate.h5
index 3a4b86d..86d66c0 100644
--- a/tools/h5repack/testfiles/h5repack_deflate.h5
+++ b/tools/h5repack/testfiles/h5repack_deflate.h5
Binary files differ
diff --git a/tools/h5repack/testfiles/h5repack_early.h5 b/tools/h5repack/testfiles/h5repack_early.h5
index 9b92890..0394bef 100644
--- a/tools/h5repack/testfiles/h5repack_early.h5
+++ b/tools/h5repack/testfiles/h5repack_early.h5
Binary files differ
diff --git a/tools/h5repack/testfiles/h5repack_fill.h5 b/tools/h5repack/testfiles/h5repack_fill.h5
index 21516cb..81c32d5 100644
--- a/tools/h5repack/testfiles/h5repack_fill.h5
+++ b/tools/h5repack/testfiles/h5repack_fill.h5
Binary files differ
diff --git a/tools/h5repack/testfiles/h5repack_filters.h5 b/tools/h5repack/testfiles/h5repack_filters.h5
index 042b8db..0600adb 100644
--- a/tools/h5repack/testfiles/h5repack_filters.h5
+++ b/tools/h5repack/testfiles/h5repack_filters.h5
Binary files differ
diff --git a/tools/h5repack/testfiles/h5repack_fletcher.h5 b/tools/h5repack/testfiles/h5repack_fletcher.h5
index 2f137b0..e4e2c05 100644
--- a/tools/h5repack/testfiles/h5repack_fletcher.h5
+++ b/tools/h5repack/testfiles/h5repack_fletcher.h5
Binary files differ
diff --git a/tools/h5repack/testfiles/h5repack_hlink.h5 b/tools/h5repack/testfiles/h5repack_hlink.h5
index 3d22728..b00f9a2 100644
--- a/tools/h5repack/testfiles/h5repack_hlink.h5
+++ b/tools/h5repack/testfiles/h5repack_hlink.h5
Binary files differ
diff --git a/tools/h5repack/testfiles/h5repack_layouto.h5 b/tools/h5repack/testfiles/h5repack_layouto.h5
index a038e68..3322020 100644
--- a/tools/h5repack/testfiles/h5repack_layouto.h5
+++ b/tools/h5repack/testfiles/h5repack_layouto.h5
Binary files differ
diff --git a/tools/h5repack/testfiles/h5repack_nbit.h5 b/tools/h5repack/testfiles/h5repack_nbit.h5
index 3ada112..c678f1c 100644
--- a/tools/h5repack/testfiles/h5repack_nbit.h5
+++ b/tools/h5repack/testfiles/h5repack_nbit.h5
Binary files differ
diff --git a/tools/h5repack/testfiles/h5repack_shuffle.h5 b/tools/h5repack/testfiles/h5repack_shuffle.h5
index d13cca7..6520193 100644
--- a/tools/h5repack/testfiles/h5repack_shuffle.h5
+++ b/tools/h5repack/testfiles/h5repack_shuffle.h5
Binary files differ
diff --git a/tools/h5repack/testfiles/h5repack_soffset.h5 b/tools/h5repack/testfiles/h5repack_soffset.h5
index 89ee99a..a9457d3 100644
--- a/tools/h5repack/testfiles/h5repack_soffset.h5
+++ b/tools/h5repack/testfiles/h5repack_soffset.h5
Binary files differ
diff --git a/tools/h5repack/testfiles/h5repack_szip.h5 b/tools/h5repack/testfiles/h5repack_szip.h5
index b16d169..5eab9f8 100644
--- a/tools/h5repack/testfiles/h5repack_szip.h5
+++ b/tools/h5repack/testfiles/h5repack_szip.h5
Binary files differ
diff --git a/tools/h5stat/h5stat_gentest.c b/tools/h5stat/h5stat_gentest.c
index 100f5b2..02bd136 100644
--- a/tools/h5stat/h5stat_gentest.c
+++ b/tools/h5stat/h5stat_gentest.c
@@ -32,6 +32,11 @@
#define NUM_GRPS 35000
#define NUM_ATTRS 100
+/* Declarations for gen_idx_file() */
+#define IDX_FILE "h5stat_idx.h5"
+#define DSET "dset"
+#define DSET_FILTER "dset_filter"
+
/* For gen_threshold_file() */
#define THRESHOLD_FILE "h5stat_threshold.h5"
#define THRES_ATTR_NAME "attr"
@@ -43,7 +48,6 @@
/*
* Generate HDF5 file with latest format with
* NUM_GRPS groups and NUM_ATTRS attributes for the dataset
- *
*/
static void
gen_newgrat_file(const char *fname)
@@ -111,27 +115,33 @@ gen_newgrat_file(const char *fname)
} /* end for */
/* Close dataset, dataspace, datatype, file */
- if(H5Dclose(did) < 0)
+ if(H5Pclose(fapl) < 0)
goto error;
- if(H5Sclose(sid) < 0)
+ if(H5Pclose(fcpl) < 0)
+ goto error;
+ if(H5Dclose(did) < 0)
goto error;
if(H5Tclose(tid) < 0)
goto error;
+ if(H5Sclose(sid) < 0)
+ goto error;
if(H5Fclose(fid) < 0)
goto error;
error:
H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Pclose(fcpl);
H5Aclose(attr_id);
- H5Dclose(did);
H5Tclose(tid);
H5Sclose(sid);
H5Gclose(gid);
+ H5Dclose(did);
H5Fclose(fid);
} H5E_END_TRY;
-
} /* gen_newgrat_file() */
+
/*
* Generate an HDF5 file with groups, datasets, attributes for testing the options:
* -l N (--links=N): Set the threshold for # of links when printing information for small groups.
@@ -331,11 +341,110 @@ error:
} /* gen_threshold_file() */
+/*
+ * Function: gen_idx_file
+ *
+ * Purpose: Create a file with datasets that use Fixed Array indexing:
+ * one dataset: fixed dimension, chunked layout, w/o filters
+ * one dataset: fixed dimension, chunked layout, w/ filters
+ *
+ */
+static void
+gen_idx_file(const char *fname)
+{
+ hid_t fapl = -1; /* file access property id */
+ hid_t fid = -1; /* file id */
+ hid_t sid = -1; /* space id */
+ hid_t dcpl = -1; /* dataset creation property id */
+ hid_t did = -1, did2 = -1; /* dataset id */
+ hsize_t dims[1] = {10}; /* dataset dimension */
+ hsize_t c_dims[1] = {2}; /* chunk dimension */
+ int i; /* local index variable */
+ int buf[10]; /* data buffer */
+
+ /* Get a copy of the file access property */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+
+ /* Set the "use the latest format" bounds for creating objects in the file */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ goto error;
+
+ /* Create file */
+ if((fid = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ goto error;
+
+ /* Create data */
+ for(i = 0; i < 10; i++)
+ buf[i] = i;
+
+ /* Set chunk */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ goto error;
+
+ if(H5Pset_chunk(dcpl, 1, c_dims) < 0)
+ goto error;
+
+ /* Create a 1D dataset */
+ if((sid = H5Screate_simple(1, dims, NULL)) < 0)
+ goto error;
+ if((did = H5Dcreate2(fid, DSET, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ goto error;
+
+ /* Write to the dataset */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ goto error;
+
+#if defined (H5_HAVE_FILTER_DEFLATE)
+ /* set deflate data */
+ if(H5Pset_deflate(dcpl, 9) < 0)
+ goto error;
+
+ /* Create and write the dataset */
+ if((did2 = H5Dcreate2(fid, DSET_FILTER, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ goto error;
+ if(H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ goto error;
+
+ /* Close the dataset */
+ if(H5Dclose(did2) < 0)
+ goto error;
+#endif
+
+ /* closing: dataspace, dataset, file */
+ if(H5Pclose(fapl) < 0)
+ goto error;
+ if(H5Pclose(dcpl) < 0)
+ goto error;
+ if(H5Sclose(sid) < 0)
+ goto error;
+ if(H5Dclose(did) < 0)
+ goto error;
+ if(H5Fclose(fid) < 0)
+ goto error;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Pclose(dcpl);
+ H5Sclose(sid);
+ H5Dclose(did);
+ H5Fclose(fid);
+#if defined (H5_HAVE_FILTER_DEFLATE)
+ H5Dclose(did2);
+#endif
+ } H5E_END_TRY;
+
+} /* gen_idx_file() */
+
int main(void)
{
gen_newgrat_file(NEWGRAT_FILE);
gen_threshold_file(THRESHOLD_FILE);
+ /* Generate an HDF5 file to test for datasets with Fixed Array indexing */
+ gen_idx_file(IDX_FILE);
+
return 0;
}
diff --git a/tools/h5stat/testfiles/h5stat_filters.h5 b/tools/h5stat/testfiles/h5stat_filters.h5
index 5b5f4bb..cbd4467 100644
--- a/tools/h5stat/testfiles/h5stat_filters.h5
+++ b/tools/h5stat/testfiles/h5stat_filters.h5
Binary files differ
diff --git a/tools/h5stat/testfiles/h5stat_idx.ddl b/tools/h5stat/testfiles/h5stat_idx.ddl
new file mode 100644
index 0000000..b26f1a4
--- /dev/null
+++ b/tools/h5stat/testfiles/h5stat_idx.ddl
@@ -0,0 +1,93 @@
+Filename: h5stat_idx.h5
+File information
+ # of unique groups: 1
+ # of unique datasets: 2
+ # of unique named datatypes: 0
+ # of unique links: 0
+ # of unique other: 0
+ Max. # of links to object: 1
+ Max. # of objects in group: 2
+File space information for file metadata (in bytes):
+ Superblock: 48
+ Superblock extension: 0
+ User block: 0
+ Object headers: (total/unused)
+ Groups: 147/47
+ Datasets(exclude compact data): 568/362
+ Datatypes: 0/0
+ Groups:
+ B-tree/List: 0
+ Heap: 0
+ Attributes:
+ B-tree/List: 0
+ Heap: 0
+ Chunked datasets:
+ Index: 202
+ Datasets:
+ Heap: 0
+ Shared Messages:
+ Header: 0
+ B-tree/List: 0
+ Heap: 0
+ Free-space managers:
+ Header: 0
+ Amount of free space: 0
+Small groups (with 0 to 9 links):
+ # of groups with 2 link(s): 1
+ Total # of small groups: 1
+Group bins:
+ # of groups with 1 - 9 links: 1
+ Total # of groups: 1
+Dataset dimension information:
+ Max. rank of datasets: 1
+ Dataset ranks:
+ # of dataset with rank 1: 2
+1-D Dataset information:
+ Max. dimension size of 1-D datasets: 10
+ Small 1-D datasets (with dimension sizes 0 to 9):
+ Total # of small datasets: 0
+ 1-D Dataset dimension bins:
+ # of datasets with dimension size 10 - 99: 2
+ Total # of datasets: 2
+Dataset storage information:
+ Total raw data size: 110
+ Total external raw data size: 0
+Dataset layout information:
+ Dataset layout counts[COMPACT]: 0
+ Dataset layout counts[CONTIG]: 0
+ Dataset layout counts[CHUNKED]: 2
+ Dataset layout counts[VIRTUAL]: 0
+ Number of external files : 0
+Dataset filters information:
+ Number of datasets with:
+ NO filter: 1
+ GZIP filter: 1
+ SHUFFLE filter: 0
+ FLETCHER32 filter: 0
+ SZIP filter: 0
+ NBIT filter: 0
+ SCALEOFFSET filter: 0
+ USER-DEFINED filter: 0
+Dataset datatype information:
+ # of unique datatypes used by datasets: 1
+ Dataset datatype #0:
+ Count (total/named) = (2/0)
+ Size (desc./elmt) = (14/4)
+ Total dataset datatype count: 2
+Small # of attributes (objects with 1 to 10 attributes):
+ Total # of objects with small # of attributes: 0
+Attribute bins:
+ Total # of objects with attributes: 0
+ Max. # of attributes to objects: 0
+Free-space section threshold: 1 bytes
+Small size free-space sections (< 10 bytes):
+ Total # of small size sections: 0
+Free-space section bins:
+ Total # of sections: 0
+File space management strategy: H5F_FILE_SPACE_ALL
+Summary of file space information:
+ File metadata: 965 bytes
+ Raw data: 110 bytes
+ Amount/Percent of tracked free space: 0 bytes/0.0%
+ Unaccounted space: 1131 bytes
+Total space: 2206 bytes
diff --git a/tools/h5stat/testfiles/h5stat_idx.h5 b/tools/h5stat/testfiles/h5stat_idx.h5
new file mode 100644
index 0000000..303d1f8
--- /dev/null
+++ b/tools/h5stat/testfiles/h5stat_idx.h5
Binary files differ
diff --git a/tools/h5stat/testh5stat.sh.in b/tools/h5stat/testh5stat.sh.in
index 4d698da..b48f327 100644
--- a/tools/h5stat/testh5stat.sh.in
+++ b/tools/h5stat/testh5stat.sh.in
@@ -74,6 +74,7 @@ LIST_HDF5_TEST_FILES="
$SRC_H5STAT_TESTFILES/h5stat_filters.h5
$SRC_H5STAT_TESTFILES/h5stat_tsohm.h5
$SRC_H5STAT_TESTFILES/h5stat_newgrat.h5
+$SRC_H5STAT_TESTFILES/h5stat_idx.h5
$SRC_H5STAT_TESTFILES/h5stat_threshold.h5
"
@@ -94,6 +95,7 @@ $SRC_H5STAT_TESTFILES/h5stat_tsohm.ddl
$SRC_H5STAT_TESTFILES/h5stat_newgrat.ddl
$SRC_H5STAT_TESTFILES/h5stat_newgrat-UG.ddl
$SRC_H5STAT_TESTFILES/h5stat_newgrat-UA.ddl
+$SRC_H5STAT_TESTFILES/h5stat_idx.ddl
$SRC_H5STAT_TESTFILES/h5stat_err1_links.ddl
$SRC_H5STAT_TESTFILES/h5stat_links1.ddl
$SRC_H5STAT_TESTFILES/h5stat_links2.ddl
@@ -259,6 +261,8 @@ TOOLTEST h5stat_tsohm.ddl h5stat_tsohm.h5
TOOLTEST h5stat_newgrat.ddl h5stat_newgrat.h5
TOOLTEST h5stat_newgrat-UG.ddl -G h5stat_newgrat.h5
TOOLTEST h5stat_newgrat-UA.ddl -A h5stat_newgrat.h5
+# h5stat_idx.h5 is generated by h5stat_gentest.c
+TOOLTEST h5stat_idx.ddl h5stat_idx.h5
#
# Tests for -l (--links) option on h5stat_threshold.h5:
# -l 0 (incorrect threshold value)
diff --git a/tools/lib/h5tools.h b/tools/lib/h5tools.h
index d2e3ea6..a7f28e5 100644
--- a/tools/lib/h5tools.h
+++ b/tools/lib/h5tools.h
@@ -203,6 +203,10 @@ typedef struct h5tools_dump_header_t {
} h5tools_dump_header_t;
+/* Forward declaration (see declaration in h5tools_str.c) */
+struct H5LD_memb_t;
+
+
/*
* Information about how to format output.
*/
@@ -338,12 +342,16 @@ typedef struct h5tool_format_t {
*
* end: a string to print after we reach the last element of
* each compound type. prints out before the suf.
+ *
+ * listv: h5watch: vector containing info about the list of compound fields to be printed.
*/
const char *cmpd_name;
const char *cmpd_sep;
const char *cmpd_pre;
const char *cmpd_suf;
const char *cmpd_end;
+ const struct H5LD_memb_t **cmpd_listv;
+
/*
* Fields associated with vlen data types.
@@ -510,12 +518,13 @@ typedef struct h5tools_context_t {
hsize_t size_last_dim; /*the size of the last dimension,
*needed so we can break after each
*row */
- int indent_level; /*the number of times we need some
+ int indent_level; /*the number of times we need some
*extra indentation */
int default_indent_level; /*this is used when the indent level gets changed */
hsize_t acc[H5S_MAX_RANK]; /* accumulator position */
hsize_t pos[H5S_MAX_RANK]; /* matrix position */
hsize_t sm_pos; /* current stripmine element position */
+ struct H5LD_memb_t **cmpd_listv; /* h5watch: vector containing info about the list of compound fields to be printed */
} h5tools_context_t;
typedef struct subset_d {
diff --git a/tools/lib/h5tools_dump.c b/tools/lib/h5tools_dump.c
index 0d39981..c6e96e4 100644
--- a/tools/lib/h5tools_dump.c
+++ b/tools/lib/h5tools_dump.c
@@ -61,6 +61,7 @@ NULL, /*fmt_ullong */
"{", /*cmpd_pre */
"}", /*cmpd_suf */
"\n", /*cmpd_end */
+NULL, /* cmpd_listv */
", ", /*vlen_sep */
"(", /*vlen_pre */
@@ -1488,6 +1489,9 @@ h5tools_dump_simple_subset(FILE *stream, const h5tool_format_t *info, h5tools_co
H5E_THROW(FAIL, H5E_tools_min_id_g, "H5Sget_simple_extent_dims failed");
ctx->size_last_dim = total_size[ctx->ndims - 1];
+ /* Set the compound datatype field list for display */
+ ctx->cmpd_listv = info->cmpd_listv;
+
h5tools_display_simple_subset(stream, info, ctx, dset, p_type, sset, f_space, total_size);
CATCH
diff --git a/tools/lib/h5tools_str.c b/tools/lib/h5tools_str.c
index bdb82a4..5477f91 100644
--- a/tools/lib/h5tools_str.c
+++ b/tools/lib/h5tools_str.c
@@ -29,6 +29,15 @@
#include "h5tools_ref.h"
#include "h5tools_str.h" /*function prototypes */
+/* Copied from hl/src/H5LDprivate.h */
+/* Info about the list of comma-separated compound fields */
+typedef struct H5LD_memb_t {
+ size_t tot_offset;
+ size_t last_tsize;
+ hid_t last_tid;
+ char **names;
+} H5LD_memb_t;
+
/*
* If REPEAT_VERBOSE is defined then character strings will be printed so
* that repeated character sequences like "AAAAAAAAAA" are displayed as
@@ -267,6 +276,9 @@ h5tools_str_fmt(h5tools_str_t *str/*in,out*/, size_t start, const char *fmt)
{
char _temp[1024], *temp = _temp;
+ HDassert(str);
+ HDassert(fmt);
+
/* If the format string is simply "%s" then don't bother doing anything */
if (!HDstrcmp(fmt, "%s"))
return str->s;
@@ -731,6 +743,9 @@ h5tools_str_indent(h5tools_str_t *str, const h5tool_format_t *info,
* PVN, 28 March 2006
* added H5T_NATIVE_LDOUBLE case
*
+ * Vailin Choi; August 2010
+ * Modified to handle printing of selected compound fields for h5watch.
+ *
* Raymond Lu, 2011-09-01
* CLANG compiler complained about the line (about 800):
* tempint = (tempint >> packed_data_offset) & packed_data_mask;
@@ -1029,7 +1044,57 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
} /* end if (sizeof(long long) == nsize) */
break;
case H5T_COMPOUND:
- {
+ if(ctx->cmpd_listv) { /* there is <list_of_fields> */
+ int save_indent_level; /* The indentation level */
+ size_t curr_field; /* Current field to display */
+ int i = 0, x = 0; /* Local index variable */
+ H5LD_memb_t **listv; /* Vector of information for <list_of_fields> */
+
+ listv = ctx->cmpd_listv;
+ ctx->cmpd_listv = NULL;
+
+ h5tools_str_append(str, "%s", OPT(info->cmpd_pre, "{"));
+
+ /*
+ * Go through the vector containing info about the comma-separated list of
+ * compound fields and then members in each field:
+ * put in "{", "}", ",", member name and value accordingly.
+ */
+ save_indent_level = ctx->indent_level;
+ for(curr_field = 0; listv[curr_field] != NULL; curr_field++) {
+ if (curr_field)
+ h5tools_str_append(str, "%s", OPT(info->cmpd_sep, ", "OPTIONAL_LINE_BREAK));
+ else
+ h5tools_str_append(str, "%s", OPT(info->cmpd_end, ""));
+
+ if(info->arr_linebreak)
+ h5tools_str_indent(str, info, ctx);
+
+ /* Process members of each field */
+ for(i = 0; listv[curr_field]->names[i] != NULL; i++) {
+ h5tools_str_append(str, OPT(info->cmpd_name, ""), listv[curr_field]->names[i]);
+ if(i) {
+ ctx->indent_level++;
+ h5tools_str_append(str, "%s", OPT(info->cmpd_pre, "{"));
+ }
+ }
+ h5tools_str_sprint(str, info, container, listv[curr_field]->last_tid, cp_vp + listv[curr_field]->tot_offset, ctx);
+ if(ctx->indent_level > 0)
+ for(x = ctx->indent_level; x > 0; x--)
+ h5tools_str_append(str, "%s", OPT(info->cmpd_suf, "}"));
+ ctx->indent_level = save_indent_level;
+ }
+
+
+ if(info->arr_linebreak) {
+ h5tools_str_append(str, "%s", OPT(info->cmpd_end, ""));
+ h5tools_str_indent(str, info, ctx);
+ }
+ h5tools_str_append(str, "%s", OPT(info->cmpd_suf, "}"));
+
+ ctx->cmpd_listv = info->cmpd_listv;
+
+ } else {
unsigned j;
nmembs = H5Tget_nmembers(type);
diff --git a/tools/misc/Makefile.am b/tools/misc/Makefile.am
index fdfe8f7..bde6805 100644
--- a/tools/misc/Makefile.am
+++ b/tools/misc/Makefile.am
@@ -27,20 +27,24 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/tools/lib
#test scripts and programs
TEST_PROG=h5repart_gentest talign
-TEST_SCRIPT=testh5repart.sh testh5mkgrp.sh
+TEST_SCRIPT=testh5repart.sh testh5mkgrp.sh testh5clear.sh
-check_PROGRAMS=$(TEST_PROG) repart_test
+
+check_PROGRAMS=$(TEST_PROG) repart_test clear_open_chk
check_SCRIPTS=$(TEST_SCRIPT)
-SCRIPT_DEPEND=h5repart$(EXEEXT) h5mkgrp$(EXEEXT)
+SCRIPT_DEPEND=h5repart$(EXEEXT) h5mkgrp$(EXEEXT) h5clear$(EXEEXT)
+
# These are our main targets, the tools
-bin_PROGRAMS=h5debug h5repart h5mkgrp
+bin_PROGRAMS=h5debug h5repart h5mkgrp h5clear
bin_SCRIPTS=h5redeploy
+noinst_PROGRAMS=h5clear_gentest
# Add h5debug, h5repart, and h5mkgrp specific linker flags here
h5debug_LDFLAGS = $(LT_STATIC_EXEC) $(AM_LDFLAGS)
h5repart_LDFLAGS = $(LT_STATIC_EXEC) $(AM_LDFLAGS)
h5mkgrp_LDFLAGS = $(LT_STATIC_EXEC) $(AM_LDFLAGS)
+h5clear_LDFLAGS = $(LT_STATIC_EXEC) $(AM_LDFLAGS)
# Tell automake to clean h5redeploy script
CLEANFILES=h5redeploy
@@ -51,7 +55,7 @@ CLEANFILES=h5redeploy
CHECK_CLEANFILES+=*.h5 ../testfiles/fst_family*.h5 ../testfiles/scd_family*.h5
# These were generated by configure. Remove them only when distclean.
-DISTCLEANFILES=h5cc testh5repart.sh
+DISTCLEANFILES=h5cc testh5repart.sh testh5clear.sh
# All programs rely on hdf5 library and h5tools library
LDADD=$(LIBH5TOOLS) $(LIBHDF5)
diff --git a/tools/misc/clear_open_chk.c b/tools/misc/clear_open_chk.c
new file mode 100644
index 0000000..f3e6ba3
--- /dev/null
+++ b/tools/misc/clear_open_chk.c
@@ -0,0 +1,72 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#include "hdf5.h"
+#include "H5private.h"
+#include "h5tools.h"
+
+static void usage(void);
+
+static void
+usage(void)
+{
+ HDfprintf(stdout, "\n");
+ HDfprintf(stdout, "Usage error!\n");
+ HDfprintf(stdout, "Usage: clear_open_chk filename\n");
+} /* usage() */
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: To open the file which has zero or nonzero status_flags in
+ * the superblock.
+ *
+ * Return: 0 on success
+ * 1 on failure
+ *
+ * Programmer: Vailin Choi; July 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(int argc, char *argv[])
+{
+ char *fname; /* The HDF5 file name */
+ hid_t fid; /* File ID */
+
+ /* Check the # of arguments */
+ if(argc != 2) {
+ usage();
+ return(EXIT_FAILURE);
+ }
+
+ /* Get the file name */
+ fname = HDstrdup(argv[1]);
+
+ /* Try opening the file */
+ if((fid = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, NULL, NULL, (size_t)0)) < 0) {
+ HDfprintf(stderr, "clear_open_chk: unable to open the file\n");
+ return EXIT_FAILURE;
+ }
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0) {
+ HDfprintf(stderr, "clear_open_chk: cannot close the file\n");
+ return EXIT_FAILURE;
+ }
+
+ /* Return success */
+ return EXIT_SUCCESS;
+
+} /* main() */
diff --git a/tools/misc/h5clear.c b/tools/misc/h5clear.c
new file mode 100644
index 0000000..0be4f8f
--- /dev/null
+++ b/tools/misc/h5clear.c
@@ -0,0 +1,137 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer:
+ *
+ * Purpose:
+ */
+
+#include "hdf5.h"
+#include "H5private.h"
+#include "h5tools.h"
+#include "h5tools_utils.h"
+
+/* Name of tool */
+#define PROGRAMNAME "h5clear"
+
+/* Make this private property (defined in H5Fprivate.h) available to h5clear. */
+#define H5F_ACS_CLEAR_STATUS_FLAGS_NAME "clear_status_flags"
+
+/*-------------------------------------------------------------------------
+ * Function: leave
+ *
+ * Purpose: Close the tools library and exit
+ *
+ * Return: Does not return
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+leave(int ret)
+{
+ h5tools_close();
+ HDexit(ret);
+
+} /* leave() */
+
+/*-------------------------------------------------------------------------
+ * Function: usage
+ *
+ * Purpose: Prints a usage message
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+usage(void)
+{
+ HDfprintf(stdout, "usage: h5clear filename\n");
+
+} /* usage() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose:
+ *
+ * Return: Success:
+ * Failure:
+ *
+ * Programmer:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main (int argc, char *argv[])
+{
+ char *fname; /* File name */
+ hbool_t clear = TRUE; /* To clear the status_flags in the file's superblock */
+ hid_t fapl = -1; /* File access property list */
+ hid_t fid = -1; /* File ID */
+
+ h5tools_setprogname(PROGRAMNAME);
+ h5tools_setstatus(EXIT_SUCCESS);
+
+ /* Disable the HDF5 library's error reporting */
+ H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+
+ /* initialize h5tools lib */
+ h5tools_init();
+
+ /* Check for the # of arguments */
+ if(argc != 2) {
+ usage();
+ leave(EXIT_FAILURE);
+ }
+
+ /* Duplicate the file name */
+ fname = HDstrdup(argv[opt_ind]);
+
+ /* Get a copy of the file access property list */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
+ error_msg("H5Pcreate\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Set to clear the status_flags in the file's superblock */
+ /* This is a private property used by h5clear only */
+ if(H5Pset(fapl, H5F_ACS_CLEAR_STATUS_FLAGS_NAME, &clear) < 0) {
+ error_msg("H5Pset\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if((fid = h5tools_fopen(fname, H5F_ACC_RDWR, fapl, NULL, NULL, (size_t)0)) < 0) {
+ error_msg("h5tools_fopen\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0) {
+ error_msg("H5Fclose\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Close the property list */
+ if(H5Pclose(fapl) < 0) {
+ error_msg("H5Pclose\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return EXIT_SUCCESS;
+} /* main() */
diff --git a/tools/misc/h5clear_gentest.c b/tools/misc/h5clear_gentest.c
new file mode 100644
index 0000000..0f20c35
--- /dev/null
+++ b/tools/misc/h5clear_gentest.c
@@ -0,0 +1,174 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#include "hdf5.h"
+#include "H5private.h"
+
+/* The HDF5 test files */
+const char *FILENAME[] = {
+ "h5clear_sec2_v3.h5", /* 0 -- sec2 file with superblock version 3 */
+ "h5clear_log_v3.h5", /* 1 -- log file with superblock version 3 */
+ "h5clear_sec2_v0.h5", /* 2 -- sec2 file with superblock version 0 */
+ "h5clear_sec2_v2.h5" /* 3 -- sec2 file with superblock version 2 */
+};
+
+#define KB 1024U
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: To create HDF5 files with non-zero status_flags in the superblock
+ * via flushing and exiting without closing the library.
+ *
+ * Due to file locking, status_flags in the superblock will be
+ * nonzero after H5Fcreate. The library will clear status_flags
+ * on file closing. This program, after "H5Fcreate" the files,
+ * exits without going through library closing. Thus, status_flags
+ * for these files are not cleared.
+ * The library will check consistency of status_flags when opening
+ * a file with superblock >= v3 and will return error accordingly.
+ * The library will not check status_flags when opening a file
+ * with < v3 superblock.
+ *
+ * These files are used by "h5clear" to see if the tool clears
+ * status_flags properly so users can open the files afterwards.
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi; July 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ hid_t fid; /* File ID */
+ hid_t fcpl; /* File creation property list */
+ hid_t fapl, new_fapl; /* File access property lists */
+ char fname[512]; /* File name */
+ unsigned new_format; /* To use latest library format or not */
+
+ /* Create a copy of the file access property list */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+
+ /* Copy the file access property list */
+ if((new_fapl = H5Pcopy(fapl)) < 0)
+ goto error;
+ /* Set to latest library format */
+ if(H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ goto error;
+
+ /* Files created within this for loop will have v3 superblock and nonzero status_flags */
+ for(new_format = FALSE; new_format <= TRUE; new_format++) {
+ hid_t fapl2, my_fapl; /* File access property lists */
+
+ /* Set to use the appropriate file access property list */
+ if(new_format)
+ fapl2 = new_fapl;
+ else
+ fapl2 = fapl;
+ /*
+ * Create a sec2 file
+ */
+ if((my_fapl = H5Pcopy(fapl2)) < 0)
+ goto error;
+ /* Create the file */
+ sprintf(fname, "%s%s", new_format? "latest_":"", FILENAME[0]);
+ if((fid = H5Fcreate(fname, H5F_ACC_TRUNC | (new_format ? 0 : H5F_ACC_SWMR_WRITE), H5P_DEFAULT, my_fapl)) < 0)
+ goto error;
+
+ /* Flush the file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ goto error;
+
+ /* Close the property list */
+ if(H5Pclose(my_fapl) < 0)
+ goto error;
+
+ /*
+ * Create a log file
+ */
+ /* Create a copy of file access property list */
+ if((my_fapl = H5Pcopy(fapl2)) < 0)
+ goto error;
+
+ /* Setup the fapl for the log driver */
+ if(H5Pset_fapl_log(my_fapl, "append.log", (unsigned long long)H5FD_LOG_ALL, (size_t)(4 * KB)) < 0)
+ goto error;
+
+ /* Create the file */
+ sprintf(fname, "%s%s", new_format? "latest_":"", FILENAME[1]);
+ if((fid = H5Fcreate(fname, H5F_ACC_TRUNC | (new_format ? 0 : H5F_ACC_SWMR_WRITE), H5P_DEFAULT, my_fapl)) < 0)
+ goto error;
+
+ /* Flush the file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ goto error;
+
+ /* Close the property list */
+ if(H5Pclose(my_fapl) < 0)
+ goto error;
+
+ } /* end for */
+
+ /*
+ * Create a sec2 file with v0 superblock but nonzero status_flags
+ */
+ if((fid = H5Fcreate(FILENAME[2], H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ goto error;
+
+ /* Flush the file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ goto error;
+
+
+ /*
+ * Create a sec2 file with v2 superblock but nonzero status_flags
+ */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ goto error;
+ if(H5Pset_shared_mesg_nindexes(fcpl, 1) < 0)
+ goto error;
+ if(H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_DTYPE_FLAG, 50) < 0)
+ goto error;
+
+ if((fid = H5Fcreate(FILENAME[3], H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ goto error;
+
+ /* Flush the file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ goto error;
+
+
+ /* Close the property lists */
+ if(H5Pclose(fapl) < 0)
+ goto error;
+ if(H5Pclose(new_fapl) < 0)
+ goto error;
+ if(H5Pclose(fcpl) < 0)
+ goto error;
+
+ fflush(stdout);
+ fflush(stderr);
+
+ /* Not going through library closing by calling _exit(0) with success */
+ HD_exit(0);
+
+error:
+
+ /* Exit with failure */
+ HD_exit(1);
+}
diff --git a/tools/misc/h5debug.c b/tools/misc/h5debug.c
index 480450d..aac14db 100644
--- a/tools/misc/h5debug.c
+++ b/tools/misc/h5debug.c
@@ -121,6 +121,14 @@ get_H5B2_class(const uint8_t *sig)
cls = H5A_BT2_CORDER;
break;
+ case H5B2_CDSET_ID:
+ cls = H5D_BT2;
+ break;
+
+ case H5B2_CDSET_FILT_ID:
+ cls = H5D_BT2_FILT;
+ break;
+
case H5B2_NUM_BTREE_ID:
default:
HDfprintf(stderr, "Unknown v2 B-tree subtype %u\n", (unsigned)(subtype));
@@ -157,6 +165,14 @@ get_H5EA_class(const uint8_t *sig)
cls = H5EA_CLS_TEST;
break;
+ case H5EA_CLS_CHUNK_ID:
+ cls = H5EA_CLS_CHUNK;
+ break;
+
+ case H5EA_CLS_FILT_CHUNK_ID:
+ cls = H5EA_CLS_FILT_CHUNK;
+ break;
+
case H5EA_NUM_CLS_ID:
default:
HDfprintf(stderr, "Unknown extensible array class %u\n", (unsigned)(clsid));
@@ -193,6 +209,14 @@ get_H5FA_class(const uint8_t *sig)
cls = H5FA_CLS_TEST;
break;
+ case H5FA_CLS_CHUNK_ID:
+ cls = H5FA_CLS_CHUNK;
+ break;
+
+ case H5FA_CLS_FILT_CHUNK_ID:
+ cls = H5FA_CLS_FILT_CHUNK;
+ break;
+
case H5FA_NUM_CLS_ID:
default:
HDfprintf(stderr, "Unknown fixed array class %u\n", (unsigned)(clsid));
@@ -398,6 +422,13 @@ main(int argc, char *argv[])
const H5B2_class_t *cls = get_H5B2_class(sig);
HDassert(cls);
+ if((cls == H5D_BT2 || cls == H5D_BT2_FILT) && extra == 0) {
+ fprintf(stderr, "ERROR: Need v2 B-tree header address and object header address containing the layout message in order to dump header\n");
+ fprintf(stderr, "v2 B-tree hdr usage:\n");
+ fprintf(stderr, "\th5debug <filename> <v2 B-tree header address> <object header address>\n");
+ HDexit(4);
+ }
+
status = H5B2__hdr_debug(f, H5P_DATASET_XFER_DEFAULT, addr, stdout, 0, VCOL, cls, (haddr_t)extra);
} else if(!HDmemcmp(sig, H5B2_INT_MAGIC, (size_t)H5_SIZEOF_MAGIC)) {
@@ -408,7 +439,16 @@ main(int argc, char *argv[])
HDassert(cls);
/* Check for enough valid parameters */
- if(extra == 0 || extra2 == 0 || extra3 == 0) {
+ if((cls == H5D_BT2 || cls == H5D_BT2_FILT) &&
+ (extra == 0 || extra2 == 0 || extra3 == 0 || extra4 == 0)) {
+
+ fprintf(stderr, "ERROR: Need v2 B-tree header address, the node's number of records, depth, and object header address containing the layout message in order to dump internal node\n");
+ fprintf(stderr, "NOTE: Leaf nodes are depth 0, the internal nodes above them are depth 1, etc.\n");
+ fprintf(stderr, "v2 B-tree internal node usage:\n");
+ fprintf(stderr, "\th5debug <filename> <internal node address> <v2 B-tree header address> <number of records> <depth> <object header address>\n");
+ HDexit(4);
+
+ } else if(extra == 0 || extra2 == 0 || extra3 == 0) {
HDfprintf(stderr, "ERROR: Need v2 B-tree header address and the node's number of records and depth in order to dump internal node\n");
HDfprintf(stderr, "NOTE: Leaf nodes are depth 0, the internal nodes above them are depth 1, etc.\n");
HDfprintf(stderr, "v2 B-tree internal node usage:\n");
@@ -426,7 +466,15 @@ main(int argc, char *argv[])
HDassert(cls);
/* Check for enough valid parameters */
- if(extra == 0 || extra2 == 0) {
+ if((cls == H5D_BT2 || cls == H5D_BT2_FILT) &&
+ (extra == 0 || extra2 == 0 || extra3 == 0 )) {
+
+ fprintf(stderr, "ERROR: Need v2 B-tree header address, number of records, and object header address containing the layout message in order to dump leaf node\n");
+ fprintf(stderr, "v2 B-tree leaf node usage:\n");
+ fprintf(stderr, "\th5debug <filename> <leaf node address> <v2 B-tree header address> <number of records> <object header address>\n");
+ HDexit(4);
+
+ } else if(extra == 0 || extra2 == 0) {
HDfprintf(stderr, "ERROR: Need v2 B-tree header address and number of records in order to dump leaf node\n");
HDfprintf(stderr, "v2 B-tree leaf node usage:\n");
HDfprintf(stderr, "\th5debug <filename> <leaf node address> <v2 B-tree header address> <number of records>\n");
diff --git a/tools/misc/testh5clear.sh.in b/tools/misc/testh5clear.sh.in
new file mode 100644
index 0000000..aeac03e
--- /dev/null
+++ b/tools/misc/testh5clear.sh.in
@@ -0,0 +1,130 @@
+#! /bin/sh
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the files COPYING and Copyright.html. COPYING can be found at the root
+# of the source code distribution tree; Copyright.html can be found at the
+# root level of an installed copy of the electronic HDF5 document set and
+# is linked from the top-level documents page. It can also be found at
+# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
+# access to either file, you may request a copy from help@hdfgroup.org.
+#
+# Tests for the h5clear tool
+#
+srcdir=@srcdir@
+TESTNAME=h5clear
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+
+H5CLEAR=h5clear
+H5CLEAR_BIN=`pwd`/$H5CLEAR # The path of the tool binary
+
+GENTEST=h5clear_gentest # Generate test files
+GENTEST_BIN=`pwd`/$GENTEST # The path to the binary
+
+OPENCHK=clear_open_chk # Try opening the test file
+OPENCHK_BIN=`pwd`/$OPENCHK # The path to the binary
+
+SUCCEED=0
+FAIL=1
+
+nerrors=0
+verbose=yes
+
+# Print a line-line message left justified in a field of 70 characters
+# beginning with the word "Testing".
+#
+TESTING() {
+ SPACES=" "
+ echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# (1) Use "h5clear" to clear the status_flags in the test file
+# (2) Open the test file via "clear_open_chk"
+# $1 is the filename for testing
+TOOLTEST() {
+ TESTING $H5CLEAR $1
+ fname=$1
+ # Use "h5clear" to clear the status_flags in the test file
+ $RUNSERIAL $H5CLEAR_BIN $fname
+ if test $? -ne $SUCCEED; then
+ echo ".....$H5CLEAR: should succeed"
+ nerrors=`expr $nerrors + 1`
+ else
+ # Open the test file via "clear_open_chk"
+ $OPENCHK_BIN $fname
+ if test $? -ne $SUCCEED; then
+ echo "......$OPENCHK: should succeed"
+ nerrors=`expr $nerrors + 1`
+ else
+ echo "PASSED"
+ fi
+ fi
+}
+
+
+
+# Use "clear_open_chk" to check if the file open succeeds or fails
+# $1 is the filename to open
+# $2 is the expected return from "clear_open_chk"
+OPENCHK() {
+ fname=$1
+ expected=$2
+ #
+ $OPENCHK_BIN $fname 2>/dev/null
+ actual=$?
+ if test $actual -ne $expected; then
+ echo "Unexpected return from $OPENCHK"
+ nerrors=`expr $nerrors + 1`
+ fi
+}
+
+##############################################################################
+##############################################################################
+### T H E T E S T S ###
+##############################################################################
+##############################################################################
+#
+$GENTEST_BIN # Create HDF5 test files
+if test $? -ne 0; then # Error returned from generating test files
+ echo "$GENTEST: .....fail in generating test files"
+ nerrors=`expr $nerrors + 1`
+else
+ # Initial file open fails
+ # After "h5clear" the file, the subsequent file open succeeds
+ OPENCHK h5clear_sec2_v3.h5 $FAIL
+ TOOLTEST h5clear_sec2_v3.h5
+ #
+ OPENCHK h5clear_log_v3.h5 $FAIL
+ TOOLTEST h5clear_log_v3.h5
+ #
+ OPENCHK latest_h5clear_sec2_v3.h5 $FAIL
+ TOOLTEST latest_h5clear_sec2_v3.h5
+ #
+ OPENCHK latest_h5clear_log_v3.h5 $FAIL
+ TOOLTEST latest_h5clear_log_v3.h5
+fi
+#
+#
+# File open succeeds because the library does not check status_flags for file with < v3 superblock
+OPENCHK h5clear_sec2_v0.h5 $SUCCEED
+TOOLTEST h5clear_sec2_v0.h5
+OPENCHK h5clear_sec2_v2.h5 $SUCCEED
+TOOLTEST h5clear_sec2_v2.h5
+#
+# Clean up test files
+if test -z "$HDF5_NOCLEANUP"; then
+ rm -f h5clear_*.h5 latest_h5clear*.h5
+fi
+
+if test $nerrors -eq 0 ; then
+ echo "All $TESTNAME tests passed."
+ exit $EXIT_SUCCESS
+else
+ echo "$TESTNAME tests failed with $nerrors error(s)."
+ exit $EXIT_FAILURE
+fi
diff --git a/tools/testfiles/family_file00000.h5 b/tools/testfiles/family_file00000.h5
index 88f75ca..d86fb96 100644
--- a/tools/testfiles/family_file00000.h5
+++ b/tools/testfiles/family_file00000.h5
Binary files differ
diff --git a/tools/testfiles/taindices.h5 b/tools/testfiles/taindices.h5
index b482a21..c0e5a68 100644
--- a/tools/testfiles/taindices.h5
+++ b/tools/testfiles/taindices.h5
Binary files differ
diff --git a/tools/testfiles/tarray1.h5 b/tools/testfiles/tarray1.h5
index 90371f2..b39d966 100644
--- a/tools/testfiles/tarray1.h5
+++ b/tools/testfiles/tarray1.h5
Binary files differ
diff --git a/tools/testfiles/tarray2.h5 b/tools/testfiles/tarray2.h5
index e2e53e8..4c0b105 100644
--- a/tools/testfiles/tarray2.h5
+++ b/tools/testfiles/tarray2.h5
Binary files differ
diff --git a/tools/testfiles/tarray3.h5 b/tools/testfiles/tarray3.h5
index 580d846..dbc6031 100644
--- a/tools/testfiles/tarray3.h5
+++ b/tools/testfiles/tarray3.h5
Binary files differ
diff --git a/tools/testfiles/tarray4.h5 b/tools/testfiles/tarray4.h5
index b34efb8..142822b 100644
--- a/tools/testfiles/tarray4.h5
+++ b/tools/testfiles/tarray4.h5
Binary files differ
diff --git a/tools/testfiles/tarray5.h5 b/tools/testfiles/tarray5.h5
index 55ebf46..e597e3b 100644
--- a/tools/testfiles/tarray5.h5
+++ b/tools/testfiles/tarray5.h5
Binary files differ
diff --git a/tools/testfiles/tarray6.h5 b/tools/testfiles/tarray6.h5
index 7eb078c..d5ad021 100644
--- a/tools/testfiles/tarray6.h5
+++ b/tools/testfiles/tarray6.h5
Binary files differ
diff --git a/tools/testfiles/tarray7.h5 b/tools/testfiles/tarray7.h5
index 74089ea..a744ed5 100644
--- a/tools/testfiles/tarray7.h5
+++ b/tools/testfiles/tarray7.h5
Binary files differ
diff --git a/tools/testfiles/tattr.h5 b/tools/testfiles/tattr.h5
index d61def5..bceb228 100644
--- a/tools/testfiles/tattr.h5
+++ b/tools/testfiles/tattr.h5
Binary files differ
diff --git a/tools/testfiles/tattr2.h5 b/tools/testfiles/tattr2.h5
index c40f3f7..f062399 100644
--- a/tools/testfiles/tattr2.h5
+++ b/tools/testfiles/tattr2.h5
Binary files differ
diff --git a/tools/testfiles/tbigdims.h5 b/tools/testfiles/tbigdims.h5
index c54c2c3..50bf2fd 100644
--- a/tools/testfiles/tbigdims.h5
+++ b/tools/testfiles/tbigdims.h5
Binary files differ
diff --git a/tools/testfiles/tbitfields.h5 b/tools/testfiles/tbitfields.h5
index 11087f0..b1b7751 100644
--- a/tools/testfiles/tbitfields.h5
+++ b/tools/testfiles/tbitfields.h5
Binary files differ
diff --git a/tools/testfiles/tchar.h5 b/tools/testfiles/tchar.h5
index 4d23ea9..0391772 100644
--- a/tools/testfiles/tchar.h5
+++ b/tools/testfiles/tchar.h5
Binary files differ
diff --git a/tools/testfiles/tcompound.h5 b/tools/testfiles/tcompound.h5
index d1ec650..edef9d3 100644
--- a/tools/testfiles/tcompound.h5
+++ b/tools/testfiles/tcompound.h5
Binary files differ
diff --git a/tools/testfiles/tcompound2.h5 b/tools/testfiles/tcompound2.h5
index 98e80bd..482f9fd 100644
--- a/tools/testfiles/tcompound2.h5
+++ b/tools/testfiles/tcompound2.h5
Binary files differ
diff --git a/tools/testfiles/tcompound_complex.h5 b/tools/testfiles/tcompound_complex.h5
index 5c6274f..a0c90eb 100644
--- a/tools/testfiles/tcompound_complex.h5
+++ b/tools/testfiles/tcompound_complex.h5
Binary files differ
diff --git a/tools/testfiles/tdatareg.h5 b/tools/testfiles/tdatareg.h5
index 62a889f..631d6b0 100644
--- a/tools/testfiles/tdatareg.h5
+++ b/tools/testfiles/tdatareg.h5
Binary files differ
diff --git a/tools/testfiles/tdset.h5 b/tools/testfiles/tdset.h5
index 71dcb91..ae19cf4 100644
--- a/tools/testfiles/tdset.h5
+++ b/tools/testfiles/tdset.h5
Binary files differ
diff --git a/tools/testfiles/tdset2.h5 b/tools/testfiles/tdset2.h5
index 5e17cfd..f3e555b 100644
--- a/tools/testfiles/tdset2.h5
+++ b/tools/testfiles/tdset2.h5
Binary files differ
diff --git a/tools/testfiles/tdset_idx.ddl b/tools/testfiles/tdset_idx.ddl
new file mode 100644
index 0000000..65d9f44
--- /dev/null
+++ b/tools/testfiles/tdset_idx.ddl
@@ -0,0 +1,61 @@
+#############################
+Expected output for 'h5dump -p -H tdset_idx.h5'
+#############################
+HDF5 "tdset_idx.h5" {
+GROUP "/" {
+ DATASET "dset_btree" {
+ DATATYPE H5T_STD_I32LE
+ DATASPACE SIMPLE { ( 20, 10 ) / ( 200, 100 ) }
+ STORAGE_LAYOUT {
+ CHUNKED ( 5, 5 )
+ SIZE 800
+ }
+ FILTERS {
+ NONE
+ }
+ FILLVALUE {
+ FILL_TIME H5D_FILL_TIME_IFSET
+ VALUE 0
+ }
+ ALLOCATION_TIME {
+ H5D_ALLOC_TIME_INCR
+ }
+ }
+ DATASET "dset_filter" {
+ DATATYPE H5T_STD_I32LE
+ DATASPACE SIMPLE { ( 20, 10 ) / ( 20, 10 ) }
+ STORAGE_LAYOUT {
+ CHUNKED ( 5, 5 )
+ SIZE 200 (4.000:1 COMPRESSION)
+ }
+ FILTERS {
+ COMPRESSION DEFLATE { LEVEL 9 }
+ }
+ FILLVALUE {
+ FILL_TIME H5D_FILL_TIME_IFSET
+ VALUE 0
+ }
+ ALLOCATION_TIME {
+ H5D_ALLOC_TIME_INCR
+ }
+ }
+ DATASET "dset_fixed" {
+ DATATYPE H5T_STD_I32LE
+ DATASPACE SIMPLE { ( 20, 10 ) / ( 20, 10 ) }
+ STORAGE_LAYOUT {
+ CHUNKED ( 5, 5 )
+ SIZE 800
+ }
+ FILTERS {
+ NONE
+ }
+ FILLVALUE {
+ FILL_TIME H5D_FILL_TIME_IFSET
+ VALUE 0
+ }
+ ALLOCATION_TIME {
+ H5D_ALLOC_TIME_INCR
+ }
+ }
+}
+}
diff --git a/tools/testfiles/tdset_idx.h5 b/tools/testfiles/tdset_idx.h5
new file mode 100644
index 0000000..314de9b
--- /dev/null
+++ b/tools/testfiles/tdset_idx.h5
Binary files differ
diff --git a/tools/testfiles/tdset_idx.ls b/tools/testfiles/tdset_idx.ls
new file mode 100644
index 0000000..daa14b2
--- /dev/null
+++ b/tools/testfiles/tdset_idx.ls
@@ -0,0 +1,36 @@
+dset_btree Dataset {20/200, 10/100}
+ Data:
+ (0,0) 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1,
+ (2,2) 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3,
+ (4,4) 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5,
+ (6,6) 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7,
+ (8,8) 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ (11,0) 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1,
+ (13,2) 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3,
+ (15,4) 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5,
+ (17,6) 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7,
+ (19,8) 8, 9
+dset_filter Dataset {20, 10}
+ Data:
+ (0,0) 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1,
+ (2,2) 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3,
+ (4,4) 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5,
+ (6,6) 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7,
+ (8,8) 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ (11,0) 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1,
+ (13,2) 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3,
+ (15,4) 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5,
+ (17,6) 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7,
+ (19,8) 8, 9
+dset_fixed Dataset {20, 10}
+ Data:
+ (0,0) 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1,
+ (2,2) 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3,
+ (4,4) 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5,
+ (6,6) 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7,
+ (8,8) 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ (11,0) 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1,
+ (13,2) 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3,
+ (15,4) 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5,
+ (17,6) 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7,
+ (19,8) 8, 9
diff --git a/tools/testfiles/tempty.h5 b/tools/testfiles/tempty.h5
index d7d903f..f6d6b7a 100644
--- a/tools/testfiles/tempty.h5
+++ b/tools/testfiles/tempty.h5
Binary files differ
diff --git a/tools/testfiles/tenum.h5 b/tools/testfiles/tenum.h5
index b1300d8..5521fc4 100644
--- a/tools/testfiles/tenum.h5
+++ b/tools/testfiles/tenum.h5
Binary files differ
diff --git a/tools/testfiles/tfamily00000.h5 b/tools/testfiles/tfamily00000.h5
index a130bfd..70f6dcf 100644
--- a/tools/testfiles/tfamily00000.h5
+++ b/tools/testfiles/tfamily00000.h5
Binary files differ
diff --git a/tools/testfiles/tfcontents2.h5 b/tools/testfiles/tfcontents2.h5
index 1df0779..1514e28 100644
--- a/tools/testfiles/tfcontents2.h5
+++ b/tools/testfiles/tfcontents2.h5
Binary files differ
diff --git a/tools/testfiles/tfvalues.h5 b/tools/testfiles/tfvalues.h5
index a6af958..c238f44 100644
--- a/tools/testfiles/tfvalues.h5
+++ b/tools/testfiles/tfvalues.h5
Binary files differ
diff --git a/tools/testfiles/tgroup.h5 b/tools/testfiles/tgroup.h5
index 565fb67..7c5cd63 100644
--- a/tools/testfiles/tgroup.h5
+++ b/tools/testfiles/tgroup.h5
Binary files differ
diff --git a/tools/testfiles/thlink.h5 b/tools/testfiles/thlink.h5
index 6e0e7e1..0ad92d1 100644
--- a/tools/testfiles/thlink.h5
+++ b/tools/testfiles/thlink.h5
Binary files differ
diff --git a/tools/testfiles/thyperslab.h5 b/tools/testfiles/thyperslab.h5
index a730aa3..dc2c0db 100644
--- a/tools/testfiles/thyperslab.h5
+++ b/tools/testfiles/thyperslab.h5
Binary files differ
diff --git a/tools/testfiles/tlarge_objname.h5 b/tools/testfiles/tlarge_objname.h5
index 707d7b6..9e26283 100644
--- a/tools/testfiles/tlarge_objname.h5
+++ b/tools/testfiles/tlarge_objname.h5
Binary files differ
diff --git a/tools/testfiles/tlonglinks.h5 b/tools/testfiles/tlonglinks.h5
index f54e5f5..6c2cab4 100644
--- a/tools/testfiles/tlonglinks.h5
+++ b/tools/testfiles/tlonglinks.h5
Binary files differ
diff --git a/tools/testfiles/tloop.h5 b/tools/testfiles/tloop.h5
index d7a2f47..e54f91c 100644
--- a/tools/testfiles/tloop.h5
+++ b/tools/testfiles/tloop.h5
Binary files differ
diff --git a/tools/testfiles/tloop2.h5 b/tools/testfiles/tloop2.h5
index 995bfab..60d7ed2 100644
--- a/tools/testfiles/tloop2.h5
+++ b/tools/testfiles/tloop2.h5
Binary files differ
diff --git a/tools/testfiles/tmulti-s.h5 b/tools/testfiles/tmulti-s.h5
index 3feae49..bc2fafb 100644
--- a/tools/testfiles/tmulti-s.h5
+++ b/tools/testfiles/tmulti-s.h5
Binary files differ
diff --git a/tools/testfiles/tname-amp.h5 b/tools/testfiles/tname-amp.h5
index 27ab521..82f6316 100644
--- a/tools/testfiles/tname-amp.h5
+++ b/tools/testfiles/tname-amp.h5
Binary files differ
diff --git a/tools/testfiles/tname-apos.h5 b/tools/testfiles/tname-apos.h5
index 42d0fc3..9b49159 100644
--- a/tools/testfiles/tname-apos.h5
+++ b/tools/testfiles/tname-apos.h5
Binary files differ
diff --git a/tools/testfiles/tname-gt.h5 b/tools/testfiles/tname-gt.h5
index fabd154..1d2795e 100644
--- a/tools/testfiles/tname-gt.h5
+++ b/tools/testfiles/tname-gt.h5
Binary files differ
diff --git a/tools/testfiles/tname-lt.h5 b/tools/testfiles/tname-lt.h5
index 6b13375..e3dafe0 100644
--- a/tools/testfiles/tname-lt.h5
+++ b/tools/testfiles/tname-lt.h5
Binary files differ
diff --git a/tools/testfiles/tname-quot.h5 b/tools/testfiles/tname-quot.h5
index eb8d6cc..fd9e862 100644
--- a/tools/testfiles/tname-quot.h5
+++ b/tools/testfiles/tname-quot.h5
Binary files differ
diff --git a/tools/testfiles/tname-sp.h5 b/tools/testfiles/tname-sp.h5
index 0201efd..514eb8d 100644
--- a/tools/testfiles/tname-sp.h5
+++ b/tools/testfiles/tname-sp.h5
Binary files differ
diff --git a/tools/testfiles/tnestedcomp.h5 b/tools/testfiles/tnestedcomp.h5
index b777ee3..2912310 100644
--- a/tools/testfiles/tnestedcomp.h5
+++ b/tools/testfiles/tnestedcomp.h5
Binary files differ
diff --git a/tools/testfiles/tnodata.h5 b/tools/testfiles/tnodata.h5
index 8e0a89d..1e537f1 100644
--- a/tools/testfiles/tnodata.h5
+++ b/tools/testfiles/tnodata.h5
Binary files differ
diff --git a/tools/testfiles/tnullspace.h5 b/tools/testfiles/tnullspace.h5
index b5dac5d..d405061 100644
--- a/tools/testfiles/tnullspace.h5
+++ b/tools/testfiles/tnullspace.h5
Binary files differ
diff --git a/tools/testfiles/tobjref.h5 b/tools/testfiles/tobjref.h5
index ef42ff3..ecf3827 100644
--- a/tools/testfiles/tobjref.h5
+++ b/tools/testfiles/tobjref.h5
Binary files differ
diff --git a/tools/testfiles/topaque.h5 b/tools/testfiles/topaque.h5
index d56b594..669f4b7 100644
--- a/tools/testfiles/topaque.h5
+++ b/tools/testfiles/topaque.h5
Binary files differ
diff --git a/tools/testfiles/tref-escapes-at.h5 b/tools/testfiles/tref-escapes-at.h5
index 641513c..e3f0d6b 100644
--- a/tools/testfiles/tref-escapes-at.h5
+++ b/tools/testfiles/tref-escapes-at.h5
Binary files differ
diff --git a/tools/testfiles/tref-escapes.h5 b/tools/testfiles/tref-escapes.h5
index 6c7638b..766a840 100644
--- a/tools/testfiles/tref-escapes.h5
+++ b/tools/testfiles/tref-escapes.h5
Binary files differ
diff --git a/tools/testfiles/tref.h5 b/tools/testfiles/tref.h5
index 73b4be5..3a9899a 100644
--- a/tools/testfiles/tref.h5
+++ b/tools/testfiles/tref.h5
Binary files differ
diff --git a/tools/testfiles/tsaf.h5 b/tools/testfiles/tsaf.h5
index c84c5b6..75237ba 100644
--- a/tools/testfiles/tsaf.h5
+++ b/tools/testfiles/tsaf.h5
Binary files differ
diff --git a/tools/testfiles/tslink.h5 b/tools/testfiles/tslink.h5
index 753b62d..ab80d8f 100644
--- a/tools/testfiles/tslink.h5
+++ b/tools/testfiles/tslink.h5
Binary files differ
diff --git a/tools/testfiles/tsplit_file-m.h5 b/tools/testfiles/tsplit_file-m.h5
index a6eef73..d431c70 100644
--- a/tools/testfiles/tsplit_file-m.h5
+++ b/tools/testfiles/tsplit_file-m.h5
Binary files differ
diff --git a/tools/testfiles/tstr.h5 b/tools/testfiles/tstr.h5
index af5384f..ae6a012 100644
--- a/tools/testfiles/tstr.h5
+++ b/tools/testfiles/tstr.h5
Binary files differ
diff --git a/tools/testfiles/tstr2.h5 b/tools/testfiles/tstr2.h5
index d3b1588..b7d4802 100644
--- a/tools/testfiles/tstr2.h5
+++ b/tools/testfiles/tstr2.h5
Binary files differ
diff --git a/tools/testfiles/tstr3.h5 b/tools/testfiles/tstr3.h5
index 9f9112f..727dec5 100644
--- a/tools/testfiles/tstr3.h5
+++ b/tools/testfiles/tstr3.h5
Binary files differ
diff --git a/tools/testfiles/tstring-at.h5 b/tools/testfiles/tstring-at.h5
index aa17960..a477ee5 100644
--- a/tools/testfiles/tstring-at.h5
+++ b/tools/testfiles/tstring-at.h5
Binary files differ
diff --git a/tools/testfiles/tstring.h5 b/tools/testfiles/tstring.h5
index 667fe78..914a464 100644
--- a/tools/testfiles/tstring.h5
+++ b/tools/testfiles/tstring.h5
Binary files differ
diff --git a/tools/testfiles/tvldtypes1.h5 b/tools/testfiles/tvldtypes1.h5
index b5b2bd7..423dae8 100644
--- a/tools/testfiles/tvldtypes1.h5
+++ b/tools/testfiles/tvldtypes1.h5
Binary files differ
diff --git a/tools/testfiles/tvldtypes2.h5 b/tools/testfiles/tvldtypes2.h5
index 65bf63f..86ba20d 100644
--- a/tools/testfiles/tvldtypes2.h5
+++ b/tools/testfiles/tvldtypes2.h5
Binary files differ
diff --git a/tools/testfiles/tvldtypes3.h5 b/tools/testfiles/tvldtypes3.h5
index dd4aca7..66fd1d2 100644
--- a/tools/testfiles/tvldtypes3.h5
+++ b/tools/testfiles/tvldtypes3.h5
Binary files differ
diff --git a/tools/testfiles/tvldtypes4.h5 b/tools/testfiles/tvldtypes4.h5
index 70dc73e..016ad55 100644
--- a/tools/testfiles/tvldtypes4.h5
+++ b/tools/testfiles/tvldtypes4.h5
Binary files differ
diff --git a/tools/testfiles/tvldtypes5.h5 b/tools/testfiles/tvldtypes5.h5
index 776c020..71b924f 100644
--- a/tools/testfiles/tvldtypes5.h5
+++ b/tools/testfiles/tvldtypes5.h5
Binary files differ
diff --git a/tools/testfiles/tvms.h5 b/tools/testfiles/tvms.h5
index 9c243ff..d3b0b7c 100644
--- a/tools/testfiles/tvms.h5
+++ b/tools/testfiles/tvms.h5
Binary files differ