Diffstat (limited to 'src')
-rw-r--r--  src/CMakeLists.txt | 720
-rw-r--r--  src/H5AC.c | 768
-rw-r--r--  src/H5ACpkg.h | 18
-rw-r--r--  src/H5ACprivate.h | 13
-rw-r--r--  src/H5B.c | 9
-rw-r--r--  src/H5B2cache.c | 32
-rw-r--r--  src/H5B2hdr.c | 13
-rw-r--r--  src/H5B2pkg.h | 5
-rw-r--r--  src/H5B2stat.c | 2
-rw-r--r--  src/H5Bdbg.c | 3
-rw-r--r--  src/H5C.c | 1188
-rw-r--r--  src/H5Cpkg.h | 85
-rw-r--r--  src/H5Cprivate.h | 35
-rw-r--r--  src/H5Dchunk.c | 934
-rw-r--r--  src/H5Dcompact.c | 2
-rw-r--r--  src/H5Dcontig.c | 2
-rw-r--r--  src/H5Dfill.c | 51
-rw-r--r--  src/H5Dint.c | 8
-rw-r--r--  src/H5Dmpio.c | 22
-rw-r--r--  src/H5Dpkg.h | 14
-rw-r--r--  src/H5Dproxy.c | 2
-rw-r--r--  src/H5EAhdr.c | 2
-rw-r--r--  src/H5Edefin.h | 4
-rw-r--r--  src/H5Einit.h | 12
-rw-r--r--  src/H5Epubgen.h | 6
-rw-r--r--  src/H5Eterm.h | 4
-rw-r--r--  src/H5F.c | 2
-rw-r--r--  src/H5FAhdr.c | 2
-rw-r--r--  src/H5FDstdio.c | 3
-rw-r--r--  src/H5FS.c | 19
-rw-r--r--  src/H5FScache.c | 36
-rw-r--r--  src/H5FSdbg.c | 2
-rw-r--r--  src/H5FSpkg.h | 6
-rw-r--r--  src/H5FSsection.c | 2
-rw-r--r--  src/H5Fpublic.h | 1
-rw-r--r--  src/H5Fsuper.c | 10
-rw-r--r--  src/H5Gcache.c | 37
-rw-r--r--  src/H5Gnode.c | 41
-rw-r--r--  src/H5Gpkg.h | 19
-rw-r--r--  src/H5Groot.c | 2
-rw-r--r--  src/H5HF.c | 36
-rw-r--r--  src/H5HFcache.c | 26
-rw-r--r--  src/H5HFdbg.c | 52
-rw-r--r--  src/H5HFdblock.c | 6
-rw-r--r--  src/H5HFhdr.c | 49
-rw-r--r--  src/H5HFiblock.c | 28
-rw-r--r--  src/H5HFpkg.h | 16
-rw-r--r--  src/H5HG.c | 150
-rw-r--r--  src/H5HGcache.c | 1
-rw-r--r--  src/H5HGdbg.c | 4
-rw-r--r--  src/H5HGpkg.h | 1
-rw-r--r--  src/H5HL.c | 48
-rw-r--r--  src/H5HLcache.c | 2
-rw-r--r--  src/H5HLpkg.h | 1
-rw-r--r--  src/H5MF.c | 2
-rw-r--r--  src/H5O.c | 23
-rw-r--r--  src/H5Oalloc.c | 135
-rw-r--r--  src/H5Oattribute.c | 20
-rw-r--r--  src/H5Ocache.c | 14
-rw-r--r--  src/H5Ochunk.c | 69
-rw-r--r--  src/H5Ofill.c | 1
-rw-r--r--  src/H5Omessage.c | 10
-rw-r--r--  src/H5Opkg.h | 6
-rw-r--r--  src/H5Pdcpl.c | 2
-rw-r--r--  src/H5Pfapl.c | 8
-rw-r--r--  src/H5Ppublic.h | 2
-rwxr-xr-x  src/H5SM.c | 123
-rw-r--r--  src/H5SMcache.c | 66
-rwxr-xr-x  src/H5SMpkg.h | 42
-rw-r--r--  src/H5SMtest.c | 6
-rw-r--r--  src/H5Tconv.c | 30
-rw-r--r--  src/H5Tprivate.h | 1
-rw-r--r--  src/H5Tvlen.c | 41
-rw-r--r--  src/H5api_adpt.h | 281
-rw-r--r--  src/H5config.h.in | 6
-rw-r--r--  src/H5detect.c | 19
-rw-r--r--  src/H5err.txt | 4
-rw-r--r--  src/H5private.h | 6
-rw-r--r--  src/H5public.h | 4
-rw-r--r--  src/Makefile.in | 4
-rw-r--r--  src/libhdf5.settings.in | 2
81 files changed, 2611 insertions, 2872 deletions
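
Note on the new CMake build (editorial sketch, not part of the commit): the src/CMakeLists.txt added below assumes it is pulled in from a root HDF5 CMake project that has already defined HDF5_SRC_DIR, HDF5_LIB_TARGET, HDF5_LIB_NAME, HDF5_LIB_CORENAME, LIB_TYPE, LINK_LIBS, EXE_EXT and CFG_INIT, plus the IDE_GENERATED_PROPERTIES, SET_GLOBAL_VARIABLE and H5_SET_LIB_OPTIONS macros. A minimal sketch of that parent wiring follows; the variable names come from the diff itself, but the concrete values are illustrative assumptions, not the actual root CMakeLists.txt of this changeset.

    # Hypothetical root-project excerpt that consumes src/CMakeLists.txt.
    # (IDE_GENERATED_PROPERTIES, SET_GLOBAL_VARIABLE and H5_SET_LIB_OPTIONS
    # macros are assumed to be defined elsewhere in the root project.)
    cmake_minimum_required (VERSION 2.8)
    PROJECT (HDF5 C CXX)

    SET (HDF5_SRC_DIR      ${HDF5_SOURCE_DIR}/src)  # where the H5*.c/.h sources live
    SET (HDF5_LIB_CORENAME hdf5)                    # yields the hdf5_EXPORTS define symbol
    SET (HDF5_LIB_NAME     hdf5)                    # passed to H5_SET_LIB_OPTIONS
    SET (HDF5_LIB_TARGET   hdf5)                    # library target created in src/
    SET (LIB_TYPE          STATIC)                  # or SHARED
    SET (LINK_LIBS         "")                      # e.g. zlib/szip when those filters are enabled
    SET (EXE_EXT  "")                               # ".exe" on Windows generators
    SET (CFG_INIT "")                               # per-config subdirectory on multi-config generators

    ADD_SUBDIRECTORY (${HDF5_SOURCE_DIR}/src ${HDF5_BINARY_DIR}/src)
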
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
new file mode 100644
index 0000000..1e05424
--- /dev/null
+++ b/src/CMakeLists.txt
@@ -0,0 +1,720 @@
+cmake_minimum_required (VERSION 2.8)
+PROJECT (HDF5_SRC C CXX)
+
+#-----------------------------------------------------------------------------
+# List Source Files
+#-----------------------------------------------------------------------------
+SET (H5_SRCS
+ ${HDF5_SRC_DIR}/H5.c
+ ${HDF5_SRC_DIR}/H5checksum.c
+ ${HDF5_SRC_DIR}/H5dbg.c
+ ${HDF5_SRC_DIR}/H5system.c
+ ${HDF5_SRC_DIR}/H5timer.c
+ ${HDF5_SRC_DIR}/H5trace.c
+)
+
+SET (H5_HDRS
+ ${HDF5_SRC_DIR}/hdf5.h
+ ${HDF5_SRC_DIR}/H5api_adpt.h
+ ${HDF5_SRC_DIR}/H5public.h
+ ${HDF5_SRC_DIR}/H5version.h
+ ${HDF5_SRC_DIR}/H5overflow.h
+)
+IDE_GENERATED_PROPERTIES ("H5" "${H5_HDRS}" "${H5_SRCS}" )
+
+SET (H5A_SRCS
+ ${HDF5_SRC_DIR}/H5A.c
+ ${HDF5_SRC_DIR}/H5Abtree2.c
+ ${HDF5_SRC_DIR}/H5Adense.c
+ ${HDF5_SRC_DIR}/H5Adeprec.c
+ ${HDF5_SRC_DIR}/H5Aint.c
+ ${HDF5_SRC_DIR}/H5Atest.c
+)
+
+SET (H5A_HDRS
+ ${HDF5_SRC_DIR}/H5Apkg.h
+ ${HDF5_SRC_DIR}/H5Apublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5A" "${H5A_HDRS}" "${H5A_SRCS}" )
+
+SET (H5AC_SRCS
+ ${HDF5_SRC_DIR}/H5AC.c
+)
+
+SET (H5AC_HDRS
+ ${HDF5_SRC_DIR}/H5ACpkg.h
+ ${HDF5_SRC_DIR}/H5ACpublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5AC" "${H5AC_HDRS}" "${H5AC_SRCS}" )
+
+SET (H5B_SRCS
+ ${HDF5_SRC_DIR}/H5B.c
+ ${HDF5_SRC_DIR}/H5B2.c
+ ${HDF5_SRC_DIR}/H5B2cache.c
+ ${HDF5_SRC_DIR}/H5B2dbg.c
+ ${HDF5_SRC_DIR}/H5B2hdr.c
+ ${HDF5_SRC_DIR}/H5B2int.c
+ ${HDF5_SRC_DIR}/H5B2stat.c
+ ${HDF5_SRC_DIR}/H5B2test.c
+ ${HDF5_SRC_DIR}/H5Bcache.c
+ ${HDF5_SRC_DIR}/H5Bdbg.c
+)
+
+SET (H5B_HDRS
+ ${HDF5_SRC_DIR}/H5B2pkg.h
+ ${HDF5_SRC_DIR}/H5B2public.h
+ ${HDF5_SRC_DIR}/H5Bpkg.h
+ ${HDF5_SRC_DIR}/H5Bpublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5B" "${H5B_HDRS}" "${H5B_SRCS}" )
+
+SET (H5C_SRCS
+ ${HDF5_SRC_DIR}/H5C.c
+ ${HDF5_SRC_DIR}/H5CS.c
+)
+
+SET (H5C_HDRS
+ ${HDF5_SRC_DIR}/H5Cpkg.h
+ ${HDF5_SRC_DIR}/H5Cpublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5C" "${H5C_HDRS}" "${H5C_SRCS}" )
+
+SET (H5D_SRCS
+ ${HDF5_SRC_DIR}/H5D.c
+ ${HDF5_SRC_DIR}/H5Dbtree.c
+ ${HDF5_SRC_DIR}/H5Dchunk.c
+ ${HDF5_SRC_DIR}/H5Dcompact.c
+ ${HDF5_SRC_DIR}/H5Dcontig.c
+ ${HDF5_SRC_DIR}/H5Ddbg.c
+ ${HDF5_SRC_DIR}/H5Ddeprec.c
+ ${HDF5_SRC_DIR}/H5Defl.c
+ ${HDF5_SRC_DIR}/H5Dfill.c
+ ${HDF5_SRC_DIR}/H5Dint.c
+ ${HDF5_SRC_DIR}/H5Dio.c
+ ${HDF5_SRC_DIR}/H5Dlayout.c
+ ${HDF5_SRC_DIR}/H5Dmpio.c
+ ${HDF5_SRC_DIR}/H5Doh.c
+ ${HDF5_SRC_DIR}/H5Dscatgath.c
+ ${HDF5_SRC_DIR}/H5Dselect.c
+ ${HDF5_SRC_DIR}/H5Dtest.c
+)
+
+SET (H5D_HDRS
+ ${HDF5_SRC_DIR}/H5Dpkg.h
+ ${HDF5_SRC_DIR}/H5Dpublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5D" "${H5D_HDRS}" "${H5D_SRCS}" )
+
+SET (H5E_SRCS
+ ${HDF5_SRC_DIR}/H5E.c
+ ${HDF5_SRC_DIR}/H5Edeprec.c
+ ${HDF5_SRC_DIR}/H5Eint.c
+)
+
+SET (H5E_HDRS
+ ${HDF5_SRC_DIR}/H5Edefin.h
+ ${HDF5_SRC_DIR}/H5Einit.h
+ ${HDF5_SRC_DIR}/H5Epkg.h
+ ${HDF5_SRC_DIR}/H5Epubgen.h
+ ${HDF5_SRC_DIR}/H5Epublic.h
+ ${HDF5_SRC_DIR}/H5Eterm.h
+)
+IDE_GENERATED_PROPERTIES ("H5E" "${H5E_HDRS}" "${H5E_SRCS}" )
+
+SET (H5EA_SRCS
+ ${HDF5_SRC_DIR}/H5EA.c
+ ${HDF5_SRC_DIR}/H5EAcache.c
+ ${HDF5_SRC_DIR}/H5EAdbg.c
+ ${HDF5_SRC_DIR}/H5EAdblkpage.c
+ ${HDF5_SRC_DIR}/H5EAdblock.c
+ ${HDF5_SRC_DIR}/H5EAhdr.c
+ ${HDF5_SRC_DIR}/H5EAiblock.c
+ ${HDF5_SRC_DIR}/H5EAint.c
+ ${HDF5_SRC_DIR}/H5EAsblock.c
+ ${HDF5_SRC_DIR}/H5EAstat.c
+ ${HDF5_SRC_DIR}/H5EAtest.c
+)
+
+SET (H5EA_HDRS
+ ${HDF5_SRC_DIR}/H5EApkg.h
+)
+IDE_GENERATED_PROPERTIES ("H5EA" "${H5EA_HDRS}" "${H5EA_SRCS}" )
+
+SET (H5F_SRCS
+ ${HDF5_SRC_DIR}/H5F.c
+ ${HDF5_SRC_DIR}/H5Faccum.c
+ ${HDF5_SRC_DIR}/H5Fdbg.c
+ ${HDF5_SRC_DIR}/H5Fdeprec.c
+ ${HDF5_SRC_DIR}/H5Ffake.c
+ ${HDF5_SRC_DIR}/H5Fio.c
+ ${HDF5_SRC_DIR}/H5Fmount.c
+ ${HDF5_SRC_DIR}/H5Fmpi.c
+ ${HDF5_SRC_DIR}/H5Fquery.c
+ ${HDF5_SRC_DIR}/H5Fsfile.c
+ ${HDF5_SRC_DIR}/H5Fsuper.c
+ ${HDF5_SRC_DIR}/H5Fsuper_cache.c
+ ${HDF5_SRC_DIR}/H5Ftest.c
+)
+
+SET (H5F_HDRS
+ ${HDF5_SRC_DIR}/H5Fpkg.h
+ ${HDF5_SRC_DIR}/H5Fpublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5F" "${H5F_HDRS}" "${H5F_SRCS}" )
+
+SET (H5FA_SRCS
+ ${HDF5_SRC_DIR}/H5FA.c
+ ${HDF5_SRC_DIR}/H5FAcache.c
+ ${HDF5_SRC_DIR}/H5FAdbg.c
+ ${HDF5_SRC_DIR}/H5FAdblock.c
+ ${HDF5_SRC_DIR}/H5FAdblkpage.c
+ ${HDF5_SRC_DIR}/H5FAhdr.c
+ ${HDF5_SRC_DIR}/H5FAstat.c
+ ${HDF5_SRC_DIR}/H5FAtest.c
+)
+
+SET (H5FA_HDRS
+ ${HDF5_SRC_DIR}/H5FApkg.h
+)
+IDE_GENERATED_PROPERTIES ("H5FA" "${H5FA_HDRS}" "${H5FA_SRCS}" )
+
+SET (H5FD_SRCS
+ ${HDF5_SRC_DIR}/H5FD.c
+ ${HDF5_SRC_DIR}/H5FDcore.c
+ ${HDF5_SRC_DIR}/H5FDdirect.c
+ ${HDF5_SRC_DIR}/H5FDfamily.c
+ ${HDF5_SRC_DIR}/H5FDint.c
+ ${HDF5_SRC_DIR}/H5FDlog.c
+ ${HDF5_SRC_DIR}/H5FDmpi.c
+ ${HDF5_SRC_DIR}/H5FDmpio.c
+ ${HDF5_SRC_DIR}/H5FDmpiposix.c
+ ${HDF5_SRC_DIR}/H5FDmulti.c
+ ${HDF5_SRC_DIR}/H5FDsec2.c
+ ${HDF5_SRC_DIR}/H5FDspace.c
+ ${HDF5_SRC_DIR}/H5FDstdio.c
+)
+
+SET (H5FD_HDRS
+ ${HDF5_SRC_DIR}/H5FDcore.h
+ ${HDF5_SRC_DIR}/H5FDdirect.h
+ ${HDF5_SRC_DIR}/H5FDfamily.h
+ ${HDF5_SRC_DIR}/H5FDlog.h
+ ${HDF5_SRC_DIR}/H5FDmpi.h
+ ${HDF5_SRC_DIR}/H5FDmpio.h
+ ${HDF5_SRC_DIR}/H5FDmpiposix.h
+ ${HDF5_SRC_DIR}/H5FDmulti.h
+ ${HDF5_SRC_DIR}/H5FDpkg.h
+ ${HDF5_SRC_DIR}/H5FDpublic.h
+ ${HDF5_SRC_DIR}/H5FDsec2.h
+ ${HDF5_SRC_DIR}/H5FDstdio.h
+)
+IDE_GENERATED_PROPERTIES ("H5FD" "${H5FD_HDRS}" "${H5FD_SRCS}" )
+
+SET (H5FS_SRCS
+ ${HDF5_SRC_DIR}/H5FS.c
+ ${HDF5_SRC_DIR}/H5FScache.c
+ ${HDF5_SRC_DIR}/H5FSdbg.c
+ ${HDF5_SRC_DIR}/H5FSsection.c
+ ${HDF5_SRC_DIR}/H5FSstat.c
+ ${HDF5_SRC_DIR}/H5FStest.c
+)
+
+SET (H5FS_HDRS
+ ${HDF5_SRC_DIR}/H5FSpkg.h
+ ${HDF5_SRC_DIR}/H5FSpublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5FS" "${H5FS_HDRS}" "${H5FS_SRCS}" )
+
+SET (H5G_SRCS
+ ${HDF5_SRC_DIR}/H5G.c
+ ${HDF5_SRC_DIR}/H5Gbtree2.c
+ ${HDF5_SRC_DIR}/H5Gcache.c
+ ${HDF5_SRC_DIR}/H5Gcompact.c
+ ${HDF5_SRC_DIR}/H5Gdense.c
+ ${HDF5_SRC_DIR}/H5Gdeprec.c
+ ${HDF5_SRC_DIR}/H5Gent.c
+ ${HDF5_SRC_DIR}/H5Gint.c
+ ${HDF5_SRC_DIR}/H5Glink.c
+ ${HDF5_SRC_DIR}/H5Gloc.c
+ ${HDF5_SRC_DIR}/H5Gname.c
+ ${HDF5_SRC_DIR}/H5Gnode.c
+ ${HDF5_SRC_DIR}/H5Gobj.c
+ ${HDF5_SRC_DIR}/H5Goh.c
+ ${HDF5_SRC_DIR}/H5Groot.c
+ ${HDF5_SRC_DIR}/H5Gstab.c
+ ${HDF5_SRC_DIR}/H5Gtest.c
+ ${HDF5_SRC_DIR}/H5Gtraverse.c
+)
+
+SET (H5G_HDRS
+ ${HDF5_SRC_DIR}/H5Gpkg.h
+ ${HDF5_SRC_DIR}/H5Gpublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5G" "${H5G_HDRS}" "${H5G_SRCS}" )
+
+SET (H5HF_SRCS
+ ${HDF5_SRC_DIR}/H5HF.c
+ ${HDF5_SRC_DIR}/H5HFbtree2.c
+ ${HDF5_SRC_DIR}/H5HFcache.c
+ ${HDF5_SRC_DIR}/H5HFdbg.c
+ ${HDF5_SRC_DIR}/H5HFdblock.c
+ ${HDF5_SRC_DIR}/H5HFdtable.c
+ ${HDF5_SRC_DIR}/H5HFhdr.c
+ ${HDF5_SRC_DIR}/H5HFhuge.c
+ ${HDF5_SRC_DIR}/H5HFiblock.c
+ ${HDF5_SRC_DIR}/H5HFiter.c
+ ${HDF5_SRC_DIR}/H5HFman.c
+ ${HDF5_SRC_DIR}/H5HFsection.c
+ ${HDF5_SRC_DIR}/H5HFspace.c
+ ${HDF5_SRC_DIR}/H5HFstat.c
+ ${HDF5_SRC_DIR}/H5HFtest.c
+ ${HDF5_SRC_DIR}/H5HFtiny.c
+)
+
+SET (H5HF_HDRS
+ ${HDF5_SRC_DIR}/H5HFpkg.h
+ ${HDF5_SRC_DIR}/H5HFpublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5HF" "${H5HF_HDRS}" "${H5HF_SRCS}" )
+
+SET (H5HG_SRCS
+ ${HDF5_SRC_DIR}/H5HG.c
+ ${HDF5_SRC_DIR}/H5HGcache.c
+ ${HDF5_SRC_DIR}/H5HGdbg.c
+)
+
+SET (H5HG_HDRS
+ ${HDF5_SRC_DIR}/H5HGpkg.h
+ ${HDF5_SRC_DIR}/H5HGpublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5HG" "${H5HG_HDRS}" "${H5HG_SRCS}" )
+
+SET (H5HL_SRCS
+ ${HDF5_SRC_DIR}/H5HL.c
+ ${HDF5_SRC_DIR}/H5HLcache.c
+ ${HDF5_SRC_DIR}/H5HLdbg.c
+ ${HDF5_SRC_DIR}/H5HLint.c
+)
+
+SET (H5HL_HDRS
+ ${HDF5_SRC_DIR}/H5HLpkg.h
+ ${HDF5_SRC_DIR}/H5HLpublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5HL" "${H5HL_HDRS}" "${H5HL_SRCS}" )
+
+SET (H5MF_SRCS
+ ${HDF5_SRC_DIR}/H5MF.c
+ ${HDF5_SRC_DIR}/H5MFaggr.c
+ ${HDF5_SRC_DIR}/H5MFdbg.c
+ ${HDF5_SRC_DIR}/H5MFsection.c
+)
+
+SET (H5MF_HDRS
+)
+IDE_GENERATED_PROPERTIES ("H5MF" "${H5MF_HDRS}" "${H5MF_SRCS}" )
+
+SET (H5MP_SRCS
+ ${HDF5_SRC_DIR}/H5MP.c
+ ${HDF5_SRC_DIR}/H5MPtest.c
+)
+
+SET (H5MP_HDRS
+ ${HDF5_SRC_DIR}/H5MPpkg.h
+)
+IDE_GENERATED_PROPERTIES ("H5MP" "${H5MP_HDRS}" "${H5MP_SRCS}" )
+
+SET (H5O_SRCS
+ ${HDF5_SRC_DIR}/H5O.c
+ ${HDF5_SRC_DIR}/H5Oainfo.c
+ ${HDF5_SRC_DIR}/H5Oalloc.c
+ ${HDF5_SRC_DIR}/H5Oattr.c
+ ${HDF5_SRC_DIR}/H5Oattribute.c
+ ${HDF5_SRC_DIR}/H5Obogus.c
+ ${HDF5_SRC_DIR}/H5Obtreek.c
+ ${HDF5_SRC_DIR}/H5Ocache.c
+ ${HDF5_SRC_DIR}/H5Ochunk.c
+ ${HDF5_SRC_DIR}/H5Ocont.c
+ ${HDF5_SRC_DIR}/H5Ocopy.c
+ ${HDF5_SRC_DIR}/H5Odbg.c
+ ${HDF5_SRC_DIR}/H5Odrvinfo.c
+ ${HDF5_SRC_DIR}/H5Odtype.c
+ ${HDF5_SRC_DIR}/H5Oefl.c
+ ${HDF5_SRC_DIR}/H5Ofill.c
+ ${HDF5_SRC_DIR}/H5Ofsinfo.c
+ ${HDF5_SRC_DIR}/H5Oginfo.c
+ ${HDF5_SRC_DIR}/H5Olayout.c
+ ${HDF5_SRC_DIR}/H5Olinfo.c
+ ${HDF5_SRC_DIR}/H5Olink.c
+ ${HDF5_SRC_DIR}/H5Omessage.c
+ ${HDF5_SRC_DIR}/H5Omtime.c
+ ${HDF5_SRC_DIR}/H5Oname.c
+ ${HDF5_SRC_DIR}/H5Onull.c
+ ${HDF5_SRC_DIR}/H5Opline.c
+ ${HDF5_SRC_DIR}/H5Orefcount.c
+ ${HDF5_SRC_DIR}/H5Osdspace.c
+ ${HDF5_SRC_DIR}/H5Oshared.c
+ ${HDF5_SRC_DIR}/H5Oshmesg.c
+ ${HDF5_SRC_DIR}/H5Ostab.c
+ ${HDF5_SRC_DIR}/H5Otest.c
+ ${HDF5_SRC_DIR}/H5Ounknown.c
+)
+
+SET (H5O_HDRS
+ ${HDF5_SRC_DIR}/H5Opkg.h
+ ${HDF5_SRC_DIR}/H5Opublic.h
+ ${HDF5_SRC_DIR}/H5Oshared.h
+)
+IDE_GENERATED_PROPERTIES ("H5O" "${H5O_HDRS}" "${H5O_SRCS}" )
+
+SET (H5P_SRCS
+ ${HDF5_SRC_DIR}/H5P.c
+ ${HDF5_SRC_DIR}/H5Pacpl.c
+ ${HDF5_SRC_DIR}/H5Pdapl.c
+ ${HDF5_SRC_DIR}/H5Pdcpl.c
+ ${HDF5_SRC_DIR}/H5Pdeprec.c
+ ${HDF5_SRC_DIR}/H5Pdxpl.c
+ ${HDF5_SRC_DIR}/H5Pfapl.c
+ ${HDF5_SRC_DIR}/H5Pfcpl.c
+ ${HDF5_SRC_DIR}/H5Pfmpl.c
+ ${HDF5_SRC_DIR}/H5Pgcpl.c
+ ${HDF5_SRC_DIR}/H5Pint.c
+ ${HDF5_SRC_DIR}/H5Plapl.c
+ ${HDF5_SRC_DIR}/H5Plcpl.c
+ ${HDF5_SRC_DIR}/H5Pocpl.c
+ ${HDF5_SRC_DIR}/H5Pocpypl.c
+ ${HDF5_SRC_DIR}/H5Pstrcpl.c
+ ${HDF5_SRC_DIR}/H5Ptest.c
+)
+
+SET (H5P_HDRS
+ ${HDF5_SRC_DIR}/H5Ppkg.h
+ ${HDF5_SRC_DIR}/H5Ppublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5P" "${H5P_HDRS}" "${H5P_SRCS}" )
+
+SET (H5S_SRCS
+ ${HDF5_SRC_DIR}/H5S.c
+ ${HDF5_SRC_DIR}/H5Sall.c
+ ${HDF5_SRC_DIR}/H5Sdbg.c
+ ${HDF5_SRC_DIR}/H5Shyper.c
+ ${HDF5_SRC_DIR}/H5Smpio.c
+ ${HDF5_SRC_DIR}/H5Snone.c
+ ${HDF5_SRC_DIR}/H5Spoint.c
+ ${HDF5_SRC_DIR}/H5Sselect.c
+ ${HDF5_SRC_DIR}/H5Stest.c
+)
+
+SET (H5S_HDRS
+ ${HDF5_SRC_DIR}/H5Spkg.h
+ ${HDF5_SRC_DIR}/H5Spublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5S" "${H5S_HDRS}" "${H5S_SRCS}" )
+
+SET (H5SM_SRCS
+ ${HDF5_SRC_DIR}/H5SM.c
+ ${HDF5_SRC_DIR}/H5SMbtree2.c
+ ${HDF5_SRC_DIR}/H5SMcache.c
+ ${HDF5_SRC_DIR}/H5SMmessage.c
+ ${HDF5_SRC_DIR}/H5SMtest.c
+)
+
+SET (H5SM_HDRS
+ ${HDF5_SRC_DIR}/H5SMpkg.h
+)
+IDE_GENERATED_PROPERTIES ("H5SM" "${H5SM_HDRS}" "${H5SM_SRCS}" )
+
+SET (H5T_SRCS
+ ${HDF5_SRC_DIR}/H5T.c
+ ${HDF5_SRC_DIR}/H5Tarray.c
+ ${HDF5_SRC_DIR}/H5Tbit.c
+ ${HDF5_SRC_DIR}/H5Tcommit.c
+ ${HDF5_SRC_DIR}/H5Tcompound.c
+ ${HDF5_SRC_DIR}/H5Tconv.c
+ ${HDF5_SRC_DIR}/H5Tcset.c
+ ${HDF5_SRC_DIR}/H5Tdbg.c
+ ${HDF5_SRC_DIR}/H5Tdeprec.c
+ ${HDF5_SRC_DIR}/H5Tenum.c
+ ${HDF5_SRC_DIR}/H5Tfields.c
+ ${HDF5_SRC_DIR}/H5Tfixed.c
+ ${HDF5_SRC_DIR}/H5Tfloat.c
+ ${HDF5_SRC_DIR}/H5Tnative.c
+ ${HDF5_SRC_DIR}/H5Toffset.c
+ ${HDF5_SRC_DIR}/H5Toh.c
+ ${HDF5_SRC_DIR}/H5Topaque.c
+ ${HDF5_SRC_DIR}/H5Torder.c
+ ${HDF5_SRC_DIR}/H5Tpad.c
+ ${HDF5_SRC_DIR}/H5Tprecis.c
+ ${HDF5_SRC_DIR}/H5Tstrpad.c
+ ${HDF5_SRC_DIR}/H5Tvisit.c
+ ${HDF5_SRC_DIR}/H5Tvlen.c
+)
+
+SET (H5T_HDRS
+ ${HDF5_SRC_DIR}/H5Tpkg.h
+ ${HDF5_SRC_DIR}/H5Tpublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5T" "${H5T_HDRS}" "${H5T_SRCS}" )
+
+SET (H5Z_SRCS
+ ${HDF5_SRC_DIR}/H5Z.c
+ ${HDF5_SRC_DIR}/H5Zdeflate.c
+ ${HDF5_SRC_DIR}/H5Zfletcher32.c
+ ${HDF5_SRC_DIR}/H5Znbit.c
+ ${HDF5_SRC_DIR}/H5Zscaleoffset.c
+ ${HDF5_SRC_DIR}/H5Zshuffle.c
+ ${HDF5_SRC_DIR}/H5Zszip.c
+ ${HDF5_SRC_DIR}/H5Ztrans.c
+)
+
+SET (H5Z_HDRS
+ ${HDF5_SRC_DIR}/H5Zpkg.h
+ ${HDF5_SRC_DIR}/H5Zpublic.h
+)
+IDE_GENERATED_PROPERTIES ("H5Z" "${H5Z_HDRS}" "${H5Z_SRCS}" )
+
+SET (common_SRCS
+ ${H5_SRCS}
+ ${H5A_SRCS}
+ ${H5AC_SRCS}
+ ${H5B_SRCS}
+ ${H5C_SRCS}
+ ${H5D_SRCS}
+ ${H5E_SRCS}
+ ${H5EA_SRCS}
+ ${H5F_SRCS}
+ ${H5FA_SRCS}
+ ${H5FD_SRCS}
+ ${H5FS_SRCS}
+ ${H5G_SRCS}
+ ${H5HF_SRCS}
+ ${H5HG_SRCS}
+ ${H5HL_SRCS}
+ ${H5MF_SRCS}
+ ${H5MP_SRCS}
+ ${H5O_SRCS}
+ ${H5P_SRCS}
+ ${H5S_SRCS}
+ ${H5SM_SRCS}
+ ${H5T_SRCS}
+ ${H5Z_SRCS}
+ ${HDF5_SRC_DIR}/H5FL.c
+ ${HDF5_SRC_DIR}/H5FO.c
+ ${HDF5_SRC_DIR}/H5HP.c
+ ${HDF5_SRC_DIR}/H5I.c
+ ${HDF5_SRC_DIR}/H5L.c
+ ${HDF5_SRC_DIR}/H5Lexternal.c
+ ${HDF5_SRC_DIR}/H5MM.c
+ ${HDF5_SRC_DIR}/H5R.c
+ ${HDF5_SRC_DIR}/H5RC.c
+ ${HDF5_SRC_DIR}/H5Rdeprec.c
+ ${HDF5_SRC_DIR}/H5RS.c
+ ${HDF5_SRC_DIR}/H5SL.c
+ ${HDF5_SRC_DIR}/H5ST.c
+ ${HDF5_SRC_DIR}/H5TS.c
+ ${HDF5_SRC_DIR}/H5V.c
+ ${HDF5_SRC_DIR}/H5WB.c
+)
+
+SET (H5_PUBLIC_HEADERS
+ ${H5_HDRS}
+ ${H5A_HDRS}
+ ${H5AC_HDRS}
+ ${H5B_HDRS}
+ ${H5C_HDRS}
+ ${H5D_HDRS}
+ ${H5E_HDRS}
+ ${H5EA_HDRS}
+ ${H5F_HDRS}
+ ${H5FA_HDRS}
+ ${H5FD_HDRS}
+ ${H5FS_HDRS}
+ ${H5G_HDRS}
+ ${H5HF_HDRS}
+ ${H5HG_HDRS}
+ ${H5HL_HDRS}
+ ${H5MF_HDRS}
+ ${H5MP_HDRS}
+ ${H5O_HDRS}
+ ${H5P_HDRS}
+ ${H5S_HDRS}
+ ${H5SM_HDRS}
+ ${H5T_HDRS}
+ ${H5Z_HDRS}
+ ${HDF5_SRC_DIR}/H5Ipkg.h
+ ${HDF5_SRC_DIR}/H5Ipublic.h
+ ${HDF5_SRC_DIR}/H5Lpkg.h
+ ${HDF5_SRC_DIR}/H5Lpublic.h
+ ${HDF5_SRC_DIR}/H5MMpublic.h
+ ${HDF5_SRC_DIR}/H5Rpkg.h
+ ${HDF5_SRC_DIR}/H5Rpublic.h
+)
+
+# --------------------------------------------------------------------
+# If we are compiling on Windows then add the windows specific files
+# --------------------------------------------------------------------
+IF (WIN32)
+ SET (common_SRCS ${common_SRCS} ${HDF5_SRC_DIR}/H5FDwindows.c)
+ SET (H5_PUBLIC_HEADERS ${H5_PUBLIC_HEADERS} ${HDF5_SRC_DIR}/H5FDwindows.h)
+ENDIF (WIN32)
+
+SET (H5_PRIVATE_HEADERS
+ ${HDF5_SRC_DIR}/H5private.h
+ ${HDF5_SRC_DIR}/H5Aprivate.h
+ ${HDF5_SRC_DIR}/H5ACprivate.h
+ ${HDF5_SRC_DIR}/H5B2private.h
+ ${HDF5_SRC_DIR}/H5Bprivate.h
+ ${HDF5_SRC_DIR}/H5Cprivate.h
+ ${HDF5_SRC_DIR}/H5CSprivate.h
+ ${HDF5_SRC_DIR}/H5Dprivate.h
+ ${HDF5_SRC_DIR}/H5Eprivate.h
+ ${HDF5_SRC_DIR}/H5EAprivate.h
+ ${HDF5_SRC_DIR}/H5FAprivate.h
+ ${HDF5_SRC_DIR}/H5FDprivate.h
+ ${HDF5_SRC_DIR}/H5Fprivate.h
+ ${HDF5_SRC_DIR}/H5FLprivate.h
+ ${HDF5_SRC_DIR}/H5FOprivate.h
+ ${HDF5_SRC_DIR}/H5MFprivate.h
+ ${HDF5_SRC_DIR}/H5MMprivate.h
+ ${HDF5_SRC_DIR}/H5Cprivate.h
+ ${HDF5_SRC_DIR}/H5FSprivate.h
+ ${HDF5_SRC_DIR}/H5Gprivate.h
+ ${HDF5_SRC_DIR}/H5HFprivate.h
+ ${HDF5_SRC_DIR}/H5HGprivate.h
+ ${HDF5_SRC_DIR}/H5HLprivate.h
+ ${HDF5_SRC_DIR}/H5HPprivate.h
+ ${HDF5_SRC_DIR}/H5Iprivate.h
+ ${HDF5_SRC_DIR}/H5Lprivate.h
+ ${HDF5_SRC_DIR}/H5MPprivate.h
+ ${HDF5_SRC_DIR}/H5Oprivate.h
+ ${HDF5_SRC_DIR}/H5Pprivate.h
+ ${HDF5_SRC_DIR}/H5RCprivate.h
+ ${HDF5_SRC_DIR}/H5Rprivate.h
+ ${HDF5_SRC_DIR}/H5RSprivate.h
+ ${HDF5_SRC_DIR}/H5SLprivate.h
+ ${HDF5_SRC_DIR}/H5SMprivate.h
+ ${HDF5_SRC_DIR}/H5Sprivate.h
+ ${HDF5_SRC_DIR}/H5STprivate.h
+ ${HDF5_SRC_DIR}/H5Tprivate.h
+ ${HDF5_SRC_DIR}/H5TSprivate.h
+ ${HDF5_SRC_DIR}/H5Vprivate.h
+ ${HDF5_SRC_DIR}/H5WBprivate.h
+ ${HDF5_SRC_DIR}/H5Zprivate.h
+ ${HDF5_SRC_DIR}/H5win32defs.h
+)
+
+INCLUDE_DIRECTORIES (${HDF5_SOURCE_DIR})
+INCLUDE_DIRECTORIES (${CMAKE_BINARY_DIR})
+
+#-----------------------------------------------------------------------------
+# When building utility executables that generate other (source) files :
+# we make use of the following variables defined in the root CMakeLists.
+# Certain systems may add /Debug or /Release to output paths
+# and we need to call the executable from inside the CMake configuration
+#-----------------------------------------------------------------------------
+#inherit EXE_EXT from parent HDF5 cmake project
+#inherit CFG_INIT from parent HDF5 cmake project
+
+#-----------------------------------------------------------------------------
+# Setup the H5Detect utility which generates H5Tinit with platform
+# specific type checks inside
+#-----------------------------------------------------------------------------
+SET (CMD ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}${CFG_INIT}/H5detect${EXE_EXT})
+IF (XCODE)
+ SET (CMD "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/\${CONFIGURATION}/H5detect")
+ENDIF (XCODE)
+ADD_EXECUTABLE (H5detect ${HDF5_SRC_DIR}/H5detect.c)
+IF (MSVC)
+ TARGET_LINK_LIBRARIES (H5detect "ws2_32.lib")
+ENDIF (MSVC)
+
+ADD_CUSTOM_COMMAND (
+ OUTPUT ${HDF5_BINARY_DIR}/H5Tinit.c
+ COMMAND ${CMD}
+ ARGS > ${HDF5_BINARY_DIR}/H5Tinit.c
+ DEPENDS H5detect
+)
+
+SET (CMDL ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}${CFG_INIT}/H5make_libsettings${EXE_EXT})
+IF (XCODE)
+ SET (CMDL "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/\${CONFIGURATION}/H5make_libsettings")
+ENDIF (XCODE)
+ADD_EXECUTABLE (H5make_libsettings ${HDF5_SRC_DIR}/H5make_libsettings.c)
+IF (MSVC)
+ TARGET_LINK_LIBRARIES (H5make_libsettings "ws2_32.lib")
+ENDIF (MSVC)
+
+ADD_CUSTOM_COMMAND (
+ OUTPUT ${HDF5_BINARY_DIR}/H5lib_settings.c
+ COMMAND ${CMDL}
+ ARGS > ${HDF5_BINARY_DIR}/H5lib_settings.c
+ DEPENDS H5make_libsettings
+)
+
+ADD_CUSTOM_COMMAND (
+ TARGET ${HDF5_BINARY_DIR}/H5Edefin.h
+ PRE_BUILD
+ COMMAND ${CMAKE_COMMAND} -E perl ${HDF5_SOURCE_DIR}/bin/make_err ${HDF5_SOURCE_DIR}/src/H5err.txt
+ COMMENT " Creating err headers"
+)
+
+ADD_CUSTOM_COMMAND (
+ TARGET ${HDF5_BINARY_DIR}/H5version.h
+ PRE_BUILD
+ COMMAND ${CMAKE_COMMAND} -E perl ${HDF5_SOURCE_DIR}/bin/make_vers ${HDF5_SOURCE_DIR}/src/H5vers.txt
+ COMMENT " Creating API version macro"
+)
+
+ADD_CUSTOM_COMMAND (
+ TARGET ${HDF5_BINARY_DIR}/H5overflow.h
+ PRE_BUILD
+ COMMAND ${CMAKE_COMMAND} -E perl ${HDF5_SOURCE_DIR}/bin/make_overflow ${HDF5_SOURCE_DIR}/src/H5overflow.txt
+ COMMENT " Creating Assignment overflow macro"
+)
+
+#-----------------------------------------------------------------------------
+# Add H5Tinit source to build - generated by H5Detect/CMake at configure time
+#-----------------------------------------------------------------------------
+SET (common_SRCS ${common_SRCS} ${HDF5_BINARY_DIR}/H5Tinit.c)
+SET_SOURCE_FILES_PROPERTIES (${HDF5_BINARY_DIR}/H5Tinit.c GENERATED)
+SET (common_SRCS ${common_SRCS} ${HDF5_BINARY_DIR}/H5lib_settings.c)
+SET_SOURCE_FILES_PROPERTIES (${HDF5_BINARY_DIR}/H5lib_settings.c GENERATED)
+SET (common_SRCS ${common_SRCS} ${HDF5_BINARY_DIR}/H5Edefin.h)
+SET_SOURCE_FILES_PROPERTIES (${HDF5_BINARY_DIR}/H5Edefin.h GENERATED)
+SET (common_SRCS ${common_SRCS} ${HDF5_BINARY_DIR}/H5version.h)
+SET_SOURCE_FILES_PROPERTIES (${HDF5_BINARY_DIR}/H5version.h GENERATED)
+SET (common_SRCS ${common_SRCS} ${HDF5_BINARY_DIR}/H5overflow.h)
+SET_SOURCE_FILES_PROPERTIES (${HDF5_BINARY_DIR}/H5overflow.h GENERATED)
+
+ADD_LIBRARY (${HDF5_LIB_TARGET} ${LIB_TYPE} ${common_SRCS} ${H5_PUBLIC_HEADERS} ${H5_PRIVATE_HEADERS})
+SET_TARGET_PROPERTIES(${HDF5_LIB_TARGET} PROPERTIES DEFINE_SYMBOL ${HDF5_LIB_CORENAME}_EXPORTS)
+TARGET_LINK_LIBRARIES (${HDF5_LIB_TARGET} ${LINK_LIBS})
+SET_GLOBAL_VARIABLE (HDF5_LIBRARIES_TO_EXPORT ${HDF5_LIB_TARGET})
+H5_SET_LIB_OPTIONS (${HDF5_LIB_TARGET} ${HDF5_LIB_NAME} ${LIB_TYPE})
+
+#-----------------------------------------------------------------------------
+# Add file(s) to CMake Install
+#-----------------------------------------------------------------------------
+INSTALL (
+ FILES
+ ${H5_PUBLIC_HEADERS}
+ ${H5_PRIVATE_HEADERS}
+ DESTINATION
+ include
+ COMPONENT
+ headers
+)
+
+#-----------------------------------------------------------------------------
+# Add Target(s) to CMake Install for import into other projects
+#-----------------------------------------------------------------------------
+IF (HDF5_EXPORTED_TARGETS)
+ INSTALL (
+ TARGETS
+ ${HDF5_LIB_TARGET}
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ LIBRARY DESTINATION lib COMPONENT libraries
+ ARCHIVE DESTINATION lib COMPONENT libraries
+ RUNTIME DESTINATION bin COMPONENT libraries
+ )
+ENDIF (HDF5_EXPORTED_TARGETS)
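
The closing INSTALL(TARGETS ... EXPORT ${HDF5_EXPORTED_TARGETS}) block above only registers the library with an export set; the root project is expected to install that export set so other CMake builds can import the target. A possible downstream use is sketched here — the hdf5-targets.cmake file name, the /usr/local install prefix and the hdf5 target name are all assumptions, since none of them are fixed by this file.

    # Hypothetical consumer of the installed export set (names and paths are assumptions).
    cmake_minimum_required (VERSION 2.8)
    PROJECT (h5consumer C)

    # Import the targets file written by INSTALL(EXPORT ...) in the root HDF5 project
    INCLUDE (/usr/local/share/cmake/hdf5-targets.cmake)

    INCLUDE_DIRECTORIES (/usr/local/include)   # headers installed by the block above
    ADD_EXECUTABLE (h5demo main.c)
    TARGET_LINK_LIBRARIES (h5demo hdf5)        # imported target from the export set
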
diff --git a/src/H5AC.c b/src/H5AC.c
index d5d7589..76e43dd 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -24,22 +24,6 @@
* with a particular HDF file share the same cache; each
* HDF file has it's own cache.
*
- * Modifications:
- *
- * Robb Matzke, 4 Aug 1997
- * Added calls to H5E.
- *
- * Quincey Koziol, 22 Apr 2000
- * Turned on "H5AC_SORT_BY_ADDR"
- *
- * John Mainzer, 5/19/04
- * Complete redesign and rewrite. See the header comments for
- * H5AC_t for an overview of what is going on.
- *
- * John Mainzer, 6/4/04
- * Factored the new cache code into a separate file (H5C.c) to
- * facilitate re-use. Re-worked this file again to use H5C.
- *
*-------------------------------------------------------------------------
*/
@@ -91,7 +75,7 @@ H5FL_DEFINE_STATIC(H5AC_aux_t);
*
* addr: file offset of a metadata entry. Entries are added to this
* list (if they aren't there already) when they are marked
- * dirty in an unprotect, inserted, or renamed. They are
+ * dirty in an unprotect, inserted, or moved. They are
* removed when they appear in a clean entries broadcast.
*
****************************************************************************/
@@ -155,9 +139,7 @@ static herr_t H5AC_log_deleted_entry(H5AC_t * cache_ptr,
unsigned int flags);
static herr_t H5AC_log_dirtied_entry(const H5AC_info_t * entry_ptr,
- haddr_t addr,
- hbool_t size_changed,
- size_t new_size);
+ haddr_t addr);
static herr_t H5AC_log_flushed_entry(H5C_t * cache_ptr,
haddr_t addr,
@@ -175,9 +157,7 @@ static herr_t H5AC_log_flushed_entry_dummy(H5C_t * cache_ptr,
static herr_t H5AC_log_inserted_entry(H5F_t * f,
H5AC_t * cache_ptr,
- H5AC_info_t * entry_ptr,
- const H5AC_class_t * type,
- haddr_t addr);
+ H5AC_info_t * entry_ptr);
static herr_t H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
hid_t dxpl_id,
@@ -189,7 +169,7 @@ static herr_t H5AC_receive_and_apply_clean_list(H5F_t * f,
hid_t secondary_dxpl_id,
H5AC_t * cache_ptr);
-static herr_t H5AC_log_renamed_entry(const H5F_t * f,
+static herr_t H5AC_log_moved_entry(const H5F_t * f,
haddr_t old_addr,
haddr_t new_addr);
@@ -209,8 +189,6 @@ static herr_t H5AC_flush_entries(H5F_t *f);
* Programmer: Quincey Koziol
* Saturday, January 18, 2003
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -236,8 +214,6 @@ done:
* Programmer: Quincey Koziol
* Thursday, July 18, 2002
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -357,8 +333,6 @@ done:
* Programmer: Quincey Koziol
* Thursday, July 18, 2002
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
int
@@ -420,57 +394,6 @@ H5AC_term_interface(void)
* matzke@llnl.gov
* Jul 9 1997
*
- * Modifications:
- *
- * Complete re-design and re-write to support the re-designed
- * metadata cache.
- *
- * At present, the size_hint is ignored, and the
- * max_cache_size and min_clean_size fields are hard
- * coded. This should be fixed, but a parameter
- * list change will be required, so I will leave it
- * for now.
- *
- * Since no-one seems to care, the function now returns
- * one on success.
- * JRM - 4/28/04
- *
- * Reworked the function again after abstracting its guts to
- * the similar function in H5C.c. The function is now a
- * wrapper for H5C_create().
- * JRM - 6/4/04
- *
- * Deleted the old size_hint parameter and added the
- * max_cache_size, and min_clean_size parameters.
- *
- * JRM - 3/10/05
- *
- * Deleted the max_cache_size, and min_clean_size parameters,
- * and added the config_ptr parameter. Added code to
- * validate the resize configuration before we do anything.
- *
- * JRM - 3/24/05
- *
- * Changed the type of config_ptr from H5AC_auto_size_ctl_t *
- * to H5AC_cache_config_t *. Propagated associated changes
- * through the function.
- * JRM - 4/7/05
- *
- * Added code allocating and initializing the auxilary
- * structure (an instance of H5AC_aux_t), and linking it
- * to the instance of H5C_t created by H5C_create(). At
- * present, the auxilary structure is only used in PHDF5.
- *
- * JRM - 6/28/05
- *
- * Added code to set the prefix if required.
- *
- * JRM - 1/20/06
- *
- * Added code to initialize the new write_done field.
- *
- * JRM - 5/11/06
- *
*-------------------------------------------------------------------------
*/
@@ -581,8 +504,8 @@ H5AC_create(const H5F_t *f,
aux_ptr->unprotect_dirty_bytes_updates = 0;
aux_ptr->insert_dirty_bytes = 0;
aux_ptr->insert_dirty_bytes_updates = 0;
- aux_ptr->rename_dirty_bytes = 0;
- aux_ptr->rename_dirty_bytes_updates = 0;
+ aux_ptr->move_dirty_bytes = 0;
+ aux_ptr->move_dirty_bytes_updates = 0;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
aux_ptr->d_slist_ptr = NULL;
aux_ptr->d_slist_len = 0;
@@ -815,12 +738,6 @@ done:
* Programmer: John Mainzer
* 6/30/06
*
- * Modifications:
- *
- * Added 'flags' paramater, to allow freeing file space
- *
- * QAK - 2/5/08
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -867,7 +784,7 @@ H5AC_expunge_entry(H5F_t *f,
#endif /* H5AC__TRACE_FILE_ENABLED */
result = H5C_expunge_entry(f,
- dxpl_id,
+ dxpl_id,
H5AC_noblock_dxpl_id,
type,
addr,
@@ -979,10 +896,6 @@ done:
* Programmer: John Mainzer
* 4/27/06
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1064,60 +977,12 @@ done:
* matzke@llnl.gov
* Jul 9 1997
*
- * Modifications:
- * Robb Matzke, 1999-07-27
- * The ADDR argument is passed by value.
- *
- * Bill Wendling, 2003-09-16
- * Added automatic "flush" if the FPHDF5 driver is being
- * used. This'll write the metadata to the SAP where other,
- * lesser processes can grab it.
- *
- * JRM - 5/13/04
- * Complete re-write for the new metadata cache. The new
- * code is functionally almost identical to the old, although
- * the sanity check for a protected entry is now an assert
- * at the beginning of the function.
- *
- * JRM - 6/7/04
- * Abstracted the guts of the function to H5C_insert_entry()
- * in H5C.c, and then re-wrote the function as a wrapper for
- * H5C_insert_entry().
- *
- * JRM - 1/6/05
- * Added the flags parameter. At present, this parameter is
- * only used to set the new flush_marker field on the new
- * entry. Since this doesn't apply to the SAP code, no change
- * is needed there. Thus the only change to the body of the
- * code is to pass the flags parameter through to
- * H5C_insert_entry().
- *
- * JRM - 6/6/05
- * Added code to force newly inserted entries to be dirty
- * in the flexible parallel case. The normal case is handled
- * in H5C.c. This is part of a series of changes directed at
- * moving management of the dirty flag on cache entries into
- * the cache code.
- *
- * JRM - 7/5/05
- * Added code to track dirty byte generation, and to trigger
- * clean entry list propagation when it exceeds a user
- * specified threshold. Note that this code only applies in
- * the PHDF5 case. It should have no effect on either the
- * serial or FPHSD5 cases.
- *
- * JRM - 6/6/06
- * Added trace file support.
- *
*-------------------------------------------------------------------------
*/
-
herr_t
H5AC_set(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
void *thing, unsigned int flags)
{
- herr_t result;
- H5AC_info_t *info;
#ifdef H5_HAVE_PARALLEL
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
@@ -1164,74 +1029,43 @@ H5AC_set(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
}
#endif /* H5AC__TRACE_FILE_ENABLED */
- /* Get local copy of this information */
- info = (H5AC_info_t *)thing;
-
- info->addr = addr;
- info->type = type;
- info->is_protected = FALSE;
-
-#ifdef H5_HAVE_PARALLEL
- if ( NULL != (aux_ptr = f->shared->cache->aux_ptr) ) {
-
- result = H5AC_log_inserted_entry(f,
- f->shared->cache,
- (H5AC_info_t *)thing,
- type,
- addr);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, \
- "H5AC_log_inserted_entry() failed.")
- }
- }
-#endif /* H5_HAVE_PARALLEL */
-
- result = H5C_insert_entry(f,
- dxpl_id,
- H5AC_noblock_dxpl_id,
- type,
- addr,
- thing,
- flags);
-
- if ( result < 0 ) {
-
+ /* Insert entry into metadata cache */
+ if(H5C_insert_entry(f, dxpl_id, H5AC_noblock_dxpl_id, type, addr, thing, flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C_insert_entry() failed")
- }
#if H5AC__TRACE_FILE_ENABLED
- if ( trace_file_ptr != NULL ) {
-
+ if(trace_file_ptr != NULL) {
/* make note of the entry size */
trace_entry_size = ((H5C_cache_entry_t *)thing)->size;
}
#endif /* H5AC__TRACE_FILE_ENABLED */
#ifdef H5_HAVE_PARALLEL
- /* Check if we should try to flush */
- if(aux_ptr && (aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)) {
- hbool_t evictions_enabled;
-
- /* Query if evictions are allowed */
- if(H5C_get_evictions_enabled((const H5C_t *)f->shared->cache, &evictions_enabled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_get_evictions_enabled() failed.")
-
- /* Flush if evictions are allowed */
- if(evictions_enabled) {
- if(H5AC_propagate_flushed_and_still_clean_entries_list(f,
- H5AC_noblock_dxpl_id, f->shared->cache, TRUE) < 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.")
+ if(NULL != (aux_ptr = f->shared->cache->aux_ptr)) {
+ if(H5AC_log_inserted_entry(f, f->shared->cache, (H5AC_info_t *)thing) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5AC_log_inserted_entry() failed")
+
+ /* Check if we should try to flush */
+ if(aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold) {
+ hbool_t evictions_enabled;
+
+ /* Query if evictions are allowed */
+ if(H5C_get_evictions_enabled((const H5C_t *)f->shared->cache, &evictions_enabled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_get_evictions_enabled() failed.")
+
+ /* Flush if evictions are allowed */
+ if(evictions_enabled) {
+ if(H5AC_propagate_flushed_and_still_clean_entries_list(f,
+ H5AC_noblock_dxpl_id, f->shared->cache, TRUE) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't propagate clean entries list.")
+ } /* end if */
} /* end if */
} /* end if */
#endif /* H5_HAVE_PARALLEL */
done:
-
#if H5AC__TRACE_FILE_ENABLED
- if ( trace_file_ptr != NULL ) {
-
+ if(trace_file_ptr != NULL) {
HDfprintf(trace_file_ptr, "%s %d %d\n", trace,
(int)trace_entry_size,
(int)ret_value);
@@ -1239,96 +1073,15 @@ done:
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5AC_set() */
/*-------------------------------------------------------------------------
- * Function: H5AC_mark_pinned_entry_dirty
- *
- * Purpose: Mark a pinned entry as dirty. The target entry MUST be
- * be pinned, and MUST be unprotected.
- *
- * If the entry has changed size, the function updates
- * data structures for the size change.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 4/11/06
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5AC_mark_pinned_entry_dirty(void *thing, hbool_t size_changed, size_t new_size)
-{
-#if H5AC__TRACE_FILE_ENABLED
- char trace[128] = "";
- FILE * trace_file_ptr = NULL;
-#endif /* H5AC__TRACE_FILE_ENABLED */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(H5AC_mark_pinned_entry_dirty, FAIL)
-
- /* Sanity check */
- HDassert(thing);
-
-#if H5AC__TRACE_FILE_ENABLED
- /* For the mark pinned entry dirty call, only the addr, size_changed,
- * and new_size are really necessary in the trace file. Write the result
- * to catch occult errors.
- */
- if((H5C_get_trace_file_ptr_from_entry(thing, &trace_file_ptr) >= 0) &&
- (NULL != trace_file_ptr))
- sprintf(trace, "%s 0x%lx %d %d", FUNC,
- (unsigned long)(((H5C_cache_entry_t *)thing)->addr),
- (int)size_changed,
- (int)new_size);
-#endif /* H5AC__TRACE_FILE_ENABLED */
-
-#ifdef H5_HAVE_PARALLEL
-{
- H5AC_info_t *entry_ptr = (H5AC_info_t *)thing;
- H5C_t *cache_ptr = entry_ptr->cache_ptr;
-
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- if((!entry_ptr->is_dirty) && (NULL != cache_ptr->aux_ptr)) {
- /* Check for usage errors */
- if(!entry_ptr->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry isn't pinned??")
- if(entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry is protected??")
-
- if(H5AC_log_dirtied_entry(entry_ptr, entry_ptr->addr, size_changed, new_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't log dirtied entry")
- } /* end if */
-}
-#endif /* H5_HAVE_PARALLEL */
-
- if(H5C_mark_pinned_entry_dirty(thing, size_changed, new_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't mark pinned entry dirty")
-
-done:
-#if H5AC__TRACE_FILE_ENABLED
- if(trace_file_ptr)
- HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
-#endif /* H5AC__TRACE_FILE_ENABLED */
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_mark_pinned_entry_dirty() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5AC_mark_pinned_or_protected_entry_dirty
+ * Function: H5AC_mark_entry_dirty
*
* Purpose: Mark a pinned or protected entry as dirty. The target
* entry MUST be either pinned, protected, or both.
*
- * Unlike H5AC_mark_pinned_entry_dirty(), this function does
- * not support size changes.
- *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -1337,7 +1090,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_mark_pinned_or_protected_entry_dirty(void *thing)
+H5AC_mark_entry_dirty(void *thing)
{
#if H5AC__TRACE_FILE_ENABLED
char trace[128] = "";
@@ -1345,7 +1098,7 @@ H5AC_mark_pinned_or_protected_entry_dirty(void *thing)
#endif /* H5AC__TRACE_FILE_ENABLED */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(H5AC_mark_pinned_or_protected_entry_dirty, FAIL)
+ FUNC_ENTER_NOAPI(H5AC_mark_entry_dirty, FAIL)
/* Sanity check */
HDassert(thing);
@@ -1371,13 +1124,13 @@ H5AC_mark_pinned_or_protected_entry_dirty(void *thing)
if((!entry_ptr->is_dirty) && (!entry_ptr->is_protected) &&
(entry_ptr->is_pinned) && (NULL != cache_ptr->aux_ptr)) {
- if(H5AC_log_dirtied_entry(entry_ptr, entry_ptr->addr, FALSE, 0) < 0)
+ if(H5AC_log_dirtied_entry(entry_ptr, entry_ptr->addr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't log dirtied entry")
} /* end if */
}
#endif /* H5_HAVE_PARALLEL */
- if(H5C_mark_pinned_or_protected_entry_dirty(thing) < 0)
+ if(H5C_mark_entry_dirty(thing) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't mark pinned or protected entry dirty")
done:
@@ -1387,11 +1140,11 @@ done:
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_mark_pinned_or_protected_entry_dirty() */
+} /* H5AC_mark_entry_dirty() */
/*-------------------------------------------------------------------------
- * Function: H5AC_rename
+ * Function: H5AC_move_entry
*
* Purpose: Use this function to notify the cache that an object's
* file address changed.
@@ -1402,37 +1155,10 @@ done:
* matzke@llnl.gov
* Jul 9 1997
*
- * Modifications:
- * Robb Matzke, 1999-07-27
- * The OLD_ADDR and NEW_ADDR arguments are passed by value.
- *
- * JRM 5/17/04
- * Complete rewrite for the new meta-data cache.
- *
- * JRM - 6/7/04
- * Abstracted the guts of the function to H5C_rename_entry()
- * in H5C.c, and then re-wrote the function as a wrapper for
- * H5C_rename_entry().
- *
- * JRM - 7/5/05
- * Added code to track dirty byte generation, and to trigger
- * clean entry list propagation when it exceeds a user
- * specified threshold. Note that this code only applies in
- * the PHDF5 case. It should have no effect on either the
- * serial or FPHSD5 cases.
- *
- * Note that this code presumes that the renamed entry will
- * be present in all caches -- which it must be at present.
- * To maintain this invarient, only rename entries immediately
- * after you unprotect them.
- *
- * JRM - 6/6/06
- * Added trace file support.
- *
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_rename(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t new_addr)
+H5AC_move_entry(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t new_addr)
{
herr_t result;
herr_t ret_value=SUCCEED; /* Return value */
@@ -1444,7 +1170,7 @@ H5AC_rename(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t new_ad
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
- FUNC_ENTER_NOAPI(H5AC_rename, FAIL)
+ FUNC_ENTER_NOAPI(H5AC_move_entry, FAIL)
HDassert(f);
HDassert(f->shared->cache);
@@ -1454,7 +1180,7 @@ H5AC_rename(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t new_ad
HDassert(H5F_addr_ne(old_addr, new_addr));
#if H5AC__TRACE_FILE_ENABLED
- /* For the rename call, only the old addr and new addr are really
+ /* For the move call, only the old addr and new addr are really
* necessary in the trace file. Include the type id so we don't have to
* look it up. Also write the result to catch occult errors.
*/
@@ -1464,7 +1190,7 @@ H5AC_rename(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t new_ad
( H5C_get_trace_file_ptr(f->shared->cache, &trace_file_ptr) >= 0) &&
( trace_file_ptr != NULL ) ) {
- sprintf(trace, "H5AC_rename 0x%lx 0x%lx %d",
+ sprintf(trace, "H5AC_move_entry 0x%lx 0x%lx %d",
(unsigned long)old_addr,
(unsigned long)new_addr,
(int)(type->id));
@@ -1473,20 +1199,20 @@ H5AC_rename(H5F_t *f, const H5AC_class_t *type, haddr_t old_addr, haddr_t new_ad
#ifdef H5_HAVE_PARALLEL
if ( NULL != (aux_ptr = f->shared->cache->aux_ptr) ) {
- if(H5AC_log_renamed_entry(f, old_addr, new_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't log renamed entry")
+ if(H5AC_log_moved_entry(f, old_addr, new_addr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't log moved entry")
}
#endif /* H5_HAVE_PARALLEL */
- result = H5C_rename_entry(f->shared->cache,
+ result = H5C_move_entry(f->shared->cache,
type,
old_addr,
new_addr);
if ( result < 0 ) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
- "H5C_rename_entry() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, \
+ "H5C_move_entry() failed.")
}
#ifdef H5_HAVE_PARALLEL
@@ -1518,7 +1244,7 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_rename() */
+} /* H5AC_move_entry() */
/*-------------------------------------------------------------------------
@@ -1637,56 +1363,13 @@ done:
* or flushed -- nor may it be accessed by another call to
* H5AC_protect. Any attempt to do so will result in a failure.
*
- * This comment is a re-write of the original Purpose: section.
- * For historical interest, the original version is reproduced
- * below:
- *
- * Original Purpose section:
- *
- * Similar to H5AC_find() except the object is removed from
- * the cache and given to the caller, preventing other parts
- * of the program from modifying the protected object or
- * preempting it from the cache.
- *
- * The caller must call H5AC_unprotect() when finished with
- * the pointer.
- *
* Return: Success: Ptr to the object.
- *
* Failure: NULL
*
* Programmer: Robb Matzke
* matzke@llnl.gov
* Sep 2 1997
*
- * Modifications:
- * Robb Matzke, 1999-07-27
- * The ADDR argument is passed by value.
- *
- * Bill Wendling, 2003-09-10
- * Added parameter to indicate whether this is a READ or
- * WRITE type of protect.
- *
- * JRM -- 5/17/04
- * Complete re-write for the new client cache. See revised
- * Purpose section above.
- *
- * JRM - 6/7/04
- * Abstracted the guts of the function to H5C_protect()
- * in H5C.c, and then re-wrote the function as a wrapper for
- * H5C_protect().
- *
- * JRM - 6/6/06
- * Added trace file support.
- *
- * JRM - 3/18/07
- * Modified code to support the new flags parameter for
- * H5C_protect(). For now, that means passing in the
- * H5C_READ_ONLY_FLAG if rw == H5AC_READ.
- *
- * Also updated the trace file output to save the
- * rw parameter, since we are now doing something with it.
- *
*-------------------------------------------------------------------------
*/
void *
@@ -1697,7 +1380,6 @@ H5AC_protect(H5F_t *f,
void *udata,
H5AC_protect_t rw)
{
- /* char * fcn_name = "H5AC_protect"; */
unsigned protect_flags = H5C__NO_FLAGS_SET;
void * thing = (void *)NULL;
void * ret_value; /* Return value */
@@ -1762,11 +1444,11 @@ H5AC_protect(H5F_t *f,
}
thing = H5C_protect(f,
- dxpl_id,
+ dxpl_id,
H5AC_noblock_dxpl_id,
- type,
- addr,
- udata,
+ type,
+ addr,
+ udata,
protect_flags);
if ( thing == NULL ) {
@@ -1802,10 +1484,9 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5AC_resize_pinned_entry
+ * Function: H5AC_resize_entry
*
- * Purpose: Resize a pinned entry. The target entry MUST be
- * be pinned, and MUST not be unprotected.
+ * Purpose: Resize a pinned or protected entry.
*
* Return: Non-negative on success/Negative on failure
*
@@ -1815,7 +1496,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_resize_pinned_entry(void *thing, size_t new_size)
+H5AC_resize_entry(void *thing, size_t new_size)
{
#if H5AC__TRACE_FILE_ENABLED
char trace[128] = "";
@@ -1823,7 +1504,7 @@ H5AC_resize_pinned_entry(void *thing, size_t new_size)
#endif /* H5AC__TRACE_FILE_ENABLED */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(H5AC_resize_pinned_entry, FAIL)
+ FUNC_ENTER_NOAPI(H5AC_resize_entry, FAIL)
/* Sanity check */
HDassert(thing);
@@ -1840,6 +1521,9 @@ H5AC_resize_pinned_entry(void *thing, size_t new_size)
(int)new_size);
#endif /* H5AC__TRACE_FILE_ENABLED */
+ if(H5C_resize_entry(thing, new_size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "can't resize entry")
+
#ifdef H5_HAVE_PARALLEL
{
H5AC_info_t * entry_ptr = (H5AC_info_t *)thing;
@@ -1849,21 +1533,12 @@ H5AC_resize_pinned_entry(void *thing, size_t new_size)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
if((!entry_ptr->is_dirty) && (NULL != cache_ptr->aux_ptr)) {
- /* Check for usage errors */
- if(!entry_ptr->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "Entry isn't pinned??")
- if(entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "Entry is protected??")
-
- if(H5AC_log_dirtied_entry(entry_ptr, entry_ptr->addr, TRUE, new_size) < 0)
+ if(H5AC_log_dirtied_entry(entry_ptr, entry_ptr->addr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't log dirtied entry")
} /* end if */
}
#endif /* H5_HAVE_PARALLEL */
- if(H5C_resize_pinned_entry(thing, new_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "can't resize entry")
-
done:
#if H5AC__TRACE_FILE_ENABLED
if(trace_file_ptr)
@@ -1871,7 +1546,7 @@ done:
#endif /* H5AC__TRACE_FILE_ENABLED */
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_resize_pinned_entry() */
+} /* H5AC_resize_entry() */
/*-------------------------------------------------------------------------
@@ -2010,57 +1685,6 @@ done:
* matzke@llnl.gov
* Sep 2 1997
*
- * Modifications:
- * Robb Matzke, 1999-07-27
- * The ADDR argument is passed by value.
- *
- * Quincey Koziol, 2003-03-19
- * Added "deleted" argument
- *
- * Bill Wendling, 2003-09-18
- * If this is an FPHDF5 driver and the data is dirty,
- * perform a "flush" that writes the data to the SAP.
- *
- * John Mainzer 5/19/04
- * Complete re-write for the new metadata cache.
- *
- * JRM - 6/7/04
- * Abstracted the guts of the function to H5C_unprotect()
- * in H5C.c, and then re-wrote the function as a wrapper for
- * H5C_unprotect().
- *
- * JRM - 1/6/05
- * Replaced the deleted parameter with the new flags parameter.
- * Since the deleted parameter is not used by the FPHDF5 code,
- * the only change in the body is to replace the deleted
- * parameter with the flags parameter in the call to
- * H5C_unprotect().
- *
- * JRM - 6/6/05
- * Added the dirtied flag and supporting code. This is
- * part of a collection of changes directed at moving
- * management of cache entry dirty flags into the H5C code.
- *
- * JRM - 7/5/05
- * Added code to track dirty byte generation, and to trigger
- * clean entry list propagation when it exceeds a user
- * specified threshold. Note that this code only applies in
- * the PHDF5 case. It should have no effect on either the
- * serial or FPHSD5 cases.
- *
- * JRM - 9/8/05
- * Added code to track entry size changes. This is necessary
- * as it can effect dirty byte creation counts, thereby
- * throwing the caches out of sync in the PHDF5 case.
- *
- * JRM - 5/16/06
- * Added code to use the new dirtied field in
- * H5C_cache_entry_t in the test to see if the entry has
- * been dirtied.
- *
- * JRM - 6/7/06
- * Added support for the trace file.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2069,15 +1693,13 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
{
herr_t result;
hbool_t dirtied;
- size_t new_size = 0;
+ hbool_t deleted;
#ifdef H5_HAVE_PARALLEL
hbool_t size_changed = FALSE;
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
#if H5AC__TRACE_FILE_ENABLED
char trace[128] = "";
- size_t trace_new_size = 0;
- unsigned trace_flags = 0;
FILE * trace_file_ptr = NULL;
#endif /* H5AC__TRACE_FILE_ENABLED */
herr_t ret_value=SUCCEED; /* Return value */
@@ -2109,39 +1731,33 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
sprintf(trace, "H5AC_unprotect 0x%lx %d",
(unsigned long)addr,
(int)(type->id));
-
- trace_flags = flags;
}
#endif /* H5AC__TRACE_FILE_ENABLED */
dirtied = (hbool_t)( ( (flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG ) ||
( ((H5AC_info_t *)thing)->dirtied ) );
+ deleted = (hbool_t)( (flags & H5C__DELETED_FLAG) == H5C__DELETED_FLAG );
- if ( dirtied ) {
+ /* Check if the size changed out from underneath us, if we're not deleting
+ * the entry.
+ */
+ if ( dirtied && !deleted ) {
+ size_t curr_size = 0;
- if ( (type->size)(f, thing, &new_size) < 0 ) {
+ if ( (type->size)(f, thing, &curr_size) < 0 ) {
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, \
"Can't get size of thing")
}
- if ( ((H5AC_info_t *)thing)->size != new_size ) {
-
-#ifdef H5_HAVE_PARALLEL
- size_changed = TRUE;
-#endif /* H5_HAVE_PARALLEL */
- flags = flags | H5AC__SIZE_CHANGED_FLAG;
-#if H5AC__TRACE_FILE_ENABLED
- trace_flags = flags;
- trace_new_size = new_size;
-#endif /* H5AC__TRACE_FILE_ENABLED */
- }
+ if(((H5AC_info_t *)thing)->size != curr_size)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, FAIL, "size of entry changed")
}
#ifdef H5_HAVE_PARALLEL
if ( ( dirtied ) && ( ((H5AC_info_t *)thing)->is_dirty == FALSE ) &&
( NULL != (aux_ptr = f->shared->cache->aux_ptr) ) ) {
- if(H5AC_log_dirtied_entry((H5AC_info_t *)thing, addr, size_changed, new_size) < 0)
+ if(H5AC_log_dirtied_entry((H5AC_info_t *)thing, addr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't log dirtied entry")
}
@@ -2163,13 +1779,12 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
#endif /* H5_HAVE_PARALLEL */
result = H5C_unprotect(f,
- dxpl_id,
+ dxpl_id,
H5AC_noblock_dxpl_id,
- type,
- addr,
- thing,
- flags,
- new_size);
+ type,
+ addr,
+ thing,
+ flags);
if ( result < 0 ) {
@@ -2200,10 +1815,9 @@ done:
#if H5AC__TRACE_FILE_ENABLED
if ( trace_file_ptr != NULL ) {
- HDfprintf(trace_file_ptr, "%s %d %x %d\n",
+ HDfprintf(trace_file_ptr, "%s %x %d\n",
trace,
- (int)trace_new_size,
- (unsigned)trace_flags,
+ (unsigned)flags,
(int)ret_value);
}
#endif /* H5AC__TRACE_FILE_ENABLED */
@@ -2225,11 +1839,8 @@ done:
* Programmer: John Mainzer
* 5/11/06
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-
#ifdef H5_HAVE_PARALLEL
herr_t
H5AC_set_write_done_callback(H5C_t * cache_ptr,
@@ -2272,15 +1883,6 @@ done:
* Programmer: Robb Matzke
* Thursday, October 30, 1997
*
- * Modifications:
- * John Mainzer 5/19/04
- * Re-write to support the new metadata cache.
- *
- * JRM - 6/7/04
- * Abstracted the guts of the function to H5C_stats()
- * in H5C.c, and then re-wrote the function as a wrapper for
- * H5C_stats().
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2313,34 +1915,8 @@ done:
* Programmer: John Mainzer
* 3/10/05
*
- * Modifications:
- *
- * JRM - 4/6/05
- * Reworked for the addition of struct H5AC_cache_config_t.
- *
- * JRM - 10/25/05
- * Added support for the new dirty_bytes_threshold field of
- * both H5AC_cache_config_t and H5AC_aux_t.
- *
- * JRM - 6/8/06
- * Added support for the new trace file related fields.
- *
- * JRM - 7/28/07
- * Added support for the new evictions enabled related fields.
- *
- * Observe that H5AC_get_cache_auto_resize_config() and
- * H5AC_set_cache_auto_resize_config() are becoming generic
- * metadata cache configuration routines as they gain
- * switches for functions that are only tenuously related
- * to auto resize configuration.
- *
- * JRM - 1/2/08
- * Added support for the new flash cache increment related
- * fields.
- *
*-------------------------------------------------------------------------
*/
-
herr_t
H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
H5AC_cache_config_t *config_ptr)
@@ -2460,10 +2036,6 @@ done:
* Programmer: John Mainzer
* 3/11/05
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2507,10 +2079,6 @@ done:
* Programmer: John Mainzer
* 3/10/05
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2538,13 +2106,8 @@ done:
*
* Programmer: John Mainzer, 3/10/05
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
-
herr_t
H5AC_reset_cache_hit_rate_stats(H5AC_t * cache_ptr)
{
@@ -2578,31 +2141,6 @@ done:
* Programmer: John Mainzer
* 3/10/05
*
- * Modifications:
- *
- * John Mainzer -- 4/6/05
- * Updated for the addition of H5AC_cache_config_t.
- *
- * John Mainzer -- 10/25/05
- * Added support for the new dirty_bytes_threshold field of
- * both H5AC_cache_config_t and H5AC_aux_t.
- *
- * John Mainzer -- 6/7/06
- * Added trace file support.
- *
- * John Mainzer -- 7/28/07
- * Added support for the new evictions enabled related fields.
- *
- * Observe that H5AC_get_cache_auto_resize_config() and
- * H5AC_set_cache_auto_resize_config() are becoming generic
- * metadata cache configuration routines as they gain
- * switches for functions that are only tenuously related
- * to auto resize configuration.
- *
- * John Mainzer -- 1/3/07
- * Updated trace file code to record the new flash cache
- * size increase related fields.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2800,21 +2338,6 @@ done:
* Programmer: John Mainzer
* 4/6/05
*
- * Modifications:
- *
- * - Added code testing the trace file configuration fields.
- * These tests are not comprehensive, as many errors cannot
- * be caught until the directives contained in these fields
- * are applied.
- * JRM - 5/15/06
- *
- * - Added code testing the evictions enabled field. At
- * present this consists of verifying that if
- * evictions_enabled is FALSE, then automatic cache
- * resizing in disabled.
- *
- * JRM - 7/28/07
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2942,13 +2465,8 @@ done:
* Programmer: John Mainzer
* 6/2/06
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
-
herr_t
H5AC_close_trace_file(H5AC_t * cache_ptr)
@@ -3005,10 +2523,6 @@ done:
* Programmer: John Mainzer
* 6/1/06
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -3128,11 +2642,8 @@ done:
*
* Programmer: John Mainzer, 7/1/05
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-
#ifdef H5_HAVE_PARALLEL
static herr_t
H5AC_broadcast_clean_list(H5AC_t * cache_ptr)
@@ -3305,21 +2816,8 @@ done:
*
* Programmer: John Mainzer, 5/15/04
*
- * Modifications:
- *
- * John Mainzer, 9/23/05
- * Rewrote function to return the value of the
- * write_permitted field in aux structure if the structure
- * exists and mpi_rank is 0.
- *
- * If the aux structure exists, but mpi_rank isn't 0, the
- * function now returns FALSE.
- *
- * In all other cases, the function returns TRUE.
- *
*-------------------------------------------------------------------------
*/
-
#ifdef H5_HAVE_PARALLEL
static herr_t
H5AC_check_if_write_permitted(const H5F_t *f,
@@ -3388,12 +2886,6 @@ done:
* Programmer: John Mainzer
* 1/26/06
*
- * Modifications:
- *
- * Updated function for flash cache increment fields.
- *
- * JRM -- 1/2/08
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -3468,11 +2960,8 @@ done:
*
* Programmer: John Mainzer, 6/29/05
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-
#ifdef H5_HAVE_PARALLEL
static herr_t
H5AC_log_deleted_entry(H5AC_t * cache_ptr,
@@ -3587,11 +3076,8 @@ done:
#ifdef H5_HAVE_PARALLEL
static herr_t
H5AC_log_dirtied_entry(const H5AC_info_t * entry_ptr,
- haddr_t addr,
- hbool_t size_changed,
- size_t new_size)
+ haddr_t addr)
{
- size_t entry_size;
H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
herr_t ret_value = SUCCEED; /* Return value */
@@ -3612,15 +3098,6 @@ H5AC_log_dirtied_entry(const H5AC_info_t * entry_ptr,
HDassert( aux_ptr != NULL );
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
- if ( size_changed ) {
-
- entry_size = new_size;
-
- } else {
-
- entry_size = entry_ptr->size;
- }
-
if ( aux_ptr->mpi_rank == 0 ) {
H5AC_slist_entry_t * slist_entry_ptr;
@@ -3649,9 +3126,9 @@ H5AC_log_dirtied_entry(const H5AC_info_t * entry_ptr,
}
aux_ptr->d_slist_len += 1;
- aux_ptr->dirty_bytes += entry_size;
+ aux_ptr->dirty_bytes += entry_ptr->size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- aux_ptr->unprotect_dirty_bytes += entry_size;
+ aux_ptr->unprotect_dirty_bytes += entry_ptr->size;
aux_ptr->unprotect_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
}
@@ -3686,7 +3163,7 @@ H5AC_log_dirtied_entry(const H5AC_info_t * entry_ptr,
}
} else {
- aux_ptr->dirty_bytes += entry_size;
+ aux_ptr->dirty_bytes += entry_ptr->size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
aux_ptr->unprotect_dirty_bytes += entry_size;
aux_ptr->unprotect_dirty_bytes_updates += 1;
@@ -3720,11 +3197,8 @@ done:
*
* Programmer: John Mainzer, 6/29/05
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-
#ifdef H5_HAVE_PARALLEL
#if 0 /* This is useful debugging code. -- JRM */
static herr_t
@@ -3882,21 +3356,15 @@ done:
*
* Programmer: John Mainzer, 6/30/05
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-
#ifdef H5_HAVE_PARALLEL
static herr_t
H5AC_log_inserted_entry(H5F_t * f,
H5AC_t * cache_ptr,
- H5AC_info_t * entry_ptr,
- const H5AC_class_t * type,
- haddr_t addr)
+ H5AC_info_t * entry_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
- size_t size;
H5AC_aux_t * aux_ptr = NULL;
H5AC_slist_entry_t * slist_entry_ptr = NULL;
@@ -3911,24 +3379,13 @@ H5AC_log_inserted_entry(H5F_t * f,
HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC );
HDassert( entry_ptr != NULL );
- HDassert( entry_ptr->addr == addr );
- HDassert( entry_ptr->type == type );
-
- /* the size field of the entry will not have been set yet, so we
- * have to obtain it directly.
- */
- if ( (type->size)(f, (void *)entry_ptr, &size) < 0 ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, \
- "Can't get size of entry to be inserted.")
- }
if ( aux_ptr->mpi_rank == 0 ) {
HDassert( aux_ptr->d_slist_ptr != NULL );
HDassert( aux_ptr->c_slist_ptr != NULL );
- if ( H5SL_search(aux_ptr->d_slist_ptr, (void *)(&addr)) == NULL ) {
+ if ( H5SL_search(aux_ptr->d_slist_ptr, (void *)(&entry_ptr->addr)) == NULL ) {
/* insert the address of the entry in the dirty entry list, and
* add its size to the dirty_bytes count.
@@ -3940,7 +3397,7 @@ H5AC_log_inserted_entry(H5F_t * f,
}
slist_entry_ptr->magic = H5AC__H5AC_SLIST_ENTRY_T_MAGIC;
- slist_entry_ptr->addr = addr;
+ slist_entry_ptr->addr = entry_ptr->addr;
if ( H5SL_insert(aux_ptr->d_slist_ptr, slist_entry_ptr,
&(slist_entry_ptr->addr)) < 0 ) {
@@ -3957,14 +3414,14 @@ H5AC_log_inserted_entry(H5F_t * f,
"Inserted entry already in dirty slist.")
}
- if ( H5SL_search(aux_ptr->c_slist_ptr, (void *)(&addr)) != NULL ) {
+ if ( H5SL_search(aux_ptr->c_slist_ptr, (void *)(&entry_ptr->addr)) != NULL ) {
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Inserted entry in clean slist.")
}
}
- aux_ptr->dirty_bytes += size;
+ aux_ptr->dirty_bytes += entry_ptr->size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
-    aux_ptr->insert_dirty_bytes += size;
+    aux_ptr->insert_dirty_bytes += entry_ptr->size;
@@ -3981,15 +3438,15 @@ done:
/*-------------------------------------------------------------------------
*
- * Function: H5AC_log_renamed_entry()
+ * Function: H5AC_log_moved_entry()
*
- * Purpose: Update the dirty_bytes count for a renamed entry.
+ * Purpose: Update the dirty_bytes count for a moved entry.
*
* WARNING
*
- * At present, the way that the rename call is used ensures
- * that the renamed entry is present in all caches by
- * renaming in a collective operation and immediately after
+ * At present, the way that the move call is used ensures
+ * that the moved entry is present in all caches by
+ * moving in a collective operation and immediately after
* unprotecting the target entry.
*
* This function uses this invariant, and will cause arcane
@@ -3997,17 +3454,17 @@ done:
* becomes impossible, we will have to rework this function
* extensively, and likely include a bit of IPC for
* synchronization. A better option might be to subsume
- * rename in the unprotect operation.
+ * move in the unprotect operation.
*
* Given that the target entry is in all caches, the function
* proceeds as follows:
*
* For processes with mpi rank other than 0, it simply checks to
- * see if the entry was dirty prior to the rename, and adds
+ * see if the entry was dirty prior to the move, and adds
* the entry's size to the dirty bytes count.
*
* In the process with mpi rank 0, the function first checks
- * to see if the entry was dirty prior to the rename. If it
+ * to see if the entry was dirty prior to the move. If it
* was, and if the entry doesn't appear in the dirtied list
* under its old address, it adds the entry's size to the
* dirty bytes count.
@@ -4023,13 +3480,11 @@ done:
*
* Programmer: John Mainzer, 6/30/05
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
-H5AC_log_renamed_entry(const H5F_t *f,
+H5AC_log_moved_entry(const H5F_t *f,
haddr_t old_addr,
haddr_t new_addr)
{
@@ -4041,7 +3496,7 @@ H5AC_log_renamed_entry(const H5F_t *f,
H5AC_slist_entry_t * slist_entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(H5AC_log_renamed_entry, FAIL)
+ FUNC_ENTER_NOAPI(H5AC_log_moved_entry, FAIL)
HDassert( f );
HDassert( f->shared );
@@ -4146,8 +3601,8 @@ H5AC_log_renamed_entry(const H5F_t *f,
aux_ptr->dirty_bytes += entry_size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- aux_ptr->rename_dirty_bytes += entry_size;
- aux_ptr->rename_dirty_bytes_updates += 1;
+ aux_ptr->move_dirty_bytes += entry_size;
+ aux_ptr->move_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
}
@@ -4173,8 +3628,8 @@ H5AC_log_renamed_entry(const H5F_t *f,
aux_ptr->dirty_bytes += entry_size;
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- aux_ptr->rename_dirty_bytes += entry_size;
- aux_ptr->rename_dirty_bytes_updates += 1;
+ aux_ptr->move_dirty_bytes += entry_size;
+ aux_ptr->move_dirty_bytes_updates += 1;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
}
@@ -4182,7 +3637,7 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5AC_log_renamed_entry() */
+} /* H5AC_log_moved_entry() */
#endif /* H5_HAVE_PARALLEL */
@@ -4251,14 +3706,8 @@ done:
* Programmer: John Mainzer
* July 5, 2005
*
- * Modifications:
- *
- * JRM -- 5/11/06
- * Added code to call the write_done callback.
- *
*-------------------------------------------------------------------------
*/
-
#ifdef H5_HAVE_PARALLEL
herr_t
H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
@@ -4290,8 +3739,8 @@ H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
(int)(aux_ptr->unprotect_dirty_bytes_updates),
(int)(aux_ptr->insert_dirty_bytes),
(int)(aux_ptr->insert_dirty_bytes_updates),
- (int)(aux_ptr->rename_dirty_bytes),
- (int)(aux_ptr->rename_dirty_bytes_updates));
+ (int)(aux_ptr->move_dirty_bytes),
+ (int)(aux_ptr->move_dirty_bytes_updates));
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
if ( do_barrier ) {
@@ -4352,8 +3801,8 @@ H5AC_propagate_flushed_and_still_clean_entries_list(H5F_t * f,
aux_ptr->unprotect_dirty_bytes_updates = 0;
aux_ptr->insert_dirty_bytes = 0;
aux_ptr->insert_dirty_bytes_updates = 0;
- aux_ptr->rename_dirty_bytes = 0;
- aux_ptr->rename_dirty_bytes_updates = 0;
+ aux_ptr->move_dirty_bytes = 0;
+ aux_ptr->move_dirty_bytes_updates = 0;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
done:
@@ -4380,11 +3829,8 @@ done:
*
* Programmer: John Mainzer, 7/4/05
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-
#ifdef H5_HAVE_PARALLEL
static herr_t
H5AC_receive_and_apply_clean_list(H5F_t * f,
@@ -4547,8 +3993,8 @@ H5AC_flush_entries(H5F_t *f)
(int)(aux_ptr->unprotect_dirty_bytes_updates),
(int)(aux_ptr->insert_dirty_bytes),
(int)(aux_ptr->insert_dirty_bytes_updates),
- (int)(aux_ptr->rename_dirty_bytes),
- (int)(aux_ptr->rename_dirty_bytes_updates));
+ (int)(aux_ptr->move_dirty_bytes),
+ (int)(aux_ptr->move_dirty_bytes_updates));
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
/* to prevent "messages from the future" we must synchronize all
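
The H5AC.c hunks above simplify the parallel dirty-bytes logging: H5AC_log_dirtied_entry and H5AC_log_inserted_entry now read entry_ptr->size directly instead of carrying size_changed/new_size parameters or querying the client's size callback. Below is a minimal standalone sketch of the accounting rule those functions and their header comments describe (rank 0 counts each entry at most once, keyed by address; other ranks simply add the entry's current size). All names here (toy_aux_t, toy_entry_t, toy_log_dirtied) are hypothetical stand-ins, not HDF5 types; the real code uses H5AC_aux_t, skip lists, and HGOTO_ERROR-style error handling.

#include <stdbool.h>
#include <stddef.h>

typedef struct toy_entry_t { unsigned long addr; size_t size; } toy_entry_t;

typedef struct toy_aux_t {
    int     mpi_rank;               /* rank in the metadata-write communicator */
    size_t  dirty_bytes;            /* dirty metadata created since last sync  */
    unsigned long dirty_addrs[256]; /* rank 0 only: addresses already counted  */
    int           n_dirty;
} toy_aux_t;

static bool addr_listed(const toy_aux_t *aux, unsigned long addr)
{
    for (int i = 0; i < aux->n_dirty; i++)
        if (aux->dirty_addrs[i] == addr)
            return true;
    return false;
}

/* After the patch, the entry's current size is read from the entry itself;
 * no size_changed / new_size parameters are passed in. */
static void toy_log_dirtied(toy_aux_t *aux, const toy_entry_t *entry)
{
    if (aux->mpi_rank == 0) {
        /* rank 0 counts each entry once, keyed by address */
        if (!addr_listed(aux, entry->addr) && aux->n_dirty < 256) {
            aux->dirty_addrs[aux->n_dirty++] = entry->addr;
            aux->dirty_bytes += entry->size;
        }
    } else {
        /* other ranks just tally the entry's current size */
        aux->dirty_bytes += entry->size;
    }
}

The point of the simplification is that the entry's size field is always current by the time these logging calls run, so there is nothing for the caller to report separately.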
diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h
index 911aceb..d5346f5 100644
--- a/src/H5ACpkg.h
+++ b/src/H5ACpkg.h
@@ -107,7 +107,7 @@
*
* Maintaining this count is easy for all processes not on process 0 --
* all that is necessary is to add the size of the entry to the total
- * whenever there is an insertion, a rename of a previously clean entry,
+ * whenever there is an insertion, a move of a previously clean entry,
* or whenever a previously clean entry is marked dirty in an unprotect.
*
* On process 0, we have to be careful not to count dirty bytes twice.
@@ -197,18 +197,18 @@
* been created via insert operations since the last time
* the cleaned list was propagated.
*
- * rename_dirty_bytes: This field only exists when the
+ * move_dirty_bytes: This field only exists when the
* H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
*
* It is used to track the number of dirty bytes created
- * via rename operations since the last time the cleaned
+ * via move operations since the last time the cleaned
* list was propagated.
*
- * rename_dirty_bytes_updates: This field only exists when the
+ * move_dirty_bytes_updates: This field only exists when the
* H5AC_DEBUG_DIRTY_BYTES_CREATION #define is TRUE.
*
* It is used to track the number of times dirty bytes have
- * been created via rename operations since the last time
+ * been created via move operations since the last time
* the cleaned list was propagated.
*
* d_slist_ptr: Pointer to an instance of H5SL_t used to maintain a list
@@ -224,7 +224,7 @@
*
* 1) an entry is inserted in the metadata cache, or
*
- * 2) a previously clean entry is renamed, and it does not
+ * 2) a previously clean entry is moved, and it does not
* already appear in the dirty entry list, or
*
* 3) a previously clean entry is unprotected with the
@@ -234,7 +234,7 @@
* Entries are added to the dirty entry list whenever they cause
* the dirty bytes count to be increased. They are removed
* when they appear in a clean entries broadcast. Note that
- * renames must be reflected in the dirty entry list.
+ * moves must be reflected in the dirty entry list.
*
* To reiterate, this field is only used on process 0 -- it
* should be NULL on all other processes.
@@ -303,8 +303,8 @@ typedef struct H5AC_aux_t
int32_t insert_dirty_bytes;
int32_t insert_dirty_bytes_updates;
- int32_t rename_dirty_bytes;
- int32_t rename_dirty_bytes_updates;
+ int32_t move_dirty_bytes;
+ int32_t move_dirty_bytes_updates;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
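
The field descriptions above rename the debug counters from rename_* to move_*; they exist only when H5AC_DEBUG_DIRTY_BYTES_CREATION is defined, track how many dirty bytes each kind of operation produced, and are zeroed each time the cleaned list is propagated. A tiny illustrative sketch of that pattern follows; dbg_counters_t, count_move, and reset_on_propagate are hypothetical names, not HDF5 identifiers.

#include <stdint.h>
#include <string.h>

typedef struct dbg_counters_t {
    int32_t unprotect_dirty_bytes, unprotect_dirty_bytes_updates;
    int32_t insert_dirty_bytes,    insert_dirty_bytes_updates;
    int32_t move_dirty_bytes,      move_dirty_bytes_updates;   /* was rename_* */
} dbg_counters_t;

static void count_move(dbg_counters_t *c, int32_t entry_size)
{
    c->move_dirty_bytes         += entry_size;   /* bytes contributed by moves */
    c->move_dirty_bytes_updates += 1;            /* number of move operations  */
}

static void reset_on_propagate(dbg_counters_t *c)
{
    memset(c, 0, sizeof(*c));   /* mirrors the field-by-field reset in H5AC.c */
}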
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 8a2f706..221dd41 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -138,7 +138,7 @@ typedef enum {
#define H5AC_CALLBACK__NO_FLAGS_SET H5C_CALLBACK__NO_FLAGS_SET
#define H5AC_CALLBACK__SIZE_CHANGED_FLAG H5C_CALLBACK__SIZE_CHANGED_FLAG
-#define H5AC_CALLBACK__RENAMED_FLAG H5C_CALLBACK__RENAMED_FLAG
+#define H5AC_CALLBACK__MOVED_FLAG H5C_CALLBACK__MOVED_FLAG
/* Aliases for 'notify action' type & values */
typedef H5C_notify_action_t H5AC_notify_action_t;
@@ -218,7 +218,7 @@ extern hid_t H5AC_dxpl_id;
/* Dataset transfer property list for independent metadata I/O calls */
/* (just "library internal" set - i.e. independent transfer mode) */
/* (Global variable declaration, definition is in H5AC.c) */
-extern hid_t H5AC_ind_dxpl_id;
+H5_DLLVAR hid_t H5AC_ind_dxpl_id;
/* Default cache configuration. */
@@ -307,7 +307,6 @@ extern hid_t H5AC_ind_dxpl_id;
#define H5AC__SET_FLUSH_MARKER_FLAG H5C__SET_FLUSH_MARKER_FLAG
#define H5AC__DELETED_FLAG H5C__DELETED_FLAG
#define H5AC__DIRTIED_FLAG H5C__DIRTIED_FLAG
-#define H5AC__SIZE_CHANGED_FLAG H5C__SIZE_CHANGED_FLAG
#define H5AC__PIN_ENTRY_FLAG H5C__PIN_ENTRY_FLAG
#define H5AC__UNPIN_ENTRY_FLAG H5C__UNPIN_ENTRY_FLAG
#define H5AC__FLUSH_INVALIDATE_FLAG H5C__FLUSH_INVALIDATE_FLAG
@@ -343,17 +342,15 @@ H5_DLL herr_t H5AC_create_flush_dependency(void *parent_thing, void *child_thing
H5_DLL void * H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
haddr_t addr, void *udata,
H5AC_protect_t rw);
-H5_DLL herr_t H5AC_resize_pinned_entry(void *thing, size_t new_size);
+H5_DLL herr_t H5AC_resize_entry(void *thing, size_t new_size);
H5_DLL herr_t H5AC_unpin_entry(void *thing);
H5_DLL herr_t H5AC_destroy_flush_dependency(void *parent_thing, void *child_thing);
H5_DLL herr_t H5AC_unprotect(H5F_t *f, hid_t dxpl_id,
const H5AC_class_t *type, haddr_t addr,
void *thing, unsigned flags);
H5_DLL herr_t H5AC_flush(H5F_t *f, hid_t dxpl_id);
-H5_DLL herr_t H5AC_mark_pinned_entry_dirty(void *thing, hbool_t size_changed,
- size_t new_size);
-H5_DLL herr_t H5AC_mark_pinned_or_protected_entry_dirty(void *thing);
-H5_DLL herr_t H5AC_rename(H5F_t *f, const H5AC_class_t *type,
+H5_DLL herr_t H5AC_mark_entry_dirty(void *thing);
+H5_DLL herr_t H5AC_move_entry(H5F_t *f, const H5AC_class_t *type,
haddr_t old_addr, haddr_t new_addr);
H5_DLL herr_t H5AC_dest(H5F_t *f, hid_t dxpl_id);
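
Taken together, the declarations above replace H5AC_resize_pinned_entry, H5AC_mark_pinned_entry_dirty, H5AC_mark_pinned_or_protected_entry_dirty, and H5AC_rename with H5AC_resize_entry, H5AC_mark_entry_dirty, and H5AC_move_entry, and drop H5AC__SIZE_CHANGED_FLAG. A hedged sketch of the resulting call pattern is below; it is a fragment, not standalone code, and assumes HDF5-internal context (f, dxpl_id, thing, a cache class in type, addr, old_addr/new_addr, new_size) already in scope, with error handling elided.

    /* dirty an entry that is pinned and/or protected (previously two calls) */
    if (H5AC_mark_entry_dirty(thing) < 0)
        /* ...error... */;

    /* change an entry's size; per the H5C_resize_entry comment this also
     * dirties the entry, so no separate size_changed flag is needed */
    if (H5AC_resize_entry(thing, new_size) < 0)
        /* ...error... */;

    /* relocate an entry -- "rename" is now "move" */
    if (H5AC_move_entry(f, type, old_addr, new_addr) < 0)
        /* ...error... */;

    /* at unprotect time, H5AC__SIZE_CHANGED_FLAG is gone; callers resize
     * explicitly (above) and pass only the remaining flags */
    if (H5AC_unprotect(f, dxpl_id, type, addr, thing, H5AC__DIRTIED_FLAG) < 0)
        /* ...error... */;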
diff --git a/src/H5B.c b/src/H5B.c
index 50eea4c..37d40bf 100644
--- a/src/H5B.c
+++ b/src/H5B.c
@@ -668,7 +668,7 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
bt = NULL; /* Make certain future references will be caught */
/* Move the location of the old root on the disk */
- if(H5AC_rename(f, H5AC_BT, addr, old_root) < 0)
+ if(H5AC_move_entry(f, H5AC_BT, addr, old_root) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTSPLIT, FAIL, "unable to move B-tree root node")
/* clear the old root info at the old address (we already copied it) */
@@ -680,12 +680,11 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr,
new_bt->nchildren = 2;
new_bt->child[0] = old_root;
- HDmemcpy(H5B_NKEY(new_bt,shared,0), lt_key, shared->type->sizeof_nkey);
+ HDmemcpy(H5B_NKEY(new_bt, shared, 0), lt_key, shared->type->sizeof_nkey);
new_bt->child[1] = child;
- HDmemcpy(H5B_NKEY(new_bt,shared,1), md_key, shared->type->sizeof_nkey);
-
- HDmemcpy(H5B_NKEY(new_bt,shared,2), rt_key, shared->type->sizeof_nkey);
+ HDmemcpy(H5B_NKEY(new_bt, shared, 1), md_key, shared->type->sizeof_nkey);
+ HDmemcpy(H5B_NKEY(new_bt, shared, 2), rt_key, shared->type->sizeof_nkey);
/* Insert the modified copy of the old root into the file again */
if(H5AC_set(f, dxpl_id, H5AC_BT, addr, new_bt, H5AC__NO_FLAGS_SET) < 0)
diff --git a/src/H5B2cache.c b/src/H5B2cache.c
index a28811a..6a3b5da 100644
--- a/src/H5B2cache.c
+++ b/src/H5B2cache.c
@@ -157,7 +157,6 @@ H5B2_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
H5B2_create_t cparam; /* B-tree creation parameters */
H5B2_subid_t id; /* ID of B-tree class, as found in file */
uint16_t depth; /* Depth of B-tree */
- size_t size; /* Header size */
uint32_t stored_chksum; /* Stored metadata checksum value */
uint32_t computed_chksum; /* Computed metadata checksum value */
H5WB_t *wb = NULL; /* Wrapped buffer for header data */
@@ -174,22 +173,19 @@ H5B2_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HDassert(udata);
/* Allocate new B-tree header and reset cache info */
- if(NULL == (hdr = H5B2_hdr_alloc(f)))
+ if(NULL == (hdr = H5B2_hdr_alloc(udata->f)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "allocation failed for B-tree header")
/* Wrap the local buffer for serialized header info */
if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, NULL, "can't wrap buffer")
- /* Compute the size of the serialized B-tree header on disk */
- size = H5B2_HEADER_SIZE(hdr);
-
/* Get a pointer to a buffer that's large enough for header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
+ if(NULL == (buf = (uint8_t *)H5WB_actual(wb, hdr->hdr_size)))
HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, NULL, "can't get actual buffer")
/* Read header from disk */
- if(H5F_block_read(f, H5FD_MEM_BTREE, addr, size, dxpl_id, buf) < 0)
+ if(H5F_block_read(f, H5FD_MEM_BTREE, addr, hdr->hdr_size, dxpl_id, buf) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_READERROR, NULL, "can't read B-tree header")
/* Get temporary pointer to serialized header */
@@ -231,10 +227,10 @@ H5B2_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
UINT32DECODE(p, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - (const uint8_t *)buf) == size);
+ HDassert((size_t)(p - (const uint8_t *)buf) == hdr->hdr_size);
/* Compute checksum on entire header */
- computed_chksum = H5_checksum_metadata(buf, (size - H5B2_SIZEOF_CHKSUM), 0);
+ computed_chksum = H5_checksum_metadata(buf, (hdr->hdr_size - H5B2_SIZEOF_CHKSUM), 0);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -242,7 +238,7 @@ H5B2_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Initialize B-tree header info */
cparam.cls = H5B2_client_class_g[id];
- if(H5B2_hdr_init(udata->f, hdr, &cparam, udata->ctx_udata, depth) < 0)
+ if(H5B2_hdr_init(hdr, &cparam, udata->ctx_udata, depth) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, NULL, "can't initialize B-tree header info")
/* Set the B-tree header's address */
@@ -294,7 +290,6 @@ H5B2_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
if(hdr->cache_info.is_dirty) {
uint8_t *buf; /* Pointer to header buffer */
uint8_t *p; /* Pointer into raw data buffer */
- size_t size; /* Header size on disk */
uint32_t metadata_chksum; /* Computed metadata checksum value */
/* Set the B-tree header's file context for this operation */
@@ -304,11 +299,8 @@ H5B2_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "can't wrap buffer")
- /* Compute the size of the serialized B-tree header on disk */
- size = H5B2_HEADER_SIZE(hdr);
-
/* Get a pointer to a buffer that's large enough for header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
+ if(NULL == (buf = (uint8_t *)H5WB_actual(wb, hdr->hdr_size)))
HGOTO_ERROR(H5E_BTREE, H5E_NOSPACE, FAIL, "can't get actual buffer")
/* Get temporary pointer to serialized header */
@@ -345,14 +337,14 @@ H5B2_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
H5F_ENCODE_LENGTH(f, p, hdr->root.all_nrec);
/* Compute metadata checksum */
- metadata_chksum = H5_checksum_metadata(buf, (size - H5B2_SIZEOF_CHKSUM), 0);
+ metadata_chksum = H5_checksum_metadata(buf, (hdr->hdr_size - H5B2_SIZEOF_CHKSUM), 0);
/* Metadata checksum */
UINT32ENCODE(p, metadata_chksum);
/* Write the B-tree header. */
- HDassert((size_t)(p - buf) == size);
- if(H5F_block_write(f, H5FD_MEM_BTREE, addr, size, dxpl_id, buf) < 0)
+ HDassert((size_t)(p - buf) == hdr->hdr_size);
+ if(H5F_block_write(f, H5FD_MEM_BTREE, addr, hdr->hdr_size, dxpl_id, buf) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTFLUSH, FAIL, "unable to save B-tree header to disk")
hdr->cache_info.is_dirty = FALSE;
@@ -402,7 +394,7 @@ H5B2_cache_hdr_dest(H5F_t *f, H5B2_hdr_t *hdr)
if(hdr->cache_info.free_file_space_on_destroy) {
/* Release the space on disk */
/* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_BTREE, H5AC_dxpl_id, hdr->cache_info.addr, (hsize_t)H5B2_HEADER_SIZE(hdr)) < 0)
+ if(H5MF_xfree(f, H5FD_MEM_BTREE, H5AC_dxpl_id, hdr->cache_info.addr, (hsize_t)hdr->hdr_size) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free v2 B-tree header")
} /* end if */
@@ -477,7 +469,7 @@ H5B2_cache_hdr_size(const H5F_t UNUSED *f, const H5B2_hdr_t *hdr, size_t *size_p
HDassert(size_ptr);
/* Set size value */
- *size_ptr = H5B2_HEADER_SIZE(hdr);
+ *size_ptr = hdr->hdr_size;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5B2_cache_hdr_size() */
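
The H5B2cache.c hunks above stop recomputing H5B2_HEADER_SIZE(hdr) in the load, flush, destroy, and size callbacks and instead read an hdr_size field that is filled in once at allocation time (see the H5B2hdr.c diff below). A minimal standalone sketch of that compute-once pattern follows; toy_hdr_t, toy_hdr_alloc, and the constant inside TOY_HEADER_SIZE are hypothetical stand-ins, not the HDF5 definitions.

#include <stdlib.h>
#include <stddef.h>

typedef struct toy_hdr_t {
    size_t sizeof_addr;   /* per-file address size */
    size_t sizeof_size;   /* per-file length size */
    size_t hdr_size;      /* serialized size, computed once at alloc time */
} toy_hdr_t;

/* stand-in for the H5B2_HEADER_SIZE() macro: fixed fields plus two
 * file-dependent encoded values (the 22 is illustrative only) */
#define TOY_HEADER_SIZE(h) (22 + (h)->sizeof_addr + (h)->sizeof_size)

static toy_hdr_t *toy_hdr_alloc(size_t sizeof_addr, size_t sizeof_size)
{
    toy_hdr_t *hdr = calloc(1, sizeof(*hdr));
    if (hdr) {
        hdr->sizeof_addr = sizeof_addr;
        hdr->sizeof_size = sizeof_size;
        hdr->hdr_size    = TOY_HEADER_SIZE(hdr);  /* cache it once */
    }
    return hdr;
}

/* later callbacks (load, flush, size, free) reuse the cached value
 * instead of re-expanding the macro each time */
static size_t toy_hdr_ondisk_size(const toy_hdr_t *hdr) { return hdr->hdr_size; }

The trade is a few bytes per header in exchange for not re-deriving the on-disk size in every cache callback.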
diff --git a/src/H5B2hdr.c b/src/H5B2hdr.c
index a219859..94e2c54 100644
--- a/src/H5B2hdr.c
+++ b/src/H5B2hdr.c
@@ -107,8 +107,8 @@ H5FL_SEQ_DEFINE(H5B2_node_info_t);
*-------------------------------------------------------------------------
*/
herr_t
-H5B2_hdr_init(H5F_t *f, H5B2_hdr_t *hdr, const H5B2_create_t *cparam,
- void *ctx_udata, uint16_t depth)
+H5B2_hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam, void *ctx_udata,
+ uint16_t depth)
{
size_t sz_max_nrec; /* Temporary variable for range checking */
unsigned u_max_nrec_size; /* Temporary variable for range checking */
@@ -120,7 +120,6 @@ H5B2_hdr_init(H5F_t *f, H5B2_hdr_t *hdr, const H5B2_create_t *cparam,
/*
* Check arguments.
*/
- HDassert(f);
HDassert(hdr);
HDassert(cparam);
HDassert(cparam->cls);
@@ -133,7 +132,6 @@ H5B2_hdr_init(H5F_t *f, H5B2_hdr_t *hdr, const H5B2_create_t *cparam,
HDassert(cparam->merge_percent < (cparam->split_percent / 2));
/* Initialize basic information */
- hdr->f = f;
hdr->rc = 0;
hdr->pending_delete = FALSE;
@@ -258,6 +256,7 @@ H5B2_hdr_alloc(H5F_t *f)
hdr->f = f;
hdr->sizeof_addr = H5F_SIZEOF_ADDR(f);
hdr->sizeof_size = H5F_SIZEOF_SIZE(f);
+ hdr->hdr_size = H5B2_HEADER_SIZE(hdr);
hdr->root.addr = HADDR_UNDEF;
/* Set return value */
@@ -302,11 +301,11 @@ H5B2_hdr_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam,
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, HADDR_UNDEF, "allocation failed for B-tree header")
/* Initialize shared B-tree info */
- if(H5B2_hdr_init(f, hdr, cparam, ctx_udata, (uint16_t)0) < 0)
+ if(H5B2_hdr_init(hdr, cparam, ctx_udata, (uint16_t)0) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, HADDR_UNDEF, "can't create shared B-tree info")
/* Allocate space for the header on disk */
- if(HADDR_UNDEF == (hdr->addr = H5MF_alloc(f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)H5B2_HEADER_SIZE(hdr))))
+ if(HADDR_UNDEF == (hdr->addr = H5MF_alloc(f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)hdr->hdr_size)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, HADDR_UNDEF, "file allocation failed for B-tree header")
/* Cache the new B-tree node */
@@ -479,7 +478,7 @@ H5B2_hdr_dirty(H5B2_hdr_t *hdr)
HDassert(hdr);
/* Mark B-tree header as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(hdr) < 0)
+ if(H5AC_mark_entry_dirty(hdr) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTMARKDIRTY, FAIL, "unable to mark v2 B-tree header as dirty")
done:
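
With the H5B2hdr.c changes above, the file-dependent fields (f, sizeof_addr, sizeof_size, and the new hdr_size) are recorded by H5B2_hdr_alloc, so H5B2_hdr_init no longer needs an H5F_t pointer. The fragment below sketches the resulting construction sequence, mirroring the H5B2_hdr_create hunk; error branches are elided, and it assumes the surrounding HDF5-internal context (f, dxpl_id, cparam, ctx_udata, hdr) rather than compiling on its own.

    /* 1) allocate: records hdr->f, address/length sizes, and hdr->hdr_size */
    if (NULL == (hdr = H5B2_hdr_alloc(f)))
        /* ...error... */;

    /* 2) initialize: no longer takes the file pointer, only B-tree parameters */
    if (H5B2_hdr_init(hdr, cparam, ctx_udata, (uint16_t)0) < 0)
        /* ...error... */;

    /* 3) allocate file space using the cached on-disk size */
    if (HADDR_UNDEF == (hdr->addr = H5MF_alloc(f, H5FD_MEM_BTREE, dxpl_id,
                                               (hsize_t)hdr->hdr_size)))
        /* ...error... */;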
diff --git a/src/H5B2pkg.h b/src/H5B2pkg.h
index 35c2eb1..543fa3f 100644
--- a/src/H5B2pkg.h
+++ b/src/H5B2pkg.h
@@ -161,6 +161,7 @@ typedef struct H5B2_hdr_t {
/* Shared internal data structures (not stored) */
H5F_t *f; /* Pointer to the file that the B-tree is in */
haddr_t addr; /* Address of B-tree header in the file */
+ size_t hdr_size; /* Size of the B-tree header on disk */
size_t rc; /* Reference count of nodes using this header */
size_t file_rc; /* Reference count of files using this header */
hbool_t pending_delete; /* B-tree is pending deletion */
@@ -273,8 +274,8 @@ extern const H5B2_class_t *const H5B2_client_class_g[H5B2_NUM_BTREE_ID];
H5_DLL H5B2_hdr_t *H5B2_hdr_alloc(H5F_t *f);
H5_DLL haddr_t H5B2_hdr_create(H5F_t *f, hid_t dxpl_id,
const H5B2_create_t *cparam, void *ctx_udata);
-H5_DLL herr_t H5B2_hdr_init(H5F_t *f, H5B2_hdr_t *hdr,
- const H5B2_create_t *cparam, void *ctx_udata, uint16_t depth);
+H5_DLL herr_t H5B2_hdr_init(H5B2_hdr_t *hdr, const H5B2_create_t *cparam,
+ void *ctx_udata, uint16_t depth);
H5_DLL herr_t H5B2_hdr_incr(H5B2_hdr_t *hdr);
H5_DLL herr_t H5B2_hdr_decr(H5B2_hdr_t *hdr);
H5_DLL herr_t H5B2_hdr_fuse_incr(H5B2_hdr_t *hdr);
diff --git a/src/H5B2stat.c b/src/H5B2stat.c
index a15ff11..afd1e33 100644
--- a/src/H5B2stat.c
+++ b/src/H5B2stat.c
@@ -131,7 +131,7 @@ H5B2_size(H5B2_t *bt2, hid_t dxpl_id, hsize_t *btree_size)
hdr = bt2->hdr;
/* Add size of header to B-tree metadata total */
- *btree_size += H5B2_HEADER_SIZE(hdr);
+ *btree_size += hdr->hdr_size;
/* Iterate through records */
if(hdr->root.node_nrec > 0) {
diff --git a/src/H5Bdbg.c b/src/H5Bdbg.c
index 3212416..55d9617 100644
--- a/src/H5Bdbg.c
+++ b/src/H5Bdbg.c
@@ -189,7 +189,7 @@ H5B_assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void
struct child_t *next;
} *head = NULL, *tail = NULL, *prev = NULL, *cur = NULL, *tmp = NULL;
- FUNC_ENTER_NOAPI_NOFUNC(H5B_assert)
+ FUNC_ENTER_NOAPI_NOINIT(H5B_assert)
if(0 == ncalls++) {
if(H5DEBUG(B))
@@ -281,6 +281,7 @@ H5B_assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void
head = tmp;
} /* end while */
+done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B_assert() */
#endif /* H5B_DEBUG */
diff --git a/src/H5C.c b/src/H5C.c
index a3a857a..bfb7e05 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -32,24 +32,6 @@
* For a detailed overview of the cache, please see the
* header comment for H5C_t in H5Cpkg.h.
*
- * Modifications:
- *
- * QAK - 11/27/2004
- * Switched over to using skip list routines instead of TBBT
- * routines.
- *
- * JRM - 12/15/04
- * Added code supporting manual and automatic cache resizing.
- * See the header for H5C_auto_size_ctl_t in H5Cprivate.h for
- * an overview.
- *
- * Some elements of the automatic cache resize code depend on
- * the LRU list. Thus if we ever choose to support a new
- * replacement policy, we will either have to disable those
- * elements of the auto resize code when running the new
- * policy, or modify them to make use of similar information
- * maintained by the new policy code.
- *
*-------------------------------------------------------------------------
*/
@@ -63,13 +45,6 @@
*
* - Change protect/unprotect to lock/unlock.
*
- * - Change the way the dirty flag is set. Probably pass it in
- * as a parameter in unprotect & insert.
- *
- * - Size should also be passed in as a parameter in insert and
- * unprotect -- or some other way should be found to advise the
- * cache of changes in entry size.
- *
* - Flush entries in increasing address order in
* H5C_make_space_in_cache().
*
@@ -358,7 +333,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
}
-
/*-------------------------------------------------------------------------
* Function: H5C_create
@@ -382,70 +356,12 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
- * Modifications:
- *
- * JRM -- 7/20/04
- * Updated for the addition of the hash table.
- *
- * JRM -- 10/5/04
- * Added call to H5C_reset_cache_hit_rate_stats(). Also
- * added initialization for cache_is_full flag and for
- * resize_ctl.
- *
- * JRM -- 11/12/04
- * Added initialization for the new size_decreased field.
- *
- * JRM -- 11/17/04
- * Added/updated initialization for the automatic cache
- * size control data structures.
- *
- * JRM -- 6/24/05
- * Added support for the new write_permitted field of
- * the H5C_t structure.
- *
- * JRM -- 7/5/05
- * Added the new log_flush parameter and supporting code.
- *
- * JRM -- 9/21/05
- * Added the new aux_ptr parameter and supporting code.
- *
- * JRM -- 1/20/06
- * Added initialization of the new prefix field in H5C_t.
- *
- * JRM -- 3/16/06
- * Added initialization for the pinned entry related fields.
- *
- * JRM -- 5/31/06
- * Added initialization for the trace_file_ptr field.
- *
- * JRM -- 8/19/06
- * Added initialization for the flush_in_progress field.
- *
- * JRM -- 8/25/06
- * Added initialization for the slist_len_increase and
- * slist_size_increase fields. These fields are used
- * for sanity checking in the flush process, and are not
- * compiled in unless H5C_DO_SANITY_CHECKS is TRUE.
- *
- * JRM -- 3/28/07
- * Added initialization for the new is_read_only and
- * ro_ref_count fields.
- *
- * JRM -- 7/27/07
- * Added initialization for the new evictions_enabled
- * field of H5C_t.
- *
- * JRM -- 12/31/07
- * Added initialization for the new flash cache size increase
- * related fields of H5C_t.
- *
* JRM -- 11/5/08
* Added initialization for the new clean_index_size and
* dirty_index_size fields of H5C_t.
*
*-------------------------------------------------------------------------
*/
-
H5C_t *
H5C_create(size_t max_cache_size,
size_t min_clean_size,
@@ -669,19 +585,6 @@ done:
* Programmer: John Mainzer
* 10/27/04
*
- * Modifications:
- *
- * JRM -- 11/22/04
- * Reworked function to adapt it to the addition of the
- * ageout method of cache size reduction.
- *
- * JRM -- 1/19/06
- * Updated function for display the new prefix field of
- * H5C_t in output.
- *
- * JRM 12/31/07
- * Updated function to handle flash size increases.
- *
*-------------------------------------------------------------------------
*/
void
@@ -926,10 +829,6 @@ done:
* Programmer: John Mainzer
* 6/29/06
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1052,77 +951,6 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
- * Modifications:
- *
- * JRM -- 7/20/04
- * Modified the function for the addition of the hash table.
- *
- * JRM -- 11/22/04
- * Added code to remove all epoch markers (if any) from the
- * LRU list before a destroy. Strictly speaking, this isn't
- * necessary, as the marker entries reside only in the LRU
- * list, never in the index or in the tree. However, it
- * never hurts to tidy up.
- *
- * JRM -- 1/6/05
- * Reworked code to support the new
- * H5C__FLUSH_MARKED_ENTRIES_FLAG, and for the replacement of
- * H5F_FLUSH_INVALIDATE flag with H5C__FLUSH_INVALIDATE_FLAG.
- *
- * Note that the H5C__FLUSH_INVALIDATE_FLAG takes precidence
- * over the H5C__FLUSH_MARKED_ENTRIES_FLAG. Thus if both are
- * set, the functions behaves as if just the
- * H5C__FLUSH_INVALIDATE_FLAG was set.
- *
- * The H5C__FLUSH_CLEAR_ONLY_FLAG flag can co-exist with
- * either the H5C__FLUSH_MARKED_ENTRIES_FLAG, or the
- * H5C__FLUSH_INVALIDATE_FLAG. In all cases, it is simply
- * passed along to H5C_flush_single_entry(). In the case of
- * H5C__FLUSH_MARKED_ENTRIES_FLAG, it will only apply to
- * the marked entries.
- *
- * JRM -- 10/15/05
- * Added code supporting the new
- * H5C__FLUSH_IGNORE_PROTECTED_FLAG. We need this flag, as
- * we now use this function to flush large number of entries
- * in increasing address order. We do this by marking the
- * entries to be flushed, calling this function to flush them,
- * and then restoring LRU order.
- *
- * However, it is possible that the cache will contain other,
- * unmarked protected entries, when we make this call. This
- * new flag allows us to ignore them.
- *
- * Note that even with this flag set, it is still an error
- * to try to flush a protected entry.
- *
- * JRM -- 3/25/06
- * Updated function to handle pinned entries.
- *
- * JRM -- 8/19/06
- * Added code managing the new flush_in_progress field of
- * H5C_t.
- *
- * Also reworked function to allow for the possibility that
- * entries will be dirtied, resized, or renamed during flush
- * callbacks. As a result, we may have to make multiple
- * passes through the skip list before the cache is flushed.
- *
- * JRM -- 10/13/07
- * Added code to detect and manage the case in which a
- * flush callback changes the s-list out from under
- * the function. The only way I can think of in which this
- * can happen is if a flush function loads an entry
- * into the cache that isn't there already. Quincey tells
- * me that this will never happen, but I'm not sure I
- * believe him.
- *
- * Note that this is a pretty bad scenario if it ever
- * happens. The code I have added should allow us to
- * handle the situation under all but the worst conditions,
- * but one can argue that I should just scream and die if I
- * ever detect the condidtion.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1235,7 +1063,7 @@ H5C_flush_cache(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id, unsign
*
* To make things more entertaining, with the advent of the
* fractal heap, the entry flush callback can cause entries
- * to be dirtied, resized, and/or renamed.
+ * to be dirtied, resized, and/or moved.
*
* To deal with this, we first make note of the initial
* skip list length and size:
@@ -1505,34 +1333,6 @@ done:
* Programmer: John Mainzer
* 9/16/05
*
- * Modifications:
- *
- * Re-wrote function to flush dirty entries in increasing
- * address order, while maintaining LRU order in the LRU list
- * upon return.
- *
- * Do this by scanning up the dirty LRU list for entries to
- * flush to reach min clean size, setting their flush_marker
- * flags, and recording their addresses in the order
- * encountered.
- *
- * Then call H5C_flush_cache() to flush the marked entries.
- *
- * Finally, use the list of marked entries to force the
- * correct LRU list order after the flush.
- *
- * JRM - 10/13/05
- *
- * This change had the oposite of the desired effect. Lets
- * leave it in (albeit commented out for now). If we can't
- * find a case where it helps, lets get rid of it.
- *
- *
- * Added some sanity checks to the change which verify the
- * expected values of the new is_read_only and ro_ref_count
- * fields.
- * JRM - 3/29/07
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1720,10 +1520,6 @@ done:
* Programmer: John Mainzer
* 10/8/04
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1770,10 +1566,6 @@ done:
* Programmer: John Mainzer
* 10/8/04
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1833,10 +1625,6 @@ done:
* Programmer: John Mainzer
* 10/7/04
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1898,11 +1686,6 @@ done:
* Programmer: John Mainzer
* 7/1/05
*
- * Modifications:
- *
- * JRM -- 4/26/06
- * Added the is_pinned_ptr parameter and supporting code.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2002,10 +1785,6 @@ done:
* Programmer: John Mainzer
* 7/27/07
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2124,59 +1903,6 @@ H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr,
* Programmer: John Mainzer
* 6/2/04
*
- * Modifications:
- *
- * JRM -- 7/21/04
- * Updated function for the addition of the hash table.
- *
- * JRM -- 10/28/04
- * Added code to set the cache_full flag to TRUE when ever
- * we need to make space in the cache.
- *
- * JRM -- 11/22/04
- * Updated function for the addition of the first_flush_ptr
- * parameter to H5C_make_space_in_cache().
- *
- * JRM -- 1/6/05
- * Added the flags parameter, and code supporting
- * H5C__SET_FLUSH_MARKER_FLAG. Note that this flag is
- * ignored unless the new entry is dirty.
- *
- * JRM -- 6/6/05
- * Added code to force all inserted entries to be dirty.
- * This is part of a set of changes moving management of the
- * is_dirty field of H5C_cache_entry_t into the H5C code.
- *
- * JRM -- 6/24/05
- * Added support for the new write_permitted field of
- * the H5C_t structure.
- *
- * JRM -- 3/16/06
- * Added initialization for the new is_pinned field of the
- * H5C_cache_entry_t structure.
- *
- * JRM -- 5/3/06
- * Added initialization for the new dirtied field of the
- * H5C_cache_entry_t structure.
- *
- * JRM -- 8/9/06
- * Added code supporting insertion of pinned entries.
- *
- * JRM -- 8/21/06
- * Added initialization for the new flush_in_progress and
- * destroy_in_progress fields.
- *
- * JRM -- 3/29/07
- * Added initialization for the new is_read_only and
- * ro_ref_count fields.
- *
- * JRM -- 8/1/07
- * Added code to disable evictions when the new
- * evictions_enabled field is FALSE.
- *
- * JRM -- 12/31/07
- * Added code supporting flash cache size increases.
- *
* QAK -- 1/31/08
* Added initialization for the new free_file_space_on_destroy
* field.
@@ -2307,13 +2033,10 @@ H5C_insert_entry(H5F_t * f,
/* not protected, so can't be dirtied */
entry_ptr->dirtied = FALSE;
- if ( (type->size)(f, thing, &(entry_ptr->size)) < 0 ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, \
- "Can't get size of thing")
- }
-
- HDassert( entry_ptr->size < H5C_MAX_ENTRY_SIZE );
+ /* Retrieve the size of the thing */
+ if((type->size)(f, thing, &(entry_ptr->size)) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, FAIL, "Can't get size of thing")
+ HDassert(entry_ptr->size > 0 && entry_ptr->size < H5C_MAX_ENTRY_SIZE);
entry_ptr->in_slist = FALSE;
@@ -2527,20 +2250,6 @@ done:
* Programmer: John Mainzer
* 7/5/05
*
- * Modifications:
- *
- * Reworked function to flush entries in LRU order instead
- * of increasing address order. The hope is that this will
- * improve the hit rate on the slave caches.
- *
- * JRM - 10/13/05
- *
- * Leave the old code in place for now (commented out) for
- * benchmarking.
- *
- * JRM -- 4/13/06
- * Updated function to deal with pinned entries.
- *
*-------------------------------------------------------------------------
*/
#ifdef H5_HAVE_PARALLEL
@@ -2806,120 +2515,11 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5C_mark_pinned_entry_dirty
- *
- * Purpose: Mark a pinned entry as dirty. The target entry MUST be
- * be pinned, and MUST be unprotected.
- *
- * If the entry has changed size, the function updates
- * data structures for the size change.
- *
- * If the entry is not already dirty, the function places
- * the entry on the skip list.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: John Mainzer
- * 3/22/06
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5C_mark_pinned_entry_dirty(void *thing, hbool_t size_changed, size_t new_size)
-{
- H5C_t * cache_ptr;
- H5C_cache_entry_t * entry_ptr = (H5C_cache_entry_t *)thing;
- hbool_t was_clean;
- size_t size_increase;
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(H5C_mark_pinned_entry_dirty, FAIL)
-
- /* Sanity checks */
- HDassert(entry_ptr);
- HDassert(H5F_addr_defined(entry_ptr->addr));
- cache_ptr = entry_ptr->cache_ptr;
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
-
- /* Check for usage errors */
- if(!entry_ptr->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry isn't pinned??")
- if(entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry is protected??")
-
- /* make note of whether the entry was dirty to begin with */
- was_clean = ! ( entry_ptr->is_dirty );
-
- /* mark the entry as dirty if it isn't already */
- entry_ptr->is_dirty = TRUE;
-
- /* update for change in entry size if necessary */
- if ( ( size_changed ) && ( entry_ptr->size != new_size ) ) {
-
- /* do a flash cache size increase if appropriate */
- if ( cache_ptr->flash_size_increase_possible ) {
-
- if ( new_size > entry_ptr->size ) {
-
- size_increase = new_size - entry_ptr->size;
-
- if ( size_increase >=
- cache_ptr->flash_size_increase_threshold ) {
- if(H5C__flash_increase_cache_size(cache_ptr, entry_ptr->size, new_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "flash cache increase failed")
- }
- }
- }
-
- /* update the pinned entry list */
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pel_len), \
- (cache_ptr->pel_size), \
- (entry_ptr->size), (new_size));
-
- /* update the hash table */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size), \
- (new_size), (entry_ptr), (was_clean));
-
- /* if the entry is in the skip list, update that too */
- if ( entry_ptr->in_slist ) {
-
- H5C__UPDATE_SLIST_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\
- (new_size));
- }
-
- /* update statistics just before changing the entry size */
- H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE((cache_ptr), (entry_ptr), \
- (new_size));
-
- /* finally, update the entry size proper */
- entry_ptr->size = new_size;
-
- } else if ( ( was_clean ) && ( entry_ptr->is_dirty ) ) {
-
- H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
- }
-
- if ( ! (entry_ptr->in_slist) ) {
-
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
- }
-
- H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_mark_pinned_entry_dirty() */
-
-
-/*-------------------------------------------------------------------------
- * Function: H5C_mark_pinned_or_protected_entry_dirty
+ * Function: H5C_mark_entry_dirty
*
* Purpose: Mark a pinned or protected entry as dirty. The target entry
* MUST be either pinned or protected, and MAY be both.
*
- * At present, this funtion does not support size change.
- *
* In the protected case, this call is the functional
* equivalent of setting the H5C__DIRTIED_FLAG on an unprotect
* call.
@@ -2942,13 +2542,13 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_mark_pinned_or_protected_entry_dirty(void *thing)
+H5C_mark_entry_dirty(void *thing)
{
H5C_t * cache_ptr;
H5C_cache_entry_t * entry_ptr = (H5C_cache_entry_t *)thing;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(H5C_mark_pinned_or_protected_entry_dirty, FAIL)
+ FUNC_ENTER_NOAPI(H5C_mark_entry_dirty, FAIL)
/* Sanity checks */
HDassert(entry_ptr);
@@ -2958,9 +2558,8 @@ H5C_mark_pinned_or_protected_entry_dirty(void *thing)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
if ( entry_ptr->is_protected ) {
-#if 0 /* JRM - uncomment this when possible */
HDassert( ! ((entry_ptr)->is_read_only) );
-#endif
+
/* set the dirtied flag */
entry_ptr->dirtied = TRUE;
@@ -2992,12 +2591,12 @@ H5C_mark_pinned_or_protected_entry_dirty(void *thing)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_mark_pinned_or_protected_entry_dirty() */
+} /* H5C_mark_entry_dirty() */
/*-------------------------------------------------------------------------
*
- * Function: H5C_rename_entry
+ * Function: H5C_move_entry
*
* Purpose: Use this function to notify the cache that an entry's
* file address changed.
@@ -3007,32 +2606,6 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
- * Modifications:
- *
- * JRM -- 7/21/04
- * Updated function for the addition of the hash table.
- *
- * JRM -- 6/6/05
- * Updated function to force all renamed entries to be
- * dirty. This is part of a series of code modifications
- * moving management of the is_dirty field of
- * H5C_cache_entry_t into the H5C code.
- *
- * JRM -- 4/3/06
- * Updated function to disallow renaming of pinned entries.
- *
- * JRM -- 4/27/06
- * Updated function to support renaming of pinned entries.
- *
- * JRM -- 8/24/06
- * Updated function to refrain from altering the index, the
- * replacement policy data structures, and skip list when
- * the function is called within the flush callback for the
- * target entry and the target entry is being destroyed.
- *
- * Note that in this case H5C_flush_single_entry() will handle
- * all these details for us.
- *
* JRM -- 11/5/08
* On review this function looks like no change is needed to
* support the new clean_index_size and dirty_index_size
@@ -3041,7 +2614,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_rename_entry(H5C_t * cache_ptr,
+H5C_move_entry(H5C_t * cache_ptr,
const H5C_class_t * type,
haddr_t old_addr,
haddr_t new_addr)
@@ -3054,7 +2627,7 @@ H5C_rename_entry(H5C_t * cache_ptr,
#endif /* H5C_DO_SANITY_CHECKS */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(H5C_rename_entry, FAIL)
+ FUNC_ENTER_NOAPI(H5C_move_entry, FAIL)
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
@@ -3084,7 +2657,7 @@ H5C_rename_entry(H5C_t * cache_ptr,
if ( entry_ptr->is_protected ) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, \
"Target entry is protected.")
}
@@ -3094,12 +2667,12 @@ H5C_rename_entry(H5C_t * cache_ptr,
if ( test_entry_ptr->type == type ) {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
- "Target already renamed & reinserted???.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, \
+ "Target already moved & reinserted???.")
} else {
- HGOTO_ERROR(H5E_CACHE, H5E_CANTRENAME, FAIL, \
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, \
"New address already in use?.")
}
@@ -3110,7 +2683,7 @@ H5C_rename_entry(H5C_t * cache_ptr,
* new address, mark it as dirty (if it isn't already) and then re-insert.
*
* Update the replacement policy for a hit to avoid an eviction before
- * the renamed entry is touched. Update stats for a rename.
+ * the moved entry is touched. Update stats for a move.
*
* Note that we do not check the size of the cache, or evict anything.
* Since this is a simple re-name, cache size should be unaffected.
@@ -3174,11 +2747,11 @@ H5C_rename_entry(H5C_t * cache_ptr,
#endif /* H5C_DO_SANITY_CHECKS */
- H5C__UPDATE_RP_FOR_RENAME(cache_ptr, entry_ptr, was_dirty, FAIL)
+ H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, FAIL)
}
}
- H5C__UPDATE_STATS_FOR_RENAME(cache_ptr, entry_ptr)
+ H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
done:
@@ -3192,14 +2765,13 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_rename_entry() */
+} /* H5C_move_entry() */
/*-------------------------------------------------------------------------
- * Function: H5C_resize_pinned_entry
+ * Function: H5C_resize_entry
*
- * Purpose: Resize a pinned entry. The target entry MUST be
- * be pinned, and MUST be unprotected.
+ * Purpose: Resize a pinned or protected entry.
*
* Resizing an entry dirties it, so if the entry is not
* already dirty, the function places the entry on the
@@ -3213,15 +2785,13 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5C_resize_pinned_entry(void *thing, size_t new_size)
+H5C_resize_entry(void *thing, size_t new_size)
{
H5C_t * cache_ptr;
H5C_cache_entry_t * entry_ptr = (H5C_cache_entry_t *)thing;
- size_t size_increase;
- hbool_t was_clean;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(H5C_resize_pinned_entry, FAIL)
+ FUNC_ENTER_NOAPI(H5C_resize_entry, FAIL)
/* Sanity checks */
HDassert(entry_ptr);
@@ -3232,41 +2802,46 @@ H5C_resize_pinned_entry(void *thing, size_t new_size)
/* Check for usage errors */
if(new_size <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "New size is non-positive.")
- if(!entry_ptr->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "Entry isn't pinned??")
- if(entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "Entry is protected??")
-
- /* make note of whether the entry was clean to begin with */
- was_clean = ! ( entry_ptr->is_dirty );
-
- /* resizing dirties entries -- mark the entry as dirty if it
- * isn't already
- */
- entry_ptr->is_dirty = TRUE;
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive.")
+ if(!(entry_ptr->is_pinned || entry_ptr->is_protected))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??")
/* update for change in entry size if necessary */
if ( entry_ptr->size != new_size ) {
+ hbool_t was_clean;
+
+ /* make note of whether the entry was clean to begin with */
+ was_clean = ! ( entry_ptr->is_dirty );
+
+ /* mark the entry as dirty if it isn't already */
+ entry_ptr->is_dirty = TRUE;
/* do a flash cache size increase if appropriate */
if ( cache_ptr->flash_size_increase_possible ) {
if ( new_size > entry_ptr->size ) {
+ size_t size_increase;
size_increase = new_size - entry_ptr->size;
if(size_increase >= cache_ptr->flash_size_increase_threshold) {
if(H5C__flash_increase_cache_size(cache_ptr, entry_ptr->size, new_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "flash cache increase failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "flash cache increase failed")
}
}
}
- /* update the protected entry list */
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pel_len), \
- (cache_ptr->pel_size), \
- (entry_ptr->size), (new_size));
+ /* update the pinned and/or protected entry list */
+ if(entry_ptr->is_pinned) {
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pel_len), \
+ (cache_ptr->pel_size), \
+ (entry_ptr->size), (new_size))
+ } /* end if */
+ if(entry_ptr->is_protected) {
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pl_len), \
+ (cache_ptr->pl_size), \
+ (entry_ptr->size), (new_size))
+ } /* end if */
/* update the hash table */
H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\
@@ -3274,10 +2849,9 @@ H5C_resize_pinned_entry(void *thing, size_t new_size)
/* if the entry is in the skip list, update that too */
if ( entry_ptr->in_slist ) {
-
H5C__UPDATE_SLIST_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size),\
(new_size));
- }
+ } /* end if */
/* update statistics just before changing the entry size */
H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE((cache_ptr), (entry_ptr), \
@@ -3286,22 +2860,18 @@ H5C_resize_pinned_entry(void *thing, size_t new_size)
/* finally, update the entry size proper */
entry_ptr->size = new_size;
- } else if ( was_clean ) {
-
- H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
- }
-
-
- if ( ! (entry_ptr->in_slist) ) {
-
- H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
- }
+ if(!entry_ptr->in_slist) {
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
+ } /* end if */
- H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
+ if(entry_ptr->is_pinned) {
+ H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
+ } /* end if */
+ } /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C_resize_pinned_entry() */
+} /* H5C_resize_entry() */
/*-------------------------------------------------------------------------
@@ -3424,62 +2994,10 @@ done:
* check_write_permitted function if it is called.
*
* Return: Success: Ptr to the desired entry
- *
* Failure: NULL
*
* Programmer: John Mainzer - 6/2/04
*
- * Modifications:
- *
- * JRM -- 7/21/04
- * Updated for the addition of the hash table.
- *
- * JRM -- 10/28/04
- * Added code to set cache_full to TRUE whenever we try to
- * make space in the cache.
- *
- * JRM -- 11/12/04
- * Added code to call to H5C_make_space_in_cache()
- * after the call to H5C__auto_adjust_cache_size() if that
- * function sets the size_decreased flag is TRUE.
- *
- * JRM -- 4/25/05
- * The size_decreased flag can also be set to TRUE in
- * H5C_set_cache_auto_resize_config() if a new configuration
- * forces an immediate reduction in cache size. Modified
- * the code to deal with this eventuallity.
- *
- * JRM -- 6/24/05
- * Added support for the new write_permitted field of H5C_t.
- *
- * JRM -- 10/22/05
- * Hand optimizations.
- *
- * JRM -- 5/3/06
- * Added code to set the new dirtied field in
- * H5C_cache_entry_t to FALSE prior to return.
- *
- * JRM -- 6/23/06
- * Modified code to allow dirty entries to be loaded from
- * disk. This is necessary as a bug fix in the object
- * header code requires us to modify a header as it is read.
- *
- * JRM -- 3/28/07
- * Added the flags parameter and supporting code. At least
- * for now, this parameter is used to allow the entry to
- * be protected read only, thus allowing multiple protects.
- *
- * Also added code to allow multiple read only protects
- * of cache entries.
- *
- * JRM -- 7/27/07
- * Added code supporting the new evictions_enabled field
- * in H5C_t.
- *
- * JRM -- 1/3/08
- * Added to do a flash cache size increase if appropriate
- * when a large entry is loaded.
- *
* JRM -- 11/13/08
* Modified function to call H5C_make_space_in_cache() when
* the min_clean_size is violated, not just when there isn't
@@ -3919,13 +3437,8 @@ done:
*
* Programmer: John Mainzer, 10/5/04
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
-
herr_t
H5C_reset_cache_hit_rate_stats(H5C_t * cache_ptr)
{
@@ -3965,21 +3478,6 @@ done:
* Programmer: John Mainzer
* 10/8/04
*
- * Modifications:
- *
- * JRM -- 11/18/04
- * Reworked function to match major changes in
- * H5C_auto_size_ctl_t.
- *
- * JRM -- 4/25/05
- * Added code to set cache_ptr->size_decreased to TRUE
- * if the new configuration forces an immediate reduction
- * in cache size.
- *
- * JRM -- 12/31/07
- * Added code supporting the new flash cache size increase
- * code.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -4257,10 +3755,6 @@ done:
* Programmer: John Mainzer
* 7/27/07
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -4316,11 +3810,8 @@ done:
* Programmer: John Mainzer
* 1/20/06
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-
herr_t
H5C_set_prefix(H5C_t * cache_ptr,
char * prefix)
@@ -4362,11 +3853,8 @@ done:
* Programmer: John Mainzer
* 6/11/04
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-
herr_t
H5C_set_skip_flags(H5C_t * cache_ptr,
hbool_t skip_file_checks,
@@ -4407,11 +3895,8 @@ done:
* Programmer: John Mainzer
* 1/20/06
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-
herr_t
H5C_set_trace_file_ptr(H5C_t * cache_ptr,
FILE * trace_file_ptr)
@@ -4446,32 +3931,6 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
- * Modifications:
- *
- * JRM -- 7/21/04
- * Updated function for the addition of the hash table.
- *
- * JRM -- 9/8/05
- * Updated function for the addition of cache entry size
- * change statistics.
- *
- * JRM -- 1/13/06
- * Added code to use the prefix field of H5C_t to allow
- * tagging of statistics output.
- *
- * JRM -- 3/21/06
- * Added code supporting the pinned entry related stats.
- *
- * JRM -- 8/9/06
- * More code supporting pinned entry related stats.
- *
- * JRM -- 8/23/06
- * Added code supporting new flush related statistics.
- *
- * JRM -- 3/31/07
- * Added code supporting the new write_protects,
- * read_protects, and max_read_protects fields.
- *
* JRM -- 11/13/08
* Added code displaying the max_clean_index_size and
* max_dirty_index_size.
@@ -4483,7 +3942,6 @@ done:
*
*-------------------------------------------------------------------------
*/
-
herr_t
H5C_stats(H5C_t * cache_ptr,
const char * cache_name,
@@ -4507,9 +3965,9 @@ H5C_stats(H5C_t * cache_ptr,
int64_t total_clears = 0;
int64_t total_flushes = 0;
int64_t total_evictions = 0;
- int64_t total_renames = 0;
- int64_t total_entry_flush_renames = 0;
- int64_t total_cache_flush_renames = 0;
+ int64_t total_moves = 0;
+ int64_t total_entry_flush_moves = 0;
+ int64_t total_cache_flush_moves = 0;
int64_t total_size_increases = 0;
int64_t total_size_decreases = 0;
int64_t total_entry_flush_size_changes = 0;
@@ -4560,11 +4018,11 @@ H5C_stats(H5C_t * cache_ptr,
total_clears += cache_ptr->clears[i];
total_flushes += cache_ptr->flushes[i];
total_evictions += cache_ptr->evictions[i];
- total_renames += cache_ptr->renames[i];
- total_entry_flush_renames
- += cache_ptr->entry_flush_renames[i];
- total_cache_flush_renames
- += cache_ptr->cache_flush_renames[i];
+ total_moves += cache_ptr->moves[i];
+ total_entry_flush_moves
+ += cache_ptr->entry_flush_moves[i];
+ total_cache_flush_moves
+ += cache_ptr->cache_flush_moves[i];
total_size_increases += cache_ptr->size_increases[i];
total_size_decreases += cache_ptr->size_decreases[i];
total_entry_flush_size_changes
@@ -4720,17 +4178,17 @@ H5C_stats(H5C_t * cache_ptr,
(long)total_evictions);
HDfprintf(stdout,
- "%s Total insertions(pinned) / renames = %ld(%ld) / %ld\n",
+ "%s Total insertions(pinned) / moves = %ld(%ld) / %ld\n",
cache_ptr->prefix,
(long)total_insertions,
(long)total_pinned_insertions,
- (long)total_renames);
+ (long)total_moves);
HDfprintf(stdout,
- "%s Total entry / cache flush renames = %ld / %ld\n",
+ "%s Total entry / cache flush moves = %ld / %ld\n",
cache_ptr->prefix,
- (long)total_entry_flush_renames,
- (long)total_cache_flush_renames);
+ (long)total_entry_flush_moves,
+ (long)total_cache_flush_moves);
HDfprintf(stdout, "%s Total entry size incrs / decrs = %ld / %ld\n",
cache_ptr->prefix,
@@ -4849,17 +4307,17 @@ H5C_stats(H5C_t * cache_ptr,
(long)(cache_ptr->evictions[i]));
HDfprintf(stdout,
- "%s insertions(pinned) / renames = %ld(%ld) / %ld\n",
+ "%s insertions(pinned) / moves = %ld(%ld) / %ld\n",
cache_ptr->prefix,
(long)(cache_ptr->insertions[i]),
(long)(cache_ptr->pinned_insertions[i]),
- (long)(cache_ptr->renames[i]));
+ (long)(cache_ptr->moves[i]));
HDfprintf(stdout,
- "%s entry / cache flush renames = %ld / %ld\n",
+ "%s entry / cache flush moves = %ld / %ld\n",
cache_ptr->prefix,
- (long)(cache_ptr->entry_flush_renames[i]),
- (long)(cache_ptr->cache_flush_renames[i]));
+ (long)(cache_ptr->entry_flush_moves[i]),
+ (long)(cache_ptr->cache_flush_moves[i]));
HDfprintf(stdout,
"%s size increases / decreases = %ld / %ld\n",
@@ -4932,31 +4390,6 @@ done:
*
* Programmer: John Mainzer, 4/28/04
*
- * Modifications:
- *
- * JRM - 7/21/04
- * Updated for hash table related statistics.
- *
- * JRM - 9/8/05
- * Updated for size increase / decrease statistics.
- *
- * JRM - 3/20/06
- * Updated for pin / unpin related statistics.
- *
- * JRM - 8/9/06
- * Further updates for pin related statistics.
- *
- * JRM 8/23/06
- * Added initialization code for new flush related statistics.
- *
- * JRM 2/16/07
- * Added conditional compile code to avoid unused parameter
- * warning in the production build.
- *
- * JRM 3/31/07
- * Added initialization for the new write_protects,
- * read_protects, and max_read_protects fields.
- *
* JRM 11/13/08
* Added initialization for the new max_clean_index_size and
* max_dirty_index_size fields.
@@ -4968,7 +4401,6 @@ done:
*
*-------------------------------------------------------------------------
*/
-
void
#ifndef NDEBUG
H5C_stats__reset(H5C_t * cache_ptr)
@@ -5000,9 +4432,9 @@ H5C_stats__reset(H5C_t UNUSED * cache_ptr)
cache_ptr->clears[i] = 0;
cache_ptr->flushes[i] = 0;
cache_ptr->evictions[i] = 0;
- cache_ptr->renames[i] = 0;
- cache_ptr->entry_flush_renames[i] = 0;
- cache_ptr->cache_flush_renames[i] = 0;
+ cache_ptr->moves[i] = 0;
+ cache_ptr->entry_flush_moves[i] = 0;
+ cache_ptr->cache_flush_moves[i] = 0;
cache_ptr->pins[i] = 0;
cache_ptr->unpins[i] = 0;
cache_ptr->dirty_pins[i] = 0;
@@ -5186,85 +4618,6 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
- * Modifications:
- *
- * JRM -- 7/21/04
- * Updated the function for the addition of the hash table.
- * In particular, we now add dirty entries to the tree if
- * they aren't in the tree already.
- *
- * JRM -- 1/6/05
- * Added the flags parameter, and code supporting
- * H5C__SET_FLUSH_MARKER_FLAG. Note that this flag is
- * ignored unless the new entry is dirty. Also note that
- * once the flush_marker field of an entry is set, the
- * only way it can be reset is by being flushed.
- *
- * JRM -- 6/3/05
- * Added the dirtied parameter and supporting code. This
- * is part of an effort to move management of the is_dirty
- * field into the cache code. This has become necessary
- * to repair a cache coherency bug in PHDF5.
- *
- * JRM -- 7/5/05
- * Added code supporting the new clear_on_unprotect field
- * of H5C_cache_entry_t. This change is also part of the
- * above mentioned cache coherency bug fix in PHDF5.
- *
- * JRM -- 9/8/05
- * Added the size_changed and new_size parameters and the
- * supporting code. Since the metadata cache synchronizes
- * on dirty bytes creation in the PHDF5 case, we must now
- * track changes in entry size.
- *
- * Note that the new_size parameter is ignored unless the
- * size_changed parameter is TRUE. In this case, the new_size
- * must be positive.
- *
- * Also observe that if size_changed is TRUE, dirtied must be
- * TRUE.
- *
- * JRM -- 9/23/05
- * Moved the size_changed parameter into flags.
- *
- * JRM -- 3/21/06
- * Unpdated function to pin and unpin entries as directed via
- * the new H5C__PIN_ENTRY_FLAG and H5C__UNPIN_ENTRY_FLAG flags.
- *
- * JRM -- 5/3/06
- * Added code to make use of the new dirtied field in
- * H5C_cache_entry_t. If this field is TRUE, it is the
- * equivalent of setting the H5C__DIRTIED_FLAG.
- *
- * JRM -- 3/29/07
- * Modified function to allow a entry to be protected
- * more than once if the entry is protected read only.
- *
- * Also added sanity checks using the new is_read_only and
- * ro_ref_count parameters.
- *
- * JRM -- 12/31/07
- * Modified function to support flash cache resizes.
- *
- * QAK -- 1/31/08
- * Modified function to support freeing file space in client's
- * 'dest' callback routine.
- *
- * QAK -- 2/07/08
- * Separated "destroy entry" concept from "remove entry from
- * cache" concept, by adding the 'take_ownership' flag.
- *
- * JRM -- 11/5/08
- * Added code to update the clean_index_size and
- * dirty_index_size fields of H5C_t in cases where the
- * the entry was clean on protect, was marked dirty on
- * unprotect, and did not change its size. Do this via
- * a call to H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY().
- *
- * If the size changed, this case is already dealt with by
- * by the pre-existing call to
- * H5C__UPDATE_INDEX_FOR_SIZE_CHANGE().
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -5274,14 +4627,12 @@ H5C_unprotect(H5F_t * f,
const H5C_class_t * type,
haddr_t addr,
void * thing,
- unsigned int flags,
- size_t new_size)
+ unsigned int flags)
{
H5C_t * cache_ptr;
hbool_t deleted;
hbool_t dirtied;
hbool_t set_flush_marker;
- hbool_t size_changed;
hbool_t pin_entry;
hbool_t unpin_entry;
hbool_t free_file_space;
@@ -5290,8 +4641,6 @@ H5C_unprotect(H5F_t * f,
#ifdef H5_HAVE_PARALLEL
hbool_t clear_entry = FALSE;
#endif /* H5_HAVE_PARALLEL */
- herr_t result;
- size_t size_increase = 0;
H5C_cache_entry_t * entry_ptr;
H5C_cache_entry_t * test_entry_ptr;
herr_t ret_value = SUCCEED; /* Return value */
@@ -5301,18 +4650,11 @@ H5C_unprotect(H5F_t * f,
deleted = ( (flags & H5C__DELETED_FLAG) != 0 );
dirtied = ( (flags & H5C__DIRTIED_FLAG) != 0 );
set_flush_marker = ( (flags & H5C__SET_FLUSH_MARKER_FLAG) != 0 );
- size_changed = ( (flags & H5C__SIZE_CHANGED_FLAG) != 0 );
pin_entry = ( (flags & H5C__PIN_ENTRY_FLAG) != 0 );
unpin_entry = ( (flags & H5C__UNPIN_ENTRY_FLAG) != 0 );
free_file_space = ( (flags & H5C__FREE_FILE_SPACE_FLAG) != 0 );
take_ownership = ( (flags & H5C__TAKE_OWNERSHIP_FLAG) != 0 );
- /* Changing the size of an entry dirties it. Thus, set the
- * dirtied flag if the size_changed flag is set.
- */
-
- dirtied |= size_changed;
-
HDassert( f );
HDassert( f->shared );
@@ -5326,9 +4668,6 @@ H5C_unprotect(H5F_t * f,
HDassert( type->flush );
HDassert( H5F_addr_defined(addr) );
HDassert( thing );
- HDassert( ( size_changed == TRUE ) || ( size_changed == FALSE ) );
- HDassert( ( ! size_changed ) || ( dirtied ) );
- HDassert( ( ! size_changed ) || ( new_size > 0 ) );
HDassert( ! ( pin_entry && unpin_entry ) );
HDassert( ( ! free_file_space ) || ( deleted ) ); /* deleted flag must accompany free_file_space */
HDassert( ( ! take_ownership ) || ( deleted ) ); /* deleted flag must accompany take_ownership */
@@ -5440,58 +4779,7 @@ H5C_unprotect(H5F_t * f,
/* mark the entry as dirty if appropriate */
entry_ptr->is_dirty = ( (entry_ptr->is_dirty) || dirtied );
- /* update for change in entry size if necessary */
- if ( ( size_changed ) && ( entry_ptr->size != new_size ) ) {
-
- /* do a flash cache size increase if appropriate */
- if ( cache_ptr->flash_size_increase_possible ) {
-
- if ( new_size > entry_ptr->size ) {
-
- size_increase = new_size - entry_ptr->size;
-
- if ( size_increase >=
- cache_ptr->flash_size_increase_threshold ) {
-
- result = H5C__flash_increase_cache_size(cache_ptr,
- entry_ptr->size,
- new_size);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, \
- "H5C__flash_increase_cache_size failed.")
- }
- }
- }
- }
-
- /* update the protected list */
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr->pl_len), \
- (cache_ptr->pl_size), \
- (entry_ptr->size), (new_size));
-
- /* update the hash table */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE((cache_ptr), (entry_ptr->size), \
- (new_size), (entry_ptr), \
- (was_clean));
-
- /* if the entry is in the skip list, update that too */
- if ( entry_ptr->in_slist ) {
-
- H5C__UPDATE_SLIST_FOR_SIZE_CHANGE((cache_ptr), \
- (entry_ptr->size),\
- (new_size));
- }
-
- /* update statistics just before changing the entry size */
- H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE((cache_ptr), (entry_ptr), \
- (new_size));
-
- /* finally, update the entry size proper */
- entry_ptr->size = new_size;
-
- } else if ( ( was_clean ) && ( entry_ptr->is_dirty ) ) {
+ if ( ( was_clean ) && ( entry_ptr->is_dirty ) ) {
H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr)
}
@@ -5511,8 +4799,8 @@ H5C_unprotect(H5F_t * f,
}
- /* H5C__UPDATE_RP_FOR_UNPROTECT will places the unprotected entry on
- * the pinned entry list if entry_ptr->is_pined is TRUE.
+ /* H5C__UPDATE_RP_FOR_UNPROTECT will place the unprotected entry on
+ * the pinned entry list if entry_ptr->is_pinned is TRUE.
*/
H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, FAIL)
@@ -5663,15 +4951,8 @@ done:
* Programmer: John Mainzer
* 3/23/05
*
- * Modifications:
- *
- * Added validation for the flash increment fields.
- *
- * JRM -- 12/31/07
- *
*-------------------------------------------------------------------------
*/
-
herr_t
H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
unsigned int tests)
@@ -6281,16 +5562,6 @@ done:
*
* Programmer: John Mainzer, 10/7/04
*
- * Modifications:
- *
- * JRM -- 11/18/04
- * Major re-write to support ageout method of cache size
- * reduction, and to adjust to changes in the
- * H5C_auto_size_ctl_t structure.
- *
- * JRM -- 1/5/08
- * Added support for flash cache size increases.
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -6644,10 +5915,6 @@ done:
*
* Programmer: John Mainzer, 11/18/04
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -6770,13 +6037,8 @@ done:
*
* Programmer: John Mainzer, 11/22/04
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
-
static herr_t
H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr)
{
@@ -6902,23 +6164,6 @@ done:
*
* Programmer: John Mainzer, 11/22/04
*
- * Modifications:
- *
- * JRM -- 10/13/07
- * Added code to detect and manage the case in which a
- * flush callback changes the LRU-list out from under
- * the function. The only way I can think of in which this
- * can happen is if a flush function loads an entry
- * into the cache that isn't there already. Quincey tells
- * me that this will never happen, but I'm not sure I
- * believe him.
- *
- * Note that this is a pretty bad scenario if it ever
- * happens. The code I have added should allow us to
- * handle the situation under all but the worst conditions,
- * but one can argue that I should just scream and die if I
- * ever detect the condidtion.
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -7137,13 +6382,8 @@ done:
*
* Programmer: John Mainzer, 11/19/04
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
-
static herr_t
H5C__autoadjust__ageout__insert_new_marker(H5C_t * cache_ptr)
{
@@ -7219,13 +6459,8 @@ done:
*
* Programmer: John Mainzer, 11/22/04
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
-
static herr_t
H5C__autoadjust__ageout__remove_all_markers(H5C_t * cache_ptr)
{
@@ -7305,13 +6540,8 @@ done:
*
* Programmer: John Mainzer, 11/19/04
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
-
static herr_t
H5C__autoadjust__ageout__remove_excess_markers(H5C_t * cache_ptr)
{
@@ -7399,10 +6629,6 @@ done:
*
* Programmer: John Mainzer, 12/31/07
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -7588,30 +6814,6 @@ done:
* Programmer: John Mainzer
* 3/24/065
*
- * Modifications:
- *
- * To support the fractal heap, the cache must now deal with
- * entries being dirtied, resized, and/or renamed inside
- * flush callbacks. Updated function to support this.
- *
- * -- JRM 8/27/06
- *
- * Added code to detect and manage the case in which a
- * flush callback changes the s-list out from under
- * the function. The only way I can think of in which this
- * can happen is if a flush function loads an entry
- * into the cache that isn't there already. Quincey tells
- * me that this will never happen, but I'm not sure I
- * believe him.
- *
- * Note that this is a pretty bad scenario if it ever
- * happens. The code I have added should allow us to
- * handle the situation under all but the worst conditions,
- * but one can argue that I should just scream and die if I
- * ever detect the condidtion.
- *
- * -- JRM 10/13/07
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -7672,14 +6874,14 @@ H5C_flush_invalidate_cache(H5F_t * f,
* unpin themselves, or until the number of pinned entries stops
 * declining. In this latter case, we scream and die.
*
- * Since the fractal heap can dirty, resize, and/or rename entries
+ * Since the fractal heap can dirty, resize, and/or move entries
 * in its flush callback, it is possible that the cache will still
 * contain dirty entries at this point. If so, we must make up to
 * H5C__MAX_PASSES_ON_FLUSH more passes through the skip list
 * to allow it to empty. If it is not empty at this point, we again
* scream and die.
*
- * Further, since clean entries can be dirtied, resized, and/or renamed
+ * Further, since clean entries can be dirtied, resized, and/or moved
 * as the result of a flush callback (either the entry's own, or that
* for some other cache entry), we can no longer promise to flush
* the cache entries in increasing address order.
@@ -7748,7 +6950,7 @@ H5C_flush_invalidate_cache(H5F_t * f,
initial_slist_size = cache_ptr->slist_size;
/* There is also the possibility that entries will be
- * dirtied, resized, and/or renamed as the result of
+ * dirtied, resized, and/or moved as the result of
* calls to the flush callbacks. We use the slist_len_increase
* and slist_size_increase increase fields in struct H5C_t
* to track these changes for purpose of sanity checking.
@@ -8167,58 +7369,6 @@ done:
*
* Programmer: John Mainzer, 5/5/04
*
- * Modifications:
- *
- * JRM -- 7/21/04
- * Updated function for the addition of the hash table.
- *
- * QAK -- 11/26/04
- * Updated function for the switch from TBBTs to skip lists.
- *
- * JRM -- 1/6/05
- * Updated function to reset the flush_marker field.
- * Also replace references to H5F_FLUSH_INVALIDATE and
- * H5F_FLUSH_CLEAR_ONLY with references to
- * H5C__FLUSH_INVALIDATE_FLAG and H5C__FLUSH_CLEAR_ONLY_FLAG
- * respectively.
- *
- * JRM -- 6/24/05
- * Added code to remove dirty entries from the slist after
- * they have been flushed. Also added a sanity check that
- * will scream if we attempt a write when writes are
- * completely disabled.
- *
- * JRM -- 7/5/05
- * Added code to call the new log_flush callback whenever
- * a dirty entry is written to disk. Note that the callback
- * is not called if the H5C__FLUSH_CLEAR_ONLY_FLAG is set,
- * as there is no write to file in this case.
- *
- * JRM -- 8/21/06
- * Added code maintaining the flush_in_progress and
- * destroy_in_progress fields in H5C_cache_entry_t.
- *
- * Also added flush_flags parameter to the call to
- * type_ptr->flush() so that the flush routine can report
- * whether the entry has been resized or renamed. Added
- * code using the flush_flags variable to detect the case
- * in which the target entry is resized during flush, and
- * update the caches data structures accordingly.
- *
- * JRM -- 3/29/07
- * Added sanity checks on the new is_read_only and
- * ro_ref_count fields.
- *
- * QAK -- 2/07/08
- * Separated "destroy entry" concept from "remove entry from
- * cache" concept, by adding the 'take_ownership' flag and
- * the "destroy_entry" variable.
- *
- * JRM -- 11/5/08
- * Added call to H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN() to
- * maintain the new clean_index_size and clean_index_size
- * fields of H5C_t.
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -8397,8 +7547,8 @@ H5C_flush_single_entry(H5F_t * f,
* We must do deletions now as the callback routines will free the
* entry if destroy is true.
*
- * Note that it is possible that the entry will be renamed during
- * its call to flush. This will upset H5C_rename_entry() if we
+ * Note that it is possible that the entry will be moved during
+ * its call to flush. This will upset H5C_move_entry() if we
* don't tell it that it doesn't have to worry about updating the
* index and SLIST. Use the destroy_in_progress field for this
* purpose.
@@ -8641,9 +7791,9 @@ H5C_flush_single_entry(H5F_t * f,
* change this test accordingly.
*
 * NB: While this test detects entries that attempt
- * to resize or rename themselves during a flush
+ * to resize or move themselves during a flush
* in the parallel case, it will not detect an
- * entry that dirties, resizes, and/or renames
+ * entry that dirties, resizes, and/or moves
* other entries during its flush.
*
* From what Quincey tells me, this test is
@@ -8656,7 +7806,7 @@ H5C_flush_single_entry(H5F_t * f,
if ( cache_ptr->aux_ptr != NULL ) {
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "resize/rename in serialize occured in parallel case.")
+ "resize/move in serialize occurred in parallel case.")
}
}
@@ -8741,9 +7891,9 @@ H5C_flush_single_entry(H5F_t * f,
}
}
- if ( (flush_flags & H5C_CALLBACK__RENAMED_FLAG) != 0 ) {
+ if ( (flush_flags & H5C_CALLBACK__MOVED_FLAG) != 0 ) {
- /* The entry was renamed as the result of the flush.
+ /* The entry was moved as the result of the flush.
*
* Most likely, the entry was compressed, and the
* new version is larger than the old and thus had
@@ -8795,25 +7945,6 @@ done:
*
* Programmer: John Mainzer, 5/18/04
*
- * Modifications:
- *
- * JRM - 7/21/04
- * Updated function for the addition of the hash table.
- *
- * JRM - 6/23/06
- * Deleted assertion that verified that a newly loaded
- * entry is clean. Due to a bug fix, this need not be
- * the case, as our code will attempt to repair errors
- * on load.
- *
- * JRM - 8/21/06
- * Added initialization for the new flush_in_progress and
- * destroy in progress fields.
- *
- * JRM - 3/29/07
- * Added initialization for the new is_read_only and
- * ro_ref_count fields.
- *
* QAK -- 1/31/08
* Added initialization for the new free_file_space_on_destroy
* field.
@@ -8832,29 +7963,28 @@ H5C_load_entry(H5F_t * f,
hbool_t UNUSED skip_file_checks)
#endif /* NDEBUG */
{
- void * thing = NULL;
- H5C_cache_entry_t * entry_ptr = NULL;
- unsigned u; /* Local index variable */
- void * ret_value = NULL; /* Return value */
+ void * thing = NULL; /* Pointer to thing loaded */
+ H5C_cache_entry_t * entry; /* Alias for thing loaded, as cache entry */
+ unsigned u; /* Local index variable */
+ void * ret_value; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5C_load_entry)
- HDassert( f );
- HDassert( f->shared );
- HDassert( f->shared->cache );
- HDassert( skip_file_checks || f );
- HDassert( type );
- HDassert( type->load );
- HDassert( type->size );
- HDassert( H5F_addr_defined(addr) );
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ HDassert(skip_file_checks || f);
+ HDassert(type);
+ HDassert(type->load);
+ HDassert(type->size);
+ HDassert(H5F_addr_defined(addr));
- if ( NULL == (thing = (type->load)(f, dxpl_id, addr, udata)) ) {
+ if(NULL == (thing = (type->load)(f, dxpl_id, addr, udata)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "unable to load entry")
- }
- entry_ptr = (H5C_cache_entry_t *)thing;
+ entry = (H5C_cache_entry_t *)thing;
/* In general, an entry should be clean just after it is loaded.
*
@@ -8865,11 +7995,11 @@ H5C_load_entry(H5F_t * f,
*
* To support this bug fix, I have replace the old assert:
*
- * HDassert( entry_ptr->is_dirty == FALSE );
+ * HDassert( entry->is_dirty == FALSE );
*
* with:
*
- * HDassert( ( entry_ptr->is_dirty == FALSE ) || ( type->id == 5 ) );
+ * HDassert( ( entry->is_dirty == FALSE ) || ( type->id == 5 ) );
*
* Note that type id 5 is associated with object headers in the metadata
* cache.
@@ -8879,56 +8009,52 @@ H5C_load_entry(H5F_t * f,
* metadata cache.
*/
- HDassert( ( entry_ptr->is_dirty == FALSE ) || ( type->id == 5 ) );
+ HDassert( ( entry->is_dirty == FALSE ) || ( type->id == 5 ) );
#ifndef NDEBUG
- entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+ entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
#endif /* NDEBUG */
- entry_ptr->cache_ptr = f->shared->cache;
- entry_ptr->addr = addr;
- entry_ptr->type = type;
- entry_ptr->is_protected = FALSE;
- entry_ptr->is_read_only = FALSE;
- entry_ptr->ro_ref_count = 0;
- entry_ptr->in_slist = FALSE;
- entry_ptr->flush_marker = FALSE;
+ entry->cache_ptr = f->shared->cache;
+ entry->addr = addr;
+ entry->type = type;
+ entry->is_protected = FALSE;
+ entry->is_read_only = FALSE;
+ entry->ro_ref_count = 0;
+ entry->in_slist = FALSE;
+ entry->flush_marker = FALSE;
#ifdef H5_HAVE_PARALLEL
- entry_ptr->clear_on_unprotect = FALSE;
+ entry->clear_on_unprotect = FALSE;
#endif /* H5_HAVE_PARALLEL */
- entry_ptr->flush_in_progress = FALSE;
- entry_ptr->destroy_in_progress = FALSE;
- entry_ptr->free_file_space_on_destroy = FALSE;
+ entry->flush_in_progress = FALSE;
+ entry->destroy_in_progress = FALSE;
+ entry->free_file_space_on_destroy = FALSE;
- if ( (type->size)(f, thing, &(entry_ptr->size)) < 0 ) {
+ if((type->size)(f, thing, &(entry->size)) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, NULL, \
- "Can't get size of thing")
- }
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGETSIZE, NULL, "Can't get size of thing")
- HDassert( entry_ptr->size < H5C_MAX_ENTRY_SIZE );
+ HDassert( entry->size < H5C_MAX_ENTRY_SIZE );
/* Initialize flush dependency height fields */
- entry_ptr->flush_dep_parent = NULL;
+ entry->flush_dep_parent = NULL;
for(u = 0; u < H5C__NUM_FLUSH_DEP_HEIGHTS; u++)
- entry_ptr->child_flush_dep_height_rc[u] = 0;
- entry_ptr->flush_dep_height = 0;
-
- entry_ptr->ht_next = NULL;
- entry_ptr->ht_prev = NULL;
+ entry->child_flush_dep_height_rc[u] = 0;
+ entry->flush_dep_height = 0;
+ entry->ht_next = NULL;
+ entry->ht_prev = NULL;
- entry_ptr->next = NULL;
- entry_ptr->prev = NULL;
+ entry->next = NULL;
+ entry->prev = NULL;
- entry_ptr->aux_next = NULL;
- entry_ptr->aux_prev = NULL;
+ entry->aux_next = NULL;
+ entry->aux_prev = NULL;
- H5C__RESET_CACHE_ENTRY_STATS(entry_ptr);
+ H5C__RESET_CACHE_ENTRY_STATS(entry);
ret_value = thing;
done:
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_load_entry() */
@@ -8965,44 +8091,6 @@ done:
*
* Programmer: John Mainzer, 5/14/04
*
- * Modifications:
- *
- * JRM --7/21/04
- * Minor modifications in support of the addition of a hash
- * table to facilitate lookups.
- *
- * JRM -- 11/22/04
- * Added the first_flush_ptr parameter, which replaces the
- * old first_flush local variable. This allows the function
- * to coordinate on the first flush issue with other functions.
- *
- * JRM -- 12/13/04
- * Added code to skip over epoch markers if present.
- *
- * JRM -- 1/3/06
- * Modified function to work correctly when the the cache
- * is not full. This case occurs when we need to flush to
- * min clean size before the cache has filled.
- *
- * JRM -- 3/29/07
- * Added sanity checks using the new is_read_only and
- * ro_ref_count fields.
- *
- * JRM -- 10/13/07
- * Added code to detect and manage the case in which a
- * flush callback changes the LRU-list out from under
- * the function. The only way I can think of in which this
- * can happen is if a flush function loads an entry
- * into the cache that isn't there already. Quincey tells
- * me that this will never happen, but I'm not sure I
- * believe him.
- *
- * Note that this is a pretty bad scenario if it ever
- * happens. The code I have added should allow us to
- * handle the situation under all but the worst conditions,
- * but one can argue that I should just scream and die if I
- * ever detect the condidtion.
- *
* JRM -- 11/13/08
* Modified function to always observe the min_clean_size
* whether we are maintaining the clean and dirt LRU lists
@@ -9335,11 +8423,8 @@ done:
*
* Programmer: John Mainzer, 7/14/05
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-
#if H5C_DO_EXTREME_SANITY_CHECKS
static herr_t
@@ -9458,11 +8543,8 @@ done:
*
* Programmer: John Mainzer, 7/14/05
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-
#if H5C_DO_EXTREME_SANITY_CHECKS
static herr_t
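
A reviewer's note on the H5C_unprotect() change above: the sketch below shows the expected caller-side pattern, with hypothetical names (the helper itself, thing, new_size) that do not appear in this patch. Size changes are now reported through the dedicated H5C_resize_entry() call rather than through a new_size argument and H5C__SIZE_CHANGED_FLAG at unprotect time.

    /* Hypothetical caller-side sketch -- not taken from this patch. */
    static herr_t
    example_resize_then_unprotect(void *thing, size_t new_size)
    {
        /* Report the new size via the dedicated resize call; this replaces
         * the old new_size / H5C__SIZE_CHANGED_FLAG path that used to live
         * inside H5C_unprotect().
         */
        if(H5C_resize_entry(thing, new_size) < 0)
            return FAIL;

        /* ... later, unprotect with the remaining flags only, e.g.
         *
         *     H5C_unprotect(f, <dxpl arguments>, type, addr, thing,
         *                   H5C__DIRTIED_FLAG);
         *
         * (file, dxpl, type, and addr arguments are elided here.)
         */
        return SUCCEED;
    }
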
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 21151d0..e1dffa4 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -44,7 +44,7 @@
#include "H5SLprivate.h" /* Skip lists */
/* With the introduction of the fractal heap, it is now possible for
- * entries to be dirtied, resized, and/or renamed in the flush callbacks.
+ * entries to be dirtied, resized, and/or moved in the flush callbacks.
* As a result, on flushes, it may be necessary to make multiple passes
* through the slist before it is empty. The H5C__MAX_PASSES_ON_FLUSH
* #define is used to set an upper limit on the number of passes.
@@ -283,7 +283,7 @@
* some optimizations when I get to it.
*
* With the addition of the fractal heap, the cache must now deal with
- * the case in which entries may be dirtied, renamed, or have their sizes
+ * the case in which entries may be dirtied, moved, or have their sizes
* changed during a flush. To allow sanity checks in this situation, the
* following two fields have been added. They are only compiled in when
* H5C_DO_SANITY_CHECKS is TRUE.
@@ -669,19 +669,19 @@
* equal to the array index has been evicted from the cache in
* the current epoch.
*
- * renames: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
+ * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
* are used to record the number of times an entry with type
- * id equal to the array index has been renamed in the current
+ * id equal to the array index has been moved in the current
* epoch.
*
- * entry_flush_renames: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
* The cells are used to record the number of times an entry
- * with type id equal to the array index has been renamed
+ * with type id equal to the array index has been moved
* during its flush callback in the current epoch.
*
- * cache_flush_renames: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
+ * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
* The cells are used to record the number of times an entry
- * with type id equal to the array index has been renamed
+ * with type id equal to the array index has been moved
* during a cache flush in the current epoch.
*
* pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
@@ -960,9 +960,9 @@ struct H5C_t
int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1];
int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1];
int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t renames[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t entry_flush_renames[H5C__MAX_NUM_TYPE_IDS + 1];
- int64_t cache_flush_renames[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
+ int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
int64_t pins[H5C__MAX_NUM_TYPE_IDS + 1];
int64_t unpins[H5C__MAX_NUM_TYPE_IDS + 1];
int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS + 1];
@@ -1493,14 +1493,14 @@ if ( ( (entry_ptr) == NULL ) || \
if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
(cache_ptr)->max_pel_size = (cache_ptr)->pel_size;
-#define H5C__UPDATE_STATS_FOR_RENAME(cache_ptr, entry_ptr) \
+#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr) \
if ( cache_ptr->flush_in_progress ) { \
- ((cache_ptr)->cache_flush_renames[(entry_ptr)->type->id])++; \
+ ((cache_ptr)->cache_flush_moves[(entry_ptr)->type->id])++; \
} \
if ( entry_ptr->flush_in_progress ) { \
- ((cache_ptr)->entry_flush_renames[(entry_ptr)->type->id])++; \
+ ((cache_ptr)->entry_flush_moves[(entry_ptr)->type->id])++; \
} \
- (((cache_ptr)->renames)[(entry_ptr)->type->id])++;
+ (((cache_ptr)->moves)[(entry_ptr)->type->id])++;
#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)\
if ( cache_ptr->flush_in_progress ) { \
@@ -1728,7 +1728,7 @@ if ( ( (entry_ptr) == NULL ) || \
#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
#define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
-#define H5C__UPDATE_STATS_FOR_RENAME(cache_ptr, entry_ptr)
+#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)
#define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr)
#define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr)
@@ -2173,7 +2173,7 @@ if ( (cache_ptr)->index_size != \
* checks in the flush routines.
*
* All this is needed as the fractal heap needs to be
- * able to dirty, resize and/or rename entries during the
+ * able to dirty, resize and/or move entries during the
* flush.
*
*-------------------------------------------------------------------------
@@ -2312,7 +2312,7 @@ if ( (cache_ptr)->index_size != \
* that are used in sanity checks in the flush routines.
*
* All this is needed as the fractal heap needs to be
- * able to dirty, resize and/or rename entries during the
+ * able to dirty, resize and/or move entries during the
* flush.
*
*-------------------------------------------------------------------------
@@ -3057,10 +3057,10 @@ if ( (cache_ptr)->index_size != \
/*-------------------------------------------------------------------------
*
- * Macro: H5C__UPDATE_RP_FOR_RENAME
+ * Macro: H5C__UPDATE_RP_FOR_MOVE
*
* Purpose: Update the replacement policy data structures for a
- * rename of the specified cache entry.
+ * move of the specified cache entry.
*
* At present, we only support the modified LRU policy, so
* this function deals with that case unconditionally. If
@@ -3071,49 +3071,12 @@ if ( (cache_ptr)->index_size != \
*
* Programmer: John Mainzer, 5/17/04
*
- * Modifications:
- *
- * JRM - 7/27/04
- * Converted the function H5C_update_rp_for_rename() to the
- * macro H5C__UPDATE_RP_FOR_RENAME in an effort to squeeze
- * a bit more performance out of the cache.
- *
- * At least for the first cut, I am leaving the comments and
- * white space in the macro. If they cause dificulties with
- * pre-processor, I'll have to remove them.
- *
- * JRM - 7/28/04
- * Split macro into two version, one supporting the clean and
- * dirty LRU lists, and the other not. Yet another attempt
- * at optimization.
- *
- * JRM - 6/23/05
- * Added the was_dirty parameter. It is possible that
- * the entry was clean when it was renamed -- if so it
- * it is in the clean LRU regardless of the current
- * value of the is_dirty field.
- *
- * At present, all renamed entries are forced to be
- * dirty. This macro is a bit more general that that,
- * to allow it to function correctly should that policy
- * be relaxed in the future.
- *
- * JRM - 3/17/06
- * Modified macro to do nothing if the entry is pinned.
- * In this case, the entry is on the pinned entry list, not
- * in the replacement policy data structures, so there is
- * nothing to be done.
- *
- * JRM - 3/28/07
- * Added sanity checks using the new is_read_only and
- * ro_ref_count fields of struct H5C_cache_entry_t.
- *
*-------------------------------------------------------------------------
*/
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
-#define H5C__UPDATE_RP_FOR_RENAME(cache_ptr, entry_ptr, was_dirty, fail_val) \
+#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \
{ \
HDassert( (cache_ptr) ); \
HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
@@ -3187,11 +3150,11 @@ if ( (cache_ptr)->index_size != \
\
/* End modified LRU specific code. */ \
} \
-} /* H5C__UPDATE_RP_FOR_RENAME */
+} /* H5C__UPDATE_RP_FOR_MOVE */
#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-#define H5C__UPDATE_RP_FOR_RENAME(cache_ptr, entry_ptr, was_dirty, fail_val) \
+#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \
{ \
HDassert( (cache_ptr) ); \
HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
@@ -3220,7 +3183,7 @@ if ( (cache_ptr)->index_size != \
\
/* End modified LRU specific code. */ \
} \
-} /* H5C__UPDATE_RP_FOR_RENAME */
+} /* H5C__UPDATE_RP_FOR_MOVE */
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 2cdfbee..3f38500 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -123,7 +123,7 @@ typedef struct H5C_t H5C_t;
#define H5C_CALLBACK__NO_FLAGS_SET 0x0
#define H5C_CALLBACK__SIZE_CHANGED_FLAG 0x1
-#define H5C_CALLBACK__RENAMED_FLAG 0x2
+#define H5C_CALLBACK__MOVED_FLAG 0x2
/* Actions that can be reported to 'notify' client callback */
typedef enum H5C_notify_action_t {
@@ -293,10 +293,10 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t * cache_ptr,
*
* This field is set to FALSE in the protect call, and may
* be set to TRUE by the
- * H5C_mark_pinned_or_protected_entry_dirty()
+ * H5C_mark_entry_dirty()
 * call at any time prior to the unprotect call.
*
- * The H5C_mark_pinned_or_protected_entry_dirty() call exists
+ * The H5C_mark_entry_dirty() call exists
* as a convenience function for the fractal heap code which
* may not know if an entry is protected or pinned, but knows
 * that it is either protected or pinned. The dirtied field was
@@ -995,7 +995,6 @@ typedef struct H5C_auto_size_ctl_t
* H5C__SET_FLUSH_MARKER_FLAG
* H5C__DELETED_FLAG
* H5C__DIRTIED_FLAG
- * H5C__SIZE_CHANGED_FLAG
* H5C__PIN_ENTRY_FLAG
* H5C__UNPIN_ENTRY_FLAG
* H5C__FREE_FILE_SPACE_FLAG
@@ -1025,14 +1024,13 @@ typedef struct H5C_auto_size_ctl_t
#define H5C__SET_FLUSH_MARKER_FLAG 0x0001
#define H5C__DELETED_FLAG 0x0002
#define H5C__DIRTIED_FLAG 0x0004
-#define H5C__SIZE_CHANGED_FLAG 0x0008
-#define H5C__PIN_ENTRY_FLAG 0x0010
-#define H5C__UNPIN_ENTRY_FLAG 0x0020
-#define H5C__FLUSH_INVALIDATE_FLAG 0x0040
-#define H5C__FLUSH_CLEAR_ONLY_FLAG 0x0080
-#define H5C__FLUSH_MARKED_ENTRIES_FLAG 0x0100
-#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x0200
-#define H5C__READ_ONLY_FLAG 0x0400
+#define H5C__PIN_ENTRY_FLAG 0x0008
+#define H5C__UNPIN_ENTRY_FLAG 0x0010
+#define H5C__FLUSH_INVALIDATE_FLAG 0x0020
+#define H5C__FLUSH_CLEAR_ONLY_FLAG 0x0040
+#define H5C__FLUSH_MARKED_ENTRIES_FLAG 0x0080
+#define H5C__FLUSH_IGNORE_PROTECTED_FLAG 0x0100
+#define H5C__READ_ONLY_FLAG 0x0200
#define H5C__FREE_FILE_SPACE_FLAG 0x0800
#define H5C__TAKE_OWNERSHIP_FLAG 0x1000
@@ -1118,13 +1116,9 @@ H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t * f,
int32_t ce_array_len,
haddr_t *ce_array_ptr);
-H5_DLL herr_t H5C_mark_pinned_entry_dirty(void * thing,
- hbool_t size_changed,
- size_t new_size);
+H5_DLL herr_t H5C_mark_entry_dirty(void *thing);
-H5_DLL herr_t H5C_mark_pinned_or_protected_entry_dirty(void *thing);
-
-H5_DLL herr_t H5C_rename_entry(H5C_t * cache_ptr,
+H5_DLL herr_t H5C_move_entry(H5C_t * cache_ptr,
const H5C_class_t * type,
haddr_t old_addr,
haddr_t new_addr);
@@ -1143,7 +1137,7 @@ H5_DLL void * H5C_protect(H5F_t * f,
H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t * cache_ptr);
-H5_DLL herr_t H5C_resize_pinned_entry(void *thing, size_t new_size);
+H5_DLL herr_t H5C_resize_entry(void *thing, size_t new_size);
H5_DLL herr_t H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
H5C_auto_size_ctl_t *config_ptr);
@@ -1176,8 +1170,7 @@ H5_DLL herr_t H5C_unprotect(H5F_t * f,
const H5C_class_t * type,
haddr_t addr,
void * thing,
- unsigned int flags,
- size_t new_size);
+ unsigned int flags);
H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
unsigned int tests);
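
For reference, a minimal caller-side sketch of the renamed entry-management routines declared above. The helper function, its name, and its arguments are illustrative only and are not part of this patch; the prototypes themselves are exactly those shown in the hunks above.

    /* Hypothetical client code -- illustrates the renamed calls only. */
    static herr_t
    example_dirty_and_move(H5C_t *cache_ptr, const H5C_class_t *type,
        haddr_t old_addr, haddr_t new_addr, void *thing)
    {
        /* Formerly H5C_mark_pinned_entry_dirty() /
         * H5C_mark_pinned_or_protected_entry_dirty(); the entry may be
         * either protected or pinned.
         */
        if(H5C_mark_entry_dirty(thing) < 0)
            return FAIL;

        /* Formerly H5C_rename_entry(); the parameter list is unchanged,
         * only the name differs.
         */
        if(H5C_move_entry(cache_ptr, type, old_addr, new_addr) < 0)
            return FAIL;

        return SUCCEED;
    }
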
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 4651d74..bd2bd7e 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -51,6 +51,9 @@
/* Headers */
/***********/
#include "H5private.h" /* Generic Functions */
+#ifdef H5_HAVE_PARALLEL
+#include "H5ACprivate.h" /* Metadata cache */
+#endif /* H5_HAVE_PARALLEL */
#include "H5Dpkg.h" /* Dataset functions */
#include "H5Eprivate.h" /* Error handling */
#include "H5FLprivate.h" /* Free Lists */
@@ -99,20 +102,13 @@
/* Local Typedefs */
/******************/
-/* Stack of chunks to remove during a "prune" iteration */
-typedef struct H5D_chunk_prune_stack_t {
- H5D_chunk_rec_t rec; /* Chunk record */
- struct H5D_chunk_prune_stack_t *next; /* Next chunk in stack */
-} H5D_chunk_prune_stack_t;
-
/* Callback info for iteration to prune chunks */
typedef struct H5D_chunk_it_ud1_t {
H5D_chunk_common_ud_t common; /* Common info for B-tree user data (must be first) */
const H5D_chk_idx_info_t *idx_info; /* Chunked index info */
const H5D_io_info_t *io_info; /* I/O info for dataset operation */
- const hsize_t *dims; /* New dataset dimensions */
- const hbool_t *shrunk_dims; /* Dimensions which have been shrunk */
- H5D_chunk_prune_stack_t *rm_stack; /* Stack of chunks outside the new dimensions */
+ const hsize_t *space_dim; /* New dataset dimensions */
+ const hbool_t *shrunk_dim; /* Dimensions which have been shrunk */
H5S_t *chunk_space; /* Dataspace for a chunk */
uint32_t elmts_per_chunk;/* Elements in chunk */
hsize_t *hyper_start; /* Starting location of hyperslab */
@@ -195,6 +191,8 @@ static herr_t H5D_chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims
const hsize_t *curr_dims);
static void *H5D_chunk_alloc(size_t size, const H5O_pline_t *pline);
static void *H5D_chunk_xfree(void *chk, const H5O_pline_t *pline);
+static void *H5D_chunk_realloc(void *chk, size_t size,
+ const H5O_pline_t *pline);
static herr_t H5D_chunk_cinfo_cache_update(H5D_chunk_cached_t *last,
const H5D_chunk_ud_t *udata);
static herr_t H5D_free_chunk_info(void *item, void *key, void *opdata);
@@ -268,9 +266,6 @@ H5FL_DEFINE(H5D_chunk_info_t);
/* Declare a free list to manage the chunk sequence information */
H5FL_BLK_DEFINE_STATIC(chunk);
-/* Declare a free list to manage H5D_chunk_sl_ck_t objects */
-H5FL_DEFINE_STATIC(H5D_chunk_prune_stack_t);
-
/*-------------------------------------------------------------------------
@@ -909,6 +904,39 @@ H5D_chunk_xfree(void *chk, const H5O_pline_t *pline)
} /* H5D_chunk_xfree() */
+/*-------------------------------------------------------------------------
+ * Function: H5D_chunk_realloc
+ *
+ * Purpose:     Reallocate space for a chunk in memory.  This routine resizes
+ *              buffers for non-filtered chunks via a block free list
+ *              and uses the library realloc() wrapper for filtered chunks.
+ *
+ * Return: Pointer to memory for chunk on success/NULL on failure
+ *
+ * Programmer: Neil Fortner
+ * May 3, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5D_chunk_realloc(void *chk, size_t size, const H5O_pline_t *pline)
+{
+ void *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_realloc)
+
+ HDassert(size);
+ HDassert(pline);
+
+ if(pline->nused > 0)
+ ret_value = H5MM_realloc(chk, size);
+ else
+ ret_value = H5FL_BLK_REALLOC(chunk, chk, size);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D_chunk_realloc() */
+
+
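
A brief usage sketch for the H5D_chunk_realloc() helper added above. The names ent (a raw data chunk cache entry), new_size, and pline (the dataset's I/O pipeline) are assumed from an enclosing function and are not introduced by this patch; the error macro assumes the usual FUNC_ENTER/done scaffolding used throughout this file.

    uint8_t *new_buf;           /* Resized chunk buffer */

    /* Resize the cached chunk buffer: filtered chunks go through the
     * library realloc() wrapper, unfiltered ones through the "chunk"
     * block free list.
     */
    if(NULL == (new_buf = (uint8_t *)H5D_chunk_realloc(ent->chunk, new_size, pline)))
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for raw data chunk")
    ent->chunk = new_buf;
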
/*--------------------------------------------------------------------------
NAME
H5D_free_chunk_info
@@ -1589,53 +1617,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5D_chunk_in_cache
- *
- * Purpose: Check if a chunk is in the cache.
- *
- * Return: TRUE or FALSE
- *
- * Programmer: Quincey Koziol
- * 1 April 2008
- *
- *-------------------------------------------------------------------------
- */
-static hbool_t
-H5D_chunk_in_cache(const H5D_t *dset, const hsize_t *chunk_offset,
- hsize_t chunk_idx)
-{
- H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);/*raw data chunk cache*/
- hbool_t found = FALSE; /*already in cache? */
-
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5D_chunk_in_cache)
-
- /* Sanity checks */
- HDassert(dset);
- HDassert(chunk_offset);
-
- /* Check if the chunk is in the cache (but hasn't been written to disk yet) */
- if(rdcc->nslots > 0) {
- unsigned idx = H5D_CHUNK_HASH(dset->shared, chunk_idx); /* Cache entry index */
- H5D_rdcc_ent_t *ent = rdcc->slot[idx]; /* Cache entry */
-
- /* Potential match... */
- if(ent) {
- size_t u; /* Local index variable */
-
- for(u = 0, found = TRUE; u < dset->shared->layout.u.chunk.ndims; u++) {
- if(chunk_offset[u] != ent->offset[u]) {
- found = FALSE;
- break;
- } /* end if */
- } /* end for */
- } /* end if */
- } /* end if */
-
- FUNC_LEAVE_NOAPI(found)
-} /* end H5D_chunk_in_cache() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5D_chunk_read
*
* Purpose: Read from a chunked dataset.
@@ -1661,7 +1642,6 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
uint32_t src_accessed_bytes = 0; /* Total accessed size in a chunk */
hbool_t skip_missing_chunks = FALSE; /* Whether to skip missing chunks */
- unsigned idx_hint = 0; /* Cache index hint */
herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_read)
@@ -1721,11 +1701,12 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
/* Get the info for the chunk in the file */
- if(H5D_chunk_get_info(io_info->dset, io_info->dxpl_id, chunk_info->coords, &udata) < 0)
+ if(H5D_chunk_lookup(io_info->dset, io_info->dxpl_id,
+ chunk_info->coords, chunk_info->index, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
/* Check for non-existant chunk & skip it if appropriate */
- if(H5F_addr_defined(udata.addr) || H5D_chunk_in_cache(io_info->dset, chunk_info->coords, chunk_info->index)
+ if(H5F_addr_defined(udata.addr) || UINT_MAX != udata.idx_hint
|| !skip_missing_chunks) {
/* Load the chunk into cache and lock it. */
if((cacheable = H5D_chunk_cacheable(io_info, udata.addr, FALSE)) < 0)
@@ -1740,7 +1721,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, FALSE, &idx_hint)))
+ if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, FALSE)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -1773,7 +1754,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed")
/* Release the cache lock on the chunk. */
- if(chunk && H5D_chunk_unlock(io_info, &udata, FALSE, idx_hint, chunk, src_accessed_bytes) < 0)
+ if(chunk && H5D_chunk_unlock(io_info, &udata, FALSE, chunk, src_accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
} /* end if */
@@ -1816,7 +1797,6 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */
hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
uint32_t dst_accessed_bytes = 0; /* Total accessed size in a chunk */
- unsigned idx_hint = 0; /* Cache index hint */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_write)
@@ -1857,7 +1837,8 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
/* Load the chunk into cache. But if the whole chunk is written,
* simply allocate space instead of load the chunk. */
- if(H5D_chunk_get_info(io_info->dset, io_info->dxpl_id, chunk_info->coords, &udata) < 0)
+ if(H5D_chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->coords,
+ chunk_info->index, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
if((cacheable = H5D_chunk_cacheable(io_info, udata.addr, TRUE)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
@@ -1878,7 +1859,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
entire_chunk = FALSE;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk, &idx_hint)))
+ if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -1930,7 +1911,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed")
/* Release the cache lock on the chunk. */
- if(chunk && H5D_chunk_unlock(io_info, &udata, TRUE, idx_hint, chunk, dst_accessed_bytes) < 0)
+ if(chunk && H5D_chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
/* Advance to next chunk in list */
@@ -2256,11 +2237,10 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5D_chunk_get_info
+ * Function: H5D_chunk_lookup
*
- * Purpose: Get the info about a chunk if file space has been
- * assigned. Save the retrieved information in the udata
- * supplied.
+ * Purpose:     Looks up a chunk in the cache and on disk, and retrieves
+ * information about that chunk.
*
* Return: Non-negative on success/Negative on failure
*
@@ -2270,12 +2250,15 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5D_chunk_get_info(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset,
- H5D_chunk_ud_t *udata)
+H5D_chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset,
+ hsize_t chunk_idx, H5D_chunk_ud_t *udata)
{
+ H5D_rdcc_ent_t *ent = NULL; /* Cache entry */
+ hbool_t found = FALSE; /* In cache? */
+ unsigned u; /* Counter */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_get_info)
+ FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_lookup)
HDassert(dset);
HDassert(dset->shared->layout.u.chunk.ndims > 0);
@@ -2298,28 +2281,49 @@ H5D_chunk_get_info(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset
udata->filter_mask = 0;
udata->addr = HADDR_UNDEF;
- /* Check for cached information */
- if(!H5D_chunk_cinfo_cache_found(&dset->shared->cache.chunk.last, udata)) {
- H5D_chk_idx_info_t idx_info; /* Chunked index info */
+ /* Check for chunk in cache */
+ if(dset->shared->cache.chunk.nslots > 0) {
+ udata->idx_hint = H5D_CHUNK_HASH(dset->shared, chunk_idx);
+ ent = dset->shared->cache.chunk.slot[udata->idx_hint];
- /* Compose chunked index info struct */
- idx_info.f = dset->oloc.file;
- idx_info.dxpl_id = dxpl_id;
- idx_info.pline = &dset->shared->dcpl_cache.pline;
- idx_info.layout = &dset->shared->layout.u.chunk;
- idx_info.storage = &dset->shared->layout.storage.u.chunk;
+ if(ent)
+ for(u = 0, found = TRUE; u < dset->shared->layout.u.chunk.ndims; u++)
+ if(chunk_offset[u] != ent->offset[u]) {
+ found = FALSE;
+ break;
+ } /* end if */
+ } /* end if */
- /* Go get the chunk information */
- if((dset->shared->layout.storage.u.chunk.ops->get_addr)(&idx_info, udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query chunk address")
+ /* Find chunk addr */
+ if(found)
+ udata->addr = ent->chunk_addr;
+ else {
+ /* Invalidate idx_hint, to signal that the chunk is not in cache */
+ udata->idx_hint = UINT_MAX;
- /* Cache the information retrieved */
- H5D_chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata);
- } /* end if */
+ /* Check for cached information */
+ if(!H5D_chunk_cinfo_cache_found(&dset->shared->cache.chunk.last, udata)) {
+ H5D_chk_idx_info_t idx_info; /* Chunked index info */
+
+ /* Compose chunked index info struct */
+ idx_info.f = dset->oloc.file;
+ idx_info.dxpl_id = dxpl_id;
+ idx_info.pline = &dset->shared->dcpl_cache.pline;
+ idx_info.layout = &dset->shared->layout.u.chunk;
+ idx_info.storage = &dset->shared->layout.storage.u.chunk;
+
+ /* Go get the chunk information */
+ if((dset->shared->layout.storage.u.chunk.ops->get_addr)(&idx_info, udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query chunk address")
+
+ /* Cache the information retrieved */
+ H5D_chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata);
+ } /* end if */
+ } /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5D_chunk_get_info() */
+} /* H5D_chunk_lookup() */
/*-------------------------------------------------------------------------
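
Taken together with the H5D_chunk_read() / H5D_chunk_write() hunks above, the revised access pattern looks roughly like the sketch below. The names dset, dxpl_id, chunk_offset, chunk_idx, io_info, dirty, and naccessed are assumed from an enclosing function, and the error macros assume the usual FUNC_ENTER/done scaffolding; none of this sketch is added by the patch itself. The point is that the cache slot hint now travels inside the udata, with UINT_MAX in udata.idx_hint meaning "not currently in the chunk cache".

    H5D_chunk_ud_t udata;       /* Chunk lookup information */
    void *chunk;                /* Locked chunk buffer */

    /* Look the chunk up in the cache first, then in the chunk index */
    if(H5D_chunk_lookup(dset, dxpl_id, chunk_offset, chunk_idx, &udata) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")

    /* Lock the chunk into the cache; udata.idx_hint records whether it
     * was already cached.
     */
    if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, FALSE)))
        HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")

    /* ... operate on the chunk buffer ... */

    /* Release the lock; there is no longer a separate idx_hint argument */
    if(H5D_chunk_unlock(io_info, &udata, dirty, chunk, naccessed) < 0)
        HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
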
@@ -2360,7 +2364,7 @@ H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *
HDassert(!ent->locked);
buf = ent->chunk;
- if(ent->dirty) {
+ if(ent->dirty && !ent->deleted) {
H5D_chunk_ud_t udata; /* pass through B-tree */
hbool_t must_insert = FALSE; /* Whether the chunk must go through the "insert" method */
@@ -2699,7 +2703,7 @@ done:
*/
void *
H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
- hbool_t relax, unsigned *idx_hint/*in,out*/)
+ hbool_t relax)
{
H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
@@ -2709,7 +2713,6 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/
H5D_rdcc_ent_t *ent = NULL; /*cache entry */
- unsigned idx = 0; /*hash index number */
hbool_t found = FALSE; /*already in cache? */
haddr_t chunk_addr = HADDR_UNDEF; /* Address of chunk on disk */
size_t chunk_size; /*size of a chunk */
@@ -2730,20 +2733,21 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
HDassert(layout->u.chunk.size > 0);
H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t);
- /* Search for the chunk in the cache */
- if(rdcc->nslots > 0) {
- idx = H5D_CHUNK_HASH(dset->shared, io_info->store->chunk.index);
- ent = rdcc->slot[idx];
+ /* Check if the chunk is in the cache */
+ if(UINT_MAX != udata->idx_hint) {
+ /* Sanity check */
+ HDassert(udata->idx_hint < rdcc->nslots);
+ HDassert(rdcc->slot[udata->idx_hint]);
- if(ent)
- for(u = 0, found = TRUE; u < layout->u.chunk.ndims; u++)
- if(io_info->store->chunk.offset[u] != ent->offset[u]) {
- found = FALSE;
- break;
- } /* end if */
- } /* end if */
+ /* Get the entry */
+ ent = rdcc->slot[udata->idx_hint];
+
+#ifndef NDEBUG
+ /* Make sure this is the right chunk */
+ for(u = 0; u < layout->u.chunk.ndims; u++)
+ HDassert(io_info->store->chunk.offset[u] == ent->offset[u]);
+#endif /* NDEBUG */
- if(found) {
/*
* Already in the cache. Count a hit.
*/
@@ -2758,6 +2762,9 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
*/
rdcc->stats.nhits++;
+ /* Still save the chunk address so the cache stays consistent */
+ chunk_addr = udata->addr;
+
if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
@@ -2799,6 +2806,9 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
else {
H5D_fill_value_t fill_status;
+ /* Sanity check */
+ HDassert(fill->alloc_time != H5D_ALLOC_TIME_EARLY);
+
/* Chunk size on disk isn't [likely] the same size as the final chunk
* size in memory, so allocate memory big enough. */
if(NULL == (chunk = H5D_chunk_alloc(chunk_size, pline)))
@@ -2816,8 +2826,7 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
/* Initialize the fill value buffer */
/* (use the compact dataset storage buffer as the fill value buffer) */
- if(H5D_fill_init(&fb_info, chunk, FALSE,
- NULL, NULL, NULL, NULL,
+ if(H5D_fill_init(&fb_info, chunk, NULL, NULL, NULL, NULL,
&dset->shared->dcpl_cache.fill, dset->shared->type,
dset->shared->type_id, (size_t)0, chunk_size, io_info->dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "can't initialize fill buffer info")
@@ -2838,75 +2847,7 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
} /* end else */
HDassert(found || chunk_size > 0);
- if(!found && rdcc->nslots > 0 && chunk_size <= rdcc->nbytes_max &&
- (!ent || !ent->locked)) {
- /*
- * Add the chunk to the cache only if the slot is not already locked.
- * Preempt enough things from the cache to make room.
- */
- if(ent) {
- if(H5D_chunk_cache_evict(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, ent, TRUE) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk from cache")
- } /* end if */
- if(H5D_chunk_cache_prune(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, chunk_size) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk(s) from cache")
-
- /* Create a new entry */
- if(NULL == (ent = H5FL_MALLOC(H5D_rdcc_ent_t)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate raw data chunk entry")
-
- ent->locked = 0;
- ent->dirty = FALSE;
- ent->chunk_addr = chunk_addr;
- ent->proxy_addr = HADDR_UNDEF;
- ent->proxy = NULL;
- for(u = 0; u < layout->u.chunk.ndims; u++)
- ent->offset[u] = io_info->store->chunk.offset[u];
- H5_ASSIGN_OVERFLOW(ent->rd_count, chunk_size, size_t, uint32_t);
- H5_ASSIGN_OVERFLOW(ent->wr_count, chunk_size, size_t, uint32_t);
- ent->chunk = (uint8_t *)chunk;
-
- /* Add it to the cache */
- HDassert(NULL == rdcc->slot[idx]);
- rdcc->slot[idx] = ent;
- ent->idx = idx;
- rdcc->nbytes_used += chunk_size;
- rdcc->nused++;
-
- /* Add it to the linked list */
- ent->next = NULL;
- if(rdcc->tail) {
- rdcc->tail->next = ent;
- ent->prev = rdcc->tail;
- rdcc->tail = ent;
- } /* end if */
- else {
- rdcc->head = rdcc->tail = ent;
- ent->prev = NULL;
- } /* end else */
-
- /* Check for SWMR writes to the file */
- if(io_info->dset->shared->layout.storage.u.chunk.ops->can_swim
- && (H5F_INTENT(io_info->dset->oloc.file) & H5F_ACC_SWMR_WRITE)) {
- /* Insert a proxy entry in the cache, to make certain that the
- * flush dependencies are maintained in the proper way for SWMR
- * access to work.
- */
- if(H5D_chunk_proxy_create(io_info->dset, io_info->dxpl_id, (H5D_chunk_common_ud_t *)udata, ent) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, NULL, "can't insert proxy for chunk in metadata cache")
- } /* end if */
-
- /* Indicate that the chunk is in the cache now */
- found = TRUE;
- } else if(!found) {
- /*
- * The chunk is larger than the entire cache so we don't cache it.
- * This is the reason all those arguments have to be repeated for the
- * unlock function.
- */
- ent = NULL;
- idx = UINT_MAX;
- } else {
+ if(ent) {
/*
* The chunk is not at the beginning of the cache; move it backward
* by one slot. This is how we implement the LRU preemption
@@ -2927,8 +2868,81 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
ent->next = ent->next->next;
ent->prev->next = ent;
} /* end if */
+ } /* end if */
+ else if(rdcc->nslots > 0 && chunk_size <= rdcc->nbytes_max) {
+ /* Calculate the index */
+ udata->idx_hint = H5D_CHUNK_HASH(dset->shared, io_info->store->chunk.index);
+
+ /* Add the chunk to the cache only if the slot is not already locked */
+ ent = rdcc->slot[udata->idx_hint];
+ if(!ent || !ent->locked) {
+ /* Preempt enough things from the cache to make room */
+ if(ent) {
+ if(H5D_chunk_cache_evict(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, ent, TRUE) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk from cache")
+ } /* end if */
+ if(H5D_chunk_cache_prune(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, chunk_size) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk(s) from cache")
+
+ /* Create a new entry */
+ if(NULL == (ent = H5FL_MALLOC(H5D_rdcc_ent_t)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate raw data chunk entry")
+
+ ent->locked = 0;
+ ent->dirty = FALSE;
+ ent->deleted = FALSE;
+ ent->chunk_addr = chunk_addr;
+ ent->proxy_addr = HADDR_UNDEF;
+ ent->proxy = NULL;
+ for(u = 0; u < layout->u.chunk.ndims; u++)
+ ent->offset[u] = io_info->store->chunk.offset[u];
+ H5_ASSIGN_OVERFLOW(ent->rd_count, chunk_size, size_t, uint32_t);
+ H5_ASSIGN_OVERFLOW(ent->wr_count, chunk_size, size_t, uint32_t);
+ ent->chunk = (uint8_t *)chunk;
+
+ /* Add it to the cache */
+ HDassert(NULL == rdcc->slot[udata->idx_hint]);
+ rdcc->slot[udata->idx_hint] = ent;
+ ent->idx = udata->idx_hint;
+ rdcc->nbytes_used += chunk_size;
+ rdcc->nused++;
+
+ /* Add it to the linked list */
+ ent->next = NULL;
+ if(rdcc->tail) {
+ rdcc->tail->next = ent;
+ ent->prev = rdcc->tail;
+ rdcc->tail = ent;
+ } /* end if */
+ else {
+ rdcc->head = rdcc->tail = ent;
+ ent->prev = NULL;
+ } /* end else */
+
+ /* Check for SWMR writes to the file */
+ if(io_info->dset->shared->layout.storage.u.chunk.ops->can_swim
+ && (H5F_INTENT(io_info->dset->oloc.file) & H5F_ACC_SWMR_WRITE)) {
+ /* Insert a proxy entry in the cache, to make certain that the
+ * flush dependencies are maintained in the proper way for SWMR
+ * access to work.
+ */
+ if(H5D_chunk_proxy_create(io_info->dset, io_info->dxpl_id, (H5D_chunk_common_ud_t *)udata, ent) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, NULL, "can't insert proxy for chunk in metadata cache")
+ } /* end if */
+ } /* end if */
+ else
+ /* We did not add the chunk to cache */
+ ent = NULL;
} /* end else */
+ if(!ent)
+ /*
+ * The chunk cannot be placed in cache so we don't cache it. This is the
+ * reason all those arguments have to be repeated for the unlock
+ * function.
+ */
+ udata->idx_hint = UINT_MAX;
+
/* Lock the chunk into the cache */
if(ent) {
HDassert(!ent->locked);
@@ -2936,9 +2950,6 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
chunk = ent->chunk;
} /* end if */
- if(idx_hint)
- *idx_hint = idx;
-
/* Set return value */
ret_value = chunk;
@@ -2980,7 +2991,7 @@ done:
*/
herr_t
H5D_chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
- hbool_t dirty, unsigned idx_hint, void *chunk, uint32_t naccessed)
+ hbool_t dirty, void *chunk, uint32_t naccessed)
{
const H5O_layout_t *layout = &(io_info->dset->shared->layout); /* Dataset layout */
const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk);
@@ -2991,7 +3002,7 @@ H5D_chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
HDassert(io_info);
HDassert(udata);
- if(UINT_MAX == idx_hint) {
+ if(UINT_MAX == udata->idx_hint) {
/*
* It's not in the cache, probably because it's too big. If it's
* dirty then flush it to disk. In any case, free the chunk.
@@ -3020,14 +3031,14 @@ H5D_chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
H5D_rdcc_ent_t *ent; /* Chunk's entry in the cache */
/* Sanity check */
- HDassert(idx_hint < rdcc->nslots);
- HDassert(rdcc->slot[idx_hint]);
- HDassert(rdcc->slot[idx_hint]->chunk == chunk);
+ HDassert(udata->idx_hint < rdcc->nslots);
+ HDassert(rdcc->slot[udata->idx_hint]);
+ HDassert(rdcc->slot[udata->idx_hint]->chunk == chunk);
/*
* It's in the cache so unlock it.
*/
- ent = rdcc->slot[idx_hint];
+ ent = rdcc->slot[udata->idx_hint];
HDassert(ent->locked);
if(dirty) {
ent->dirty = TRUE;
@@ -3211,6 +3222,9 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple dataspace info")
space_dim[space_ndims] = layout->u.chunk.dim[space_ndims];
+ /* The last dimension in chunk_offset is always 0 */
+ chunk_offset[space_ndims] = (hsize_t)0;
+
/* Check if any space dimensions are 0, if so we do not have to do anything
*/
for(op_dim=0; op_dim<space_ndims; op_dim++)
@@ -3271,30 +3285,29 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Initialize the fill value buffer */
/* (delay allocating fill buffer for VL datatypes until refilling) */
/* (casting away const OK - QAK) */
- if(H5D_fill_init(&fb_info, NULL, (hbool_t)(pline->nused > 0),
- (H5MM_allocate_t)H5D_chunk_alloc, (void *)pline,
- (H5MM_free_t)H5D_chunk_xfree, (void *)pline,
+ if(H5D_fill_init(&fb_info, NULL, (H5MM_allocate_t)H5D_chunk_alloc,
+ (void *)pline, (H5MM_free_t)H5D_chunk_xfree, (void *)pline,
&dset->shared->dcpl_cache.fill, dset->shared->type,
dset->shared->type_id, (size_t)0, orig_chunk_size, data_dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info")
fb_info_init = TRUE;
- /* Check if there are filters which need to be applied to the chunk */
- /* (only do this in advance when the chunk info can be re-used (i.e.
- * it doesn't contain any non-default VL datatype fill values)
- */
- if(!fb_info.has_vlen_fill_type && pline->nused > 0) {
- size_t buf_size = orig_chunk_size;
+ /* Check if there are filters which need to be applied to the chunk */
+ /* (only do this in advance when the chunk info can be re-used (i.e.
+ * it doesn't contain any non-default VL datatype fill values)
+ */
+ if(!fb_info.has_vlen_fill_type && pline->nused > 0) {
+ size_t buf_size = orig_chunk_size;
- /* Push the chunk through the filters */
- if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &orig_chunk_size, &buf_size, &fb_info.fill_buf) < 0)
- HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed")
+ /* Push the chunk through the filters */
+ if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &orig_chunk_size, &buf_size, &fb_info.fill_buf) < 0)
+ HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed")
#if H5_SIZEOF_SIZE_T > 4
/* Check for the chunk expanding too much to encode in a 32-bit value */
if(orig_chunk_size > ((size_t)0xffffffff))
HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length")
#endif /* H5_SIZEOF_SIZE_T > 4 */
- } /* end if */
+ } /* end if */
} /* end if */
/* Compose chunked index info struct */
@@ -3304,15 +3317,14 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
idx_info.layout = &dset->shared->layout.u.chunk;
idx_info.storage = &dset->shared->layout.storage.u.chunk;
- /* Calculate the minimum and maximum chunk offsets in each dimension */
+ /* Calculate the minimum and maximum chunk offsets in each dimension. Note
+ * that we assume here that all elements of space_dim are > 0. This is
+ * checked at the top of this function */
for(op_dim=0; op_dim<space_ndims; op_dim++) {
min_unalloc[op_dim] = ((old_dim[op_dim] + chunk_dim[op_dim] - 1)
/ chunk_dim[op_dim]) * chunk_dim[op_dim];
- if(space_dim[op_dim] == 0)
- max_unalloc[op_dim] = 0;
- else
- max_unalloc[op_dim] = ((space_dim[op_dim] - 1) / chunk_dim[op_dim])
- * chunk_dim[op_dim];
+ max_unalloc[op_dim] = ((space_dim[op_dim] - 1) / chunk_dim[op_dim])
+ * chunk_dim[op_dim];
} /* end for */
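
The loop above rounds the old extent up and the new extent down to chunk boundaries to get, per dimension, the first and last chunk offsets that still need allocation (assuming, as the new comment notes, that every space_dim element is greater than zero). A small self-contained sketch of that arithmetic; the helper names are illustrative, not the HDF5 ones:

#include <stdio.h>
#include <stdint.h>

/* First chunk offset at or past the old extent: round old_dim up to a chunk boundary. */
static uint64_t first_unalloc(uint64_t old_dim, uint64_t chunk_dim)
{
    return ((old_dim + chunk_dim - 1) / chunk_dim) * chunk_dim;
}

/* Offset of the last chunk that intersects the new extent (space_dim must be > 0). */
static uint64_t last_unalloc(uint64_t space_dim, uint64_t chunk_dim)
{
    return ((space_dim - 1) / chunk_dim) * chunk_dim;
}

int main(void)
{
    /* e.g. chunks of 10 elements, dataset grown from 25 to 42 elements */
    uint64_t min_off = first_unalloc(25, 10);   /* -> 30: chunks at 0,10,20 already exist */
    uint64_t max_off = last_unalloc(42, 10);    /* -> 40: last chunk starts at offset 40  */

    printf("allocate chunks with offsets %llu..%llu\n",
           (unsigned long long)min_off, (unsigned long long)max_off);
    return 0;
}
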
/* Loop over all chunks */
@@ -3339,23 +3351,48 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Check if allocation along this dimension is really necessary */
if(min_unalloc[op_dim] > max_unalloc[op_dim])
- carry = TRUE;
+ continue;
else {
/* Reset the chunk offset indices */
- HDmemset(chunk_offset, 0, (layout->u.chunk.ndims * sizeof(chunk_offset[0])));
+ HDmemset(chunk_offset, 0, ((unsigned)space_ndims
+ * sizeof(chunk_offset[0])));
chunk_offset[op_dim] = min_unalloc[op_dim];
carry = FALSE;
} /* end if */
while(!carry) {
- size_t chunk_size; /* Size of chunk in bytes, possibly filtered */
+ size_t chunk_size = orig_chunk_size; /* Size of chunk in bytes, possibly filtered */
#ifndef NDEBUG
/* None of the chunks should be allocated */
- if(H5D_chunk_get_info(dset, dxpl_id, chunk_offset, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
- HDassert(!H5F_addr_defined(udata.addr));
+ {
+ hsize_t chunk_idx;
+
+ /* Calculate the index of this chunk */
+ if(H5V_chunk_index((unsigned)space_ndims, chunk_offset,
+ layout->u.chunk.dim, layout->u.chunk.down_chunks,
+ &chunk_idx) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
+
+ if(H5D_chunk_lookup(dset, dxpl_id, chunk_offset, chunk_idx,
+ &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+
+ HDassert(!H5F_addr_defined(udata.addr));
+ } /* end block */
+
+ /* Make sure the chunk is really in the dataset and outside the
+ * original dimensions */
+ {
+ hbool_t outside_orig = FALSE;
+ for(i=0; i<space_ndims; i++) {
+ HDassert(chunk_offset[i] < space_dim[i]);
+ if(chunk_offset[i] >= old_dim[i])
+ outside_orig = TRUE;
+ } /* end for */
+ HDassert(outside_orig);
+ } /* end block */
#endif /* NDEBUG */
/* Check for VL datatype & non-default fill value */
@@ -3363,17 +3400,26 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Sanity check */
HDassert(should_fill);
+ /* Check to make sure the buffer is large enough. It is
+ * possible (though ill-advised) for the filter to shrink the
+ * buffer. */
+ if(fb_info.fill_buf_size < orig_chunk_size) {
+ if(NULL == (fb_info.fill_buf = H5D_chunk_realloc(
+ fb_info.fill_buf, orig_chunk_size, pline)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory reallocation failed for raw data chunk")
+ fb_info.fill_buf_size = orig_chunk_size;
+ } /* end if */
+
/* Fill the buffer with VL datatype fill values */
if(H5D_fill_refill_vl(&fb_info, fb_info.elmts_per_buf, data_dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer")
/* Check if there are filters which need to be applied to the chunk */
if(pline->nused > 0) {
- size_t buf_size = orig_chunk_size;
- size_t nbytes = fb_info.fill_buf_size;
+ size_t nbytes = orig_chunk_size;
/* Push the chunk through the filters */
- if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &nbytes, &buf_size, &fb_info.fill_buf) < 0)
+ if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &nbytes, &fb_info.fill_buf_size, &fb_info.fill_buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed")
#if H5_SIZEOF_SIZE_T > 4
@@ -3385,11 +3431,7 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
/* Keep the number of bytes the chunk turned in to */
chunk_size = nbytes;
} /* end if */
- else
- H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t);
} /* end if */
- else
- chunk_size = orig_chunk_size;
/* Initialize the chunk information */
udata.common.layout = &layout->u.chunk;
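
The reallocation guard added above exists because a filter pass may legitimately hand back a buffer smaller than the unfiltered chunk, so the fill buffer has to be grown back to orig_chunk_size before it is refilled with VL fill values. A generic sketch of that capacity check, using the C library allocator instead of the filter-aware H5D_chunk_realloc:

#include <stdlib.h>
#include <string.h>

/* Grow *buf to at least 'needed' bytes; returns 0 on success, -1 on allocation failure.
 * On success *size reflects the new capacity.  Mirrors the "filter may have shrunk the
 * buffer" check above, but with realloc() rather than the pipeline-aware allocator. */
static int ensure_capacity(void **buf, size_t *size, size_t needed)
{
    if (*size < needed) {
        void *tmp = realloc(*buf, needed);
        if (!tmp)
            return -1;
        *buf  = tmp;
        *size = needed;
    }
    return 0;
}

int main(void)
{
    size_t cap = 64;                   /* pretend a filter shrank the buffer to 64 bytes */
    void  *buf = malloc(cap);

    if (buf && ensure_capacity(&buf, &cap, 4096) == 0)
        memset(buf, 0, 4096);          /* now safe to refill the whole (unfiltered) chunk */
    free(buf);
    return 0;
}
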
@@ -3431,10 +3473,6 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
#endif /* H5_HAVE_PARALLEL */
} /* end if */
- /* Release the fill buffer if we need to re-allocate it each time */
- if(fb_info_init && fb_info.has_vlen_fill_type && pline->nused > 0)
- H5D_fill_release(&fb_info);
-
/* Increment indices */
carry = TRUE;
for(i = (int)(space_ndims - 1); i >= 0; --i) {
@@ -3452,8 +3490,8 @@ H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
} /* end while(!carry) */
/* Adjust max_unalloc_dim_idx so we don't allocate the same chunk twice.
- * Also check if this dimension started from 0 (and hence allocated all
- * of the chunks. */
+ * Also check if this dimension started from 0 (and hence allocated all
+     * of the chunks). */
if(min_unalloc[op_dim] == 0)
break;
else
@@ -3499,17 +3537,17 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D_chunk_prune_fill(const H5D_chunk_rec_t *chunk_rec, H5D_chunk_it_ud1_t *udata)
+H5D_chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
{
const H5D_io_info_t *io_info = udata->io_info; /* Local pointer to I/O info */
H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */
unsigned rank = udata->common.layout->ndims - 1; /* Dataset rank */
+ const hsize_t *chunk_offset = io_info->store->chunk.offset; /* Chunk offset */
H5S_sel_iter_t chunk_iter; /* Memory selection iteration info */
hssize_t sel_nelmts; /* Number of elements in selection */
hsize_t count[H5O_LAYOUT_NDIMS]; /* Element count of hyperslab */
void *chunk; /* The file chunk */
- unsigned idx_hint; /* Which chunk we're dealing with */
H5D_chunk_ud_t chk_udata; /* User data for locking chunk */
uint32_t bytes_accessed; /* Bytes accessed in chunk */
unsigned u; /* Local index variable */
@@ -3517,10 +3555,20 @@ H5D_chunk_prune_fill(const H5D_chunk_rec_t *chunk_rec, H5D_chunk_it_ud1_t *udata
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_prune_fill)
+ /* Get the info for the chunk in the file */
+ if(H5D_chunk_lookup(dset, io_info->dxpl_id, chunk_offset,
+ io_info->store->chunk.index, &chk_udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+
+ /* If this chunk does not exist in cache or on disk, no need to do anything
+ */
+ if(!H5F_addr_defined(chk_udata.addr) && UINT_MAX == chk_udata.idx_hint)
+ HGOTO_DONE(SUCCEED)
+
/* Initialize the fill value buffer, if necessary */
if(!udata->fb_info_init) {
H5_CHECK_OVERFLOW(udata->elmts_per_chunk, uint32_t, size_t);
- if(H5D_fill_init(&udata->fb_info, NULL, FALSE, NULL, NULL, NULL, NULL,
+ if(H5D_fill_init(&udata->fb_info, NULL, NULL, NULL, NULL, NULL,
&dset->shared->dcpl_cache.fill,
dset->shared->type, dset->shared->type_id, (size_t)udata->elmts_per_chunk,
io_info->dxpl_cache->max_temp_buf, io_info->dxpl_id) < 0)
@@ -3530,7 +3578,8 @@ H5D_chunk_prune_fill(const H5D_chunk_rec_t *chunk_rec, H5D_chunk_it_ud1_t *udata
/* Compute the # of elements to leave with existing value, in each dimension */
for(u = 0; u < rank; u++) {
- count[u] = MIN(layout->u.chunk.dim[u], (udata->dims[u] - chunk_rec->offset[u]));
+ count[u] = MIN(layout->u.chunk.dim[u], (udata->space_dim[u]
+ - chunk_offset[u]));
HDassert(count[u] > 0);
} /* end for */
@@ -3542,20 +3591,8 @@ H5D_chunk_prune_fill(const H5D_chunk_rec_t *chunk_rec, H5D_chunk_it_ud1_t *udata
if(H5S_select_hyperslab(udata->chunk_space, H5S_SELECT_NOTB, udata->hyper_start, NULL, count, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSELECT, FAIL, "unable to select hyperslab")
- /* Calculate the index of this chunk */
- if(H5V_chunk_index(rank, chunk_rec->offset, layout->u.chunk.dim, layout->u.chunk.down_chunks, &io_info->store->chunk.index) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
-
/* Lock the chunk into the cache, to get a pointer to the chunk buffer */
- /* (Casting away const OK -QAK) */
- io_info->store->chunk.offset = (hsize_t *)chunk_rec->offset;
- chk_udata.common.layout = &layout->u.chunk;
- chk_udata.common.storage = &layout->storage.u.chunk;
- chk_udata.common.offset = chunk_rec->offset;
- chk_udata.nbytes = chunk_rec->nbytes;
- chk_udata.filter_mask = chunk_rec->filter_mask;
- chk_udata.addr = chunk_rec->chunk_addr;
- if(NULL == (chunk = (void *)H5D_chunk_lock(udata->io_info, &chk_udata, FALSE, &idx_hint)))
+ if(NULL == (chunk = (void *)H5D_chunk_lock(io_info, &chk_udata, FALSE)))
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk")
@@ -3596,7 +3633,7 @@ H5D_chunk_prune_fill(const H5D_chunk_rec_t *chunk_rec, H5D_chunk_it_ud1_t *udata
bytes_accessed = (uint32_t)sel_nelmts * layout->u.chunk.dim[rank];
/* Release lock on chunk */
- if(H5D_chunk_unlock(io_info, &chk_udata, TRUE, idx_hint, chunk, bytes_accessed) < 0)
+ if(H5D_chunk_unlock(io_info, &chk_udata, TRUE, chunk, bytes_accessed) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk")
done:
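
H5D_chunk_prune_fill() keeps the elements that still fall inside the new extent and overwrites the rest of the chunk with fill values, starting from the per-dimension count computed above as MIN(chunk_dim, space_dim - chunk_offset). The sketch below reproduces only that count computation for one edge chunk; kept_counts is a hypothetical helper, not part of the library:

#include <stdio.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* For an edge chunk starting at chunk_off[], compute how many elements per
 * dimension keep their values after the dataset shrinks to space_dim[].
 * Everything outside this box is overwritten with the fill value. */
static void kept_counts(int rank, const uint64_t chunk_off[], const uint64_t chunk_dim[],
                        const uint64_t space_dim[], uint64_t count[])
{
    int u;
    for (u = 0; u < rank; u++)
        count[u] = MIN(chunk_dim[u], space_dim[u] - chunk_off[u]);
}

int main(void)
{
    /* 2-D example: 10x10 chunks, dataset shrunk to 25x17, edge chunk at (20,10) */
    const uint64_t off[2] = {20, 10}, dim[2] = {10, 10}, space[2] = {25, 17};
    uint64_t count[2];

    kept_counts(2, off, dim, space, count);
    printf("keep %llu x %llu elements, fill the rest of the chunk\n",
           (unsigned long long)count[0], (unsigned long long)count[1]);
    return 0;
}
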
@@ -3605,82 +3642,6 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5D_chunk_prune_cb
- *
- * Purpose: Search for chunks that are no longer inside the pruned
- * dataset's extent
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu
- * March 26, 2002
- *
- *-------------------------------------------------------------------------
- */
-/* ARGSUSED */
-static int
-H5D_chunk_prune_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
-{
- H5D_chunk_it_ud1_t *udata = (H5D_chunk_it_ud1_t *)_udata; /* User data */
- H5D_chunk_prune_stack_t *stack_node = NULL; /* Stack node for chunk to remove */
- unsigned rank; /* Current # of dimensions */
- hbool_t should_delete = FALSE; /* Whether the chunk should be deleted */
- hbool_t needs_fill = FALSE; /* Whether the chunk overlaps the new extent and needs fill valiues */
- unsigned u; /* Local index variable */
- int ret_value = H5_ITER_CONT; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_prune_cb)
-
- /* Figure out what chunks are no longer in use for the specified extent and release them */
- rank = udata->common.layout->ndims - 1;
- for(u = 0; u < rank; u++)
- /* The chunk record points to a chunk of storage that contains the
- * beginning of the logical address space represented by UDATA.
- */
- if(udata->shrunk_dims[u]) {
- if(chunk_rec->offset[u] >= udata->dims[u]) {
- /* Indicate that the chunk will be deleted */
- should_delete = TRUE;
-
- /* Break out of loop, we know the chunk is outside the current dimensions */
- break;
- } /* end if */
- /* Check for chunk that overlaps new extent and will need fill values */
- else if((chunk_rec->offset[u] + udata->common.layout->dim[u]) > udata->dims[u])
- /* Indicate that the chunk needs filling */
- /* (but continue in loop, since it could be outside the extent in
- * another dimension -QAK)
- */
- needs_fill = TRUE;
- } /* end if */
-
- /* Check for chunk to delete */
- if(should_delete) {
- /* Allocate space for the removal stack node */
- if(NULL == (stack_node = H5FL_MALLOC(H5D_chunk_prune_stack_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed for removal stack node")
-
- /* Store the record for the chunk */
- stack_node->rec = *chunk_rec;
-
- /* Push the chunk description onto the stack */
- stack_node->next = udata->rm_stack;
- udata->rm_stack = stack_node;
- } /* end if */
- /* Check for chunk that overlaps the new dataset dimensions and needs filling */
- else if(needs_fill)
- /* Write the fill value */
- if(H5D_chunk_prune_fill(chunk_rec, udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, H5_ITER_ERROR, "unable to write fill value")
-
-done:
- /* It is currently impossible to fail after the stack node has been
- * malloc'ed. No need to free it here on failure. */
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5D_chunk_prune_cb() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5D_chunk_prune_by_extent
*
* Purpose: This function searches for chunks that are no longer necessary
@@ -3777,11 +3738,26 @@ done:
* To release the chunks, we traverse the B-tree to obtain a list of unused
* allocated chunks, and then call H5B_remove() for each chunk.
*
+ * Modifications: Neil Fortner
+ * 4 May 2010
+ * Rewrote algorithm to work in a way similar to
+ * H5D_chunk_allocate: it now iterates over all chunks that need
+ * to be filled or removed, and does so as appropriate. This
+ * avoids various issues with coherency of locally cached data
+ * which could occur with the previous implementation.
+ *
*-------------------------------------------------------------------------
*/
herr_t
-H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dims)
+H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
{
+ hsize_t min_mod_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of first chunk to modify in each dimension */
+ hsize_t max_mod_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of last chunk to modify in each dimension */
+ hssize_t max_fill_chunk_off[H5O_LAYOUT_NDIMS]; /* Offset of last chunk that might be filled in each dimension */
+ hbool_t fill_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension needs to be filled */
+ hbool_t dims_outside_fill[H5O_LAYOUT_NDIMS]; /* Dimensions in chunk offset outside fill dimensions */
+ int ndims_outside_fill = 0; /* Number of dimensions in chunk offset outside fill dimensions */
+ hbool_t has_fill = FALSE; /* Whether there are chunks that must be filled */
H5D_chk_idx_info_t idx_info; /* Chunked index info */
H5D_io_info_t chk_io_info; /* Chunked I/O info object */
H5D_storage_t chk_store; /* Chunk storage information */
@@ -3789,21 +3765,22 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dims)
H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset's layout */
const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
- H5D_rdcc_ent_t *ent = NULL, *next = NULL; /* Cache entries */
- hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /* Current dataspace dimensions */
- hbool_t shrunk_dims[H5O_LAYOUT_NDIMS]; /* Dimensions which have shrunk */
+ H5D_rdcc_ent_t *ent = NULL; /* Cache entry */
+ int space_ndims; /* Dataset's space rank */
+ hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Current dataspace dimensions */
+    int op_dim;                     /* Current operating dimension */
+ hbool_t shrunk_dim[H5O_LAYOUT_NDIMS]; /* Dimensions which have shrunk */
H5D_chunk_it_ud1_t udata; /* Chunk index iterator user data */
hbool_t udata_init = FALSE; /* Whether the chunk index iterator user data has been initialized */
- hbool_t needs_fill; /* Whether we need to write the fill value */
- H5D_chunk_prune_stack_t *fill_stack = NULL; /* Stack of chunks to fill */
- H5D_chunk_prune_stack_t *tmp_stack; /* Temporary stack node pointer */
H5D_chunk_common_ud_t idx_udata; /* User data for index removal routine */
+ H5D_chunk_ud_t chk_udata; /* User data for getting chunk info */
H5S_t *chunk_space = NULL; /* Dataspace for a chunk */
- hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /* Chunk dimensions */
+ hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Chunk dimensions */
+ hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
hsize_t hyper_start[H5O_LAYOUT_NDIMS]; /* Starting location of hyperslab */
uint32_t elmts_per_chunk; /* Elements in chunk */
- unsigned rank; /* Current # of dimensions */
- unsigned u; /* Local index variable */
+    hbool_t carry;                  /* Flag to indicate that the chunk offset increment carries to a higher dimension */
+ int i; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5D_chunk_prune_by_extent, FAIL)
@@ -3819,19 +3796,26 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dims)
H5D_COPS_BTREE == layout->storage.u.chunk.ops));
HDassert(dxpl_cache);
- /* set the removal stack pointer in udata to NULL, so if the function fails
- * early it will not try to free the nonexistent stack */
- udata.rm_stack = NULL;
-
/* Fill the DXPL cache values for later use */
if(H5D_get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
/* Go get the rank & dimensions (including the element size) */
- rank = layout->u.chunk.ndims - 1;
- if(H5S_get_simple_extent_dims(dset->shared->space, curr_dims, NULL) < 0)
+ if((space_ndims = H5S_get_simple_extent_dims(dset->shared->space, space_dim,
+ NULL)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions")
- curr_dims[rank] = layout->u.chunk.dim[rank];
+ space_dim[space_ndims] = layout->u.chunk.dim[space_ndims];
+
+ /* The last dimension in chunk_offset is always 0 */
+ chunk_offset[space_ndims] = (hsize_t)0;
+
+    /* Check if any old dimension is 0; if so, we do not have to do anything */
+ for(op_dim=0; op_dim<space_ndims; op_dim++)
+ if(old_dim[op_dim] == 0) {
+ /* Reset any cached chunk info for this dataset */
+ H5D_chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last);
+ HGOTO_DONE(SUCCEED)
+ } /* end if */
    /* Round up to the next integer # of chunks, to accommodate partial chunks */
/* Use current dims because the indices have already been updated! -NAF */
@@ -3839,22 +3823,25 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dims)
/* (also copy the chunk dimensions into 'hsize_t' array for creating dataspace) */
/* (also compute the dimensions which have been shrunk) */
elmts_per_chunk = 1;
- for(u = 0; u < rank; u++) {
- elmts_per_chunk *= layout->u.chunk.dim[u];
- chunk_dims[u] = layout->u.chunk.dim[u];
- shrunk_dims[u] = curr_dims[u] < old_dims[u];
+ for(i = 0; i < space_ndims; i++) {
+ elmts_per_chunk *= layout->u.chunk.dim[i];
+ chunk_dim[i] = layout->u.chunk.dim[i];
+ shrunk_dim[i] = space_dim[i] < old_dim[i];
} /* end for */
/* Create a dataspace for a chunk & set the extent */
- if(NULL == (chunk_space = H5S_create_simple(rank, chunk_dims, NULL)))
+ if(NULL == (chunk_space = H5S_create_simple((unsigned)space_ndims,
+ chunk_dim, NULL)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace")
/* Reset hyperslab start array */
/* (hyperslabs will always start from origin) */
HDmemset(hyper_start, 0, sizeof(hyper_start));
- /* Set up chunked I/O info object, for operations on chunks (in callback) */
- /* (Casting away const OK -QAK) */
+ /* Set up chunked I/O info object, for operations on chunks (in callback)
+ * Note that we only need to set chunk_offset once, as the array's address
+ * will never change. */
+ chk_store.chunk.offset = chunk_offset;
H5D_BUILD_IO_INFO_RD(&chk_io_info, dset, dxpl_cache, dxpl_id, &chk_store, NULL);
/* Compose chunked index info struct */
@@ -3870,91 +3857,197 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dims)
udata.common.storage = &layout->storage.u.chunk;
udata.io_info = &chk_io_info;
udata.idx_info = &idx_info;
- udata.dims = curr_dims;
- udata.shrunk_dims = shrunk_dims;
+ udata.space_dim = space_dim;
+ udata.shrunk_dim = shrunk_dim;
udata.elmts_per_chunk = elmts_per_chunk;
udata.chunk_space = chunk_space;
udata.hyper_start = hyper_start;
udata_init = TRUE;
- /*-------------------------------------------------------------------------
- * Figure out what chunks are no longer in use for the specified extent
- * and release them from the linked list raw data cache
- *-------------------------------------------------------------------------
- */
- for(ent = rdcc->head; ent; ent = next) {
- /* Get pointer to next extry in cache, in case this one is evicted */
- next = ent->next;
-
- needs_fill = FALSE;
-
- /* Check for chunk offset outside of new dimensions */
- for(u = 0; u < rank; u++) {
- if((hsize_t)ent->offset[u] >= curr_dims[u]) {
- /* Evict the entry from the cache, but do not flush it to disk */
- if(H5D_chunk_cache_evict(dset, dxpl_id, dxpl_cache, ent, FALSE) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk")
+ /* Initialize user data for removal */
+ idx_udata.layout = &layout->u.chunk;
+ idx_udata.storage = &layout->storage.u.chunk;
- /* We don't need to write the fill value */
- needs_fill = FALSE;
+ /*
+ * Determine the chunks which need to be filled or removed
+ */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ /* Calculate the largest offset of chunks that might need to be
+ * modified in this dimension */
+ max_mod_chunk_off[op_dim] = chunk_dim[op_dim] * ((old_dim[op_dim] - 1)
+ / chunk_dim[op_dim]);
+
+ /* Calculate the largest offset of chunks that might need to be
+ * filled in this dimension */
+ if(0 == space_dim[op_dim])
+ max_fill_chunk_off[op_dim] = -1;
+ else
+ max_fill_chunk_off[op_dim] = (hssize_t)(chunk_dim[op_dim]
+ * ((MIN(space_dim[op_dim], old_dim[op_dim]) - 1)
+ / chunk_dim[op_dim]));
+
+ if(shrunk_dim[op_dim]) {
+ /* Calculate the smallest offset of chunks that might need to be
+ * modified in this dimension. Note that this array contains
+ * garbage for all dimensions which are not shrunk. These locations
+ * must not be read from! */
+ min_mod_chunk_off[op_dim] = chunk_dim[op_dim] * (space_dim[op_dim]
+ / chunk_dim[op_dim]);
+
+ /* Determine if we need to fill chunks in this dimension */
+ if((hssize_t)min_mod_chunk_off[op_dim]
+ == max_fill_chunk_off[op_dim]) {
+ fill_dim[op_dim] = TRUE;
+ has_fill = TRUE;
+ } /* end if */
+ else
+ fill_dim[op_dim] = FALSE;
+ } /* end if */
+ else
+ fill_dim[op_dim] = FALSE;
+ } /* end for */
- /* Break out of loop, chunk is evicted */
- break;
- } else if(!H5F_addr_defined(ent->chunk_addr) && shrunk_dims[u]
- && (ent->offset[u] + chunk_dims[u]) > curr_dims[u])
- /* We need to write the fill value to the unused parts of chunk */
- needs_fill = TRUE;
- } /* end for */
+ /* Check the cache for any entries that are outside the bounds. Mark these
+ * entries as deleted so they are not flushed to disk accidentally. This is
+ * only necessary if there are chunks that need to be filled. */
+ if(has_fill)
+ for(ent = rdcc->head; ent; ent = ent->next)
+ /* Check for chunk offset outside of new dimensions */
+ for(i = 0; i<space_ndims; i++)
+ if((hsize_t)ent->offset[i] >= space_dim[i]) {
+ /* Mark the entry as "deleted" */
+ ent->deleted = TRUE;
+ break;
+ } /* end if */
- if(needs_fill) {
- /* Allocate space for the stack node */
- if(NULL == (tmp_stack = H5FL_MALLOC(H5D_chunk_prune_stack_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for stack node")
+ /* Main loop: fill or remove chunks */
+ for(op_dim=0; op_dim<space_ndims; op_dim++) {
+ /* Check if modification along this dimension is really necessary */
+ if(!shrunk_dim[op_dim])
+ continue;
+ else {
+ HDassert((hsize_t) max_mod_chunk_off[op_dim]
+ >= min_mod_chunk_off[op_dim]);
- /* Set up chunk record for fill routine */
- tmp_stack->rec.nbytes = dset->shared->layout.u.chunk.size;
- HDmemcpy(tmp_stack->rec.offset, ent->offset, sizeof(tmp_stack->rec.offset));
- tmp_stack->rec.filter_mask = 0; /* Since the chunk is already in cache this doesn't matter */
- tmp_stack->rec.chunk_addr = ent->chunk_addr;
+ /* Reset the chunk offset indices */
+ HDmemset(chunk_offset, 0, ((unsigned)space_ndims
+ * sizeof(chunk_offset[0])));
+ chunk_offset[op_dim] = min_mod_chunk_off[op_dim];
+
+ /* Initialize "dims_outside_fill" array */
+ ndims_outside_fill = 0;
+ for(i=0; i<space_ndims; i++)
+ if((hssize_t)chunk_offset[i] > max_fill_chunk_off[i]) {
+ dims_outside_fill[i] = TRUE;
+ ndims_outside_fill++;
+ } /* end if */
+ else
+ dims_outside_fill[i] = FALSE;
- /* Push the chunk description onto the stack */
- tmp_stack->next = fill_stack;
- fill_stack = tmp_stack;
+ carry = FALSE;
} /* end if */
- } /* end for */
- /* Traverse the stack of chunks to be filled, filling each. We will free
- * the nodes later in the "done" section. */
- tmp_stack = fill_stack;
- while(tmp_stack) {
- /* Write the fill value */
- if(H5D_chunk_prune_fill(&(tmp_stack->rec), &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write fill value")
+ while(!carry) {
+ /* Calculate the index of this chunk */
+ if(H5V_chunk_index((unsigned)space_ndims, chunk_offset,
+ layout->u.chunk.dim, layout->u.chunk.down_chunks,
+ &(chk_io_info.store->chunk.index)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
+
+ if(0 == ndims_outside_fill) {
+ HDassert(fill_dim[op_dim]);
+ HDassert(chunk_offset[op_dim] == min_mod_chunk_off[op_dim]);
+
+ /* Fill the unused parts of the chunk */
+ if(H5D_chunk_prune_fill(&udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write fill value")
+ } /* end if */
+ else {
+#ifndef NDEBUG
+ /* Make sure this chunk is really outside the new dimensions */
+ {
+ hbool_t outside_dim = FALSE;
+
+ for(i=0; i<space_ndims; i++)
+ if(chunk_offset[i] >= space_dim[i]){
+ outside_dim = TRUE;
+ break;
+ } /* end if */
+ HDassert(outside_dim);
+ } /* end block */
+#endif /* NDEBUG */
- /* Advance the stack pointer */
- tmp_stack = tmp_stack->next;
- } /* end while */
+ /* Check if the chunk exists in cache or on disk */
+ if(H5D_chunk_lookup(dset, dxpl_id, chunk_offset,
+ chk_io_info.store->chunk.index, &chk_udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk")
+
+ /* Evict the entry from the cache if present, but do not flush
+ * it to disk */
+ if(UINT_MAX != chk_udata.idx_hint) {
+ if(H5D_chunk_cache_evict(dset, dxpl_id, dxpl_cache,
+ rdcc->slot[chk_udata.idx_hint], FALSE) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk")
+ } /* end if */
- /* Iterate over the chunks */
- if((dset->shared->layout.storage.u.chunk.ops->iterate)(&idx_info, H5D_chunk_prune_cb, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve prune chunks from index")
+ /* Remove the chunk from disk, if present */
+ if(H5F_addr_defined(chk_udata.addr)) {
+ /* Update the offset in idx_udata */
+ idx_udata.offset = chunk_offset;
- /* Traverse the stack of chunks to be deleted, removing each. We will free
- * the nodes later in the "done" section. */
- idx_udata.layout = &layout->u.chunk;
- idx_udata.storage = &layout->storage.u.chunk;
- tmp_stack = udata.rm_stack;
- while(tmp_stack) {
- /* Update the offset in idx_udata */
- idx_udata.offset = tmp_stack->rec.offset;
+ /* Remove the chunk from disk */
+ if((layout->storage.u.chunk.ops->remove)(&idx_info, &idx_udata)
+ < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to remove chunk entry from index")
+ } /* end if */
+ } /* end else */
- /* Remove the chunk from disk */
- if((layout->storage.u.chunk.ops->remove)(&idx_info, &idx_udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to remove chunk entry from index")
+ /* Increment indices */
+ carry = TRUE;
+ for(i = (int)(space_ndims - 1); i >= 0; --i) {
+ chunk_offset[i] += chunk_dim[i];
+ if(chunk_offset[i] > (hsize_t) max_mod_chunk_off[i]) {
+                /* Went past the maximum offset in this dimension; "wrap around"
+                 * and check whether this dimension is back within the fill limit */
+ if(i == op_dim) {
+ chunk_offset[i] = min_mod_chunk_off[i];
+ if(dims_outside_fill[i] && fill_dim[i]) {
+ dims_outside_fill[i] = FALSE;
+ ndims_outside_fill--;
+ } /* end if */
+ } /* end if */
+ else {
+ chunk_offset[i] = 0;
+ if(dims_outside_fill[i] && max_fill_chunk_off[i] >= 0) {
+ dims_outside_fill[i] = FALSE;
+ ndims_outside_fill--;
+ } /* end if */
+ } /* end else */
+ } /* end if */
+ else {
+ /* Check if we just went outside the fill dimension */
+ if(!dims_outside_fill[i] && (hssize_t)chunk_offset[i]
+ > max_fill_chunk_off[i]) {
+ dims_outside_fill[i] = TRUE;
+ ndims_outside_fill++;
+ } /* end if */
+
+ /* We found the next chunk, so leave the loop */
+ carry = FALSE;
+ break;
+ } /* end else */
+ } /* end for */
+ } /* end while(!carry) */
- /* Advance the stack pointer */
- tmp_stack = tmp_stack->next;
- } /* end while */
+ /* Adjust max_mod_chunk_off so we don't modify the same chunk twice.
+ * Also check if this dimension started from 0 (and hence removed all
+ * of the chunks). */
+ if(min_mod_chunk_off[op_dim] == 0)
+ break;
+ else
+ max_mod_chunk_off[op_dim] = min_mod_chunk_off[op_dim]
+ - chunk_dim[op_dim];
+ } /* end for(op_dim=0...) */
/* Reset any cached chunk info for this dataset */
H5D_chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last);
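
The rewritten traversal above walks, for each shrunken dimension, every chunk whose offset in that dimension lies at or beyond the new extent, advancing the offset odometer-style and deciding per chunk whether to refill it or remove it from the index. The standalone sketch below follows the same traversal shape but, as a simplification, recomputes the inside/outside test per chunk instead of maintaining the ndims_outside_fill counter incrementally; all names are illustrative and degenerate extents (any old dimension of zero) are not handled:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_RANK 8

/* Visit every chunk affected by shrinking a dataset from old_dim[] to new_dim[]
 * exactly once, reporting whether it must be refilled (still intersects the new
 * extent) or removed (entirely outside it). */
static void prune_walk(int rank, const uint64_t chunk_dim[],
                       const uint64_t old_dim[], const uint64_t new_dim[])
{
    uint64_t max_off[MAX_RANK];    /* last chunk offset to touch, per dimension */
    uint64_t off[MAX_RANK];        /* current chunk offset */
    int op_dim, i;

    for (i = 0; i < rank; i++)
        max_off[i] = ((old_dim[i] - 1) / chunk_dim[i]) * chunk_dim[i];

    for (op_dim = 0; op_dim < rank; op_dim++) {
        uint64_t min_off = (new_dim[op_dim] / chunk_dim[op_dim]) * chunk_dim[op_dim];
        int carry = 0;

        if (new_dim[op_dim] >= old_dim[op_dim])
            continue;                              /* this dimension did not shrink */

        memset(off, 0, sizeof(off));
        off[op_dim] = min_off;

        while (!carry) {
            int inside = 1;                        /* does the chunk intersect the new extent? */
            for (i = 0; i < rank; i++)
                if (off[i] >= new_dim[i])
                    inside = 0;

            printf("chunk at offset (");
            for (i = 0; i < rank; i++)
                printf("%s%llu", i ? ", " : "", (unsigned long long)off[i]);
            printf("): %s\n", inside ? "write fill values" : "remove from index");

            /* Increment indices; op_dim wraps back to min_off, the others back to 0 */
            carry = 1;
            for (i = rank - 1; i >= 0; --i) {
                off[i] += chunk_dim[i];
                if (off[i] > max_off[i])
                    off[i] = (i == op_dim) ? min_off : 0;
                else {
                    carry = 0;
                    break;
                }
            }
        }

        /* Don't visit the same plane of chunks again for later op_dims */
        if (min_off == 0)
            break;
        max_off[op_dim] = min_off - chunk_dim[op_dim];
    }
}

int main(void)
{
    const uint64_t chunk_dim[2] = {10, 10}, old_dim[2] = {30, 30}, new_dim[2] = {15, 25};
    prune_walk(2, chunk_dim, old_dim, new_dim);
    return 0;
}
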
@@ -3968,24 +4061,6 @@ done:
HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info")
} /* end if */
- /* Free stack of filled chunks */
- tmp_stack = fill_stack;
- while(tmp_stack) {
- /* Free the stack node and advance the stack pointer */
- tmp_stack = tmp_stack->next;
- fill_stack = H5FL_FREE(H5D_chunk_prune_stack_t, fill_stack);
- fill_stack = tmp_stack;
- } /* end while */
-
- /* Free stack of removed chunks */
- tmp_stack = udata.rm_stack;
- while(tmp_stack) {
- /* Free the stack node and advance the stack pointer */
- tmp_stack = tmp_stack->next;
- udata.rm_stack = H5FL_FREE(H5D_chunk_prune_stack_t, udata.rm_stack);
- udata.rm_stack = tmp_stack;
- } /* end while */
-
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_chunk_prune_by_extent() */
@@ -5048,8 +5123,7 @@ H5D_nonexistent_readvv(const H5D_io_info_t *io_info,
buf = (unsigned char *)io_info->u.rbuf + mem_offset_arr[u];
/* Initialize the fill value buffer */
- if(H5D_fill_init(&fb_info, buf, FALSE,
- NULL, NULL, NULL, NULL,
+ if(H5D_fill_init(&fb_info, buf, NULL, NULL, NULL, NULL,
&dset->shared->dcpl_cache.fill, dset->shared->type,
dset->shared->type_id, (size_t)0, size, io_info->dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info")
diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c
index b12eb3d..f0929d9 100644
--- a/src/H5Dcompact.c
+++ b/src/H5Dcompact.c
@@ -134,7 +134,7 @@ H5D_compact_fill(H5D_t *dset, hid_t dxpl_id)
/* Initialize the fill value buffer */
/* (use the compact dataset storage buffer as the fill value buffer) */
- if(H5D_fill_init(&fb_info, dset->shared->layout.storage.u.compact.buf, FALSE,
+ if(H5D_fill_init(&fb_info, dset->shared->layout.storage.u.compact.buf,
NULL, NULL, NULL, NULL,
&dset->shared->dcpl_cache.fill, dset->shared->type,
dset->shared->type_id, (size_t)0, dset->shared->layout.storage.u.compact.size, dxpl_id) < 0)
diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c
index 567f4c9..b674f44 100644
--- a/src/H5Dcontig.c
+++ b/src/H5Dcontig.c
@@ -222,7 +222,7 @@ H5D_contig_fill(H5D_t *dset, hid_t dxpl_id)
H5_ASSIGN_OVERFLOW(npoints, snpoints, hssize_t, size_t);
/* Initialize the fill value buffer */
- if(H5D_fill_init(&fb_info, NULL, FALSE, NULL, NULL, NULL, NULL,
+ if(H5D_fill_init(&fb_info, NULL, NULL, NULL, NULL, NULL,
&dset->shared->dcpl_cache.fill,
dset->shared->type, dset->shared->type_id, npoints,
dxpl_cache->max_temp_buf, my_dxpl_id) < 0)
diff --git a/src/H5Dfill.c b/src/H5Dfill.c
index f418a81..1929a5a 100644
--- a/src/H5Dfill.c
+++ b/src/H5Dfill.c
@@ -363,7 +363,6 @@ done:
*/
herr_t
H5D_fill_init(H5D_fill_buf_info_t *fb_info, void *caller_fill_buf,
- hbool_t alloc_vl_during_refill,
H5MM_allocate_t alloc_func, void *alloc_info,
H5MM_free_t free_func, void *free_info,
const H5O_fill_t *fill, const H5T_t *dset_type, hid_t dset_type_id,
@@ -386,7 +385,6 @@ H5D_fill_init(H5D_fill_buf_info_t *fb_info, void *caller_fill_buf,
fb_info->fill = fill;
fb_info->file_type = dset_type;
fb_info->file_tid = dset_type_id;
- fb_info->alloc_vl_during_refill = alloc_vl_during_refill;
fb_info->fill_alloc_func = alloc_func;
fb_info->fill_alloc_info = alloc_info;
fb_info->fill_free_func = free_func;
@@ -434,16 +432,12 @@ H5D_fill_init(H5D_fill_buf_info_t *fb_info, void *caller_fill_buf,
fb_info->use_caller_fill_buf = TRUE;
} /* end if */
else {
- if(alloc_vl_during_refill)
- fb_info->fill_buf = NULL;
- else {
- if(alloc_func)
- fb_info->fill_buf = alloc_func(fb_info->fill_buf_size, alloc_info);
- else
- fb_info->fill_buf = H5FL_BLK_MALLOC(non_zero_fill, fb_info->fill_buf_size);
- if(NULL == fb_info->fill_buf)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for fill buffer")
- } /* end else */
+ if(alloc_func)
+ fb_info->fill_buf = alloc_func(fb_info->fill_buf_size, alloc_info);
+ else
+ fb_info->fill_buf = H5FL_BLK_MALLOC(non_zero_fill, fb_info->fill_buf_size);
+ if(NULL == fb_info->fill_buf)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for fill buffer")
} /* end else */
/* Get the datatype conversion path for this operation */
@@ -570,22 +564,14 @@ herr_t
H5D_fill_refill_vl(H5D_fill_buf_info_t *fb_info, size_t nelmts, hid_t dxpl_id)
{
herr_t ret_value = SUCCEED; /* Return value */
+ void * buf = NULL; /* Temporary fill buffer */
FUNC_ENTER_NOAPI(H5D_fill_refill_vl, FAIL)
/* Check args */
HDassert(fb_info);
HDassert(fb_info->has_vlen_fill_type);
-
- /* Check if we should allocate the fill buffer now */
- if(fb_info->alloc_vl_during_refill) {
- if(fb_info->fill_alloc_func)
- fb_info->fill_buf = fb_info->fill_alloc_func(fb_info->fill_buf_size, fb_info->fill_alloc_info);
- else
- fb_info->fill_buf = H5FL_BLK_MALLOC(non_zero_fill, fb_info->fill_buf_size);
- if(NULL == fb_info->fill_buf)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for fill buffer")
- } /* end if */
+ HDassert(fb_info->fill_buf);
/* Make a copy of the (disk-based) fill value into the buffer */
HDmemcpy(fb_info->fill_buf, fb_info->fill->buf, fb_info->file_elmt_size);
@@ -605,11 +591,32 @@ H5D_fill_refill_vl(H5D_fill_buf_info_t *fb_info, size_t nelmts, hid_t dxpl_id)
if(H5T_path_bkg(fb_info->mem_to_dset_tpath))
HDmemset(fb_info->bkg_buf, 0, fb_info->bkg_buf_size);
+ /* Make a copy of the fill buffer so we can free dynamic elements after conversion */
+ if(fb_info->fill_alloc_func)
+ buf = fb_info->fill_alloc_func(fb_info->fill_buf_size, fb_info->fill_alloc_info);
+ else
+ buf = H5FL_BLK_MALLOC(non_zero_fill, fb_info->fill_buf_size);
+    if(NULL == buf)
+        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for temporary fill buffer")
+    HDmemcpy(buf, fb_info->fill_buf, fb_info->fill_buf_size);
+
/* Type convert the dataset buffer, to copy any VL components */
if(H5T_convert(fb_info->mem_to_dset_tpath, fb_info->mem_tid, fb_info->file_tid, nelmts, (size_t)0, (size_t)0, fb_info->fill_buf, fb_info->bkg_buf, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "data type conversion failed")
done:
+ if (buf) {
+ /* Free dynamically allocated VL elements in fill buffer */
+ if (fb_info->fill->type)
+ H5T_vlen_reclaim_elmt(buf, fb_info->fill->type, dxpl_id);
+ else
+ H5T_vlen_reclaim_elmt(buf, fb_info->mem_type, dxpl_id);
+
+ /* Free temporary fill buffer */
+ if(fb_info->fill_free_func)
+ fb_info->fill_free_func(buf, fb_info->fill_free_info);
+ else
+ buf = H5FL_BLK_FREE(non_zero_fill, buf);
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_fill_refill_vl() */
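
The H5D_fill_refill_vl() change above snapshots the fill buffer before the in-place datatype conversion so that the variable-length allocations referenced by the pre-conversion data can be reclaimed afterwards instead of leaking. The sketch below shows the same ownership pattern with a toy element type that owns a heap pointer; vl_elem_t, convert_in_place and reclaim are invented for illustration, whereas the real code reclaims through the HDF5 datatype machinery:

#include <stdlib.h>
#include <string.h>

/* Toy "variable-length" element: a heap buffer plus its length. */
typedef struct vl_elem_t {
    char  *p;
    size_t len;
} vl_elem_t;

/* Free whatever heap storage an element points at. */
static void reclaim(vl_elem_t *e)
{
    free(e->p);
    e->p = NULL;
    e->len = 0;
}

/* "Conversion" that replaces the element's storage with a fresh allocation,
 * the way an in-place mem->file VL conversion allocates new VL data. */
static int convert_in_place(vl_elem_t *e)
{
    char *q = malloc(e->len);
    if (!q)
        return -1;
    memcpy(q, e->p, e->len);
    e->p = q;                        /* old pointer now only reachable via the snapshot */
    return 0;
}

int main(void)
{
    vl_elem_t fill = { malloc(8), 8 };
    vl_elem_t snapshot;

    if (!fill.p)
        return 1;
    memset(fill.p, 0, 8);

    snapshot = fill;                 /* copy made *before* conversion, as in the patch */
    if (convert_in_place(&fill) < 0) {
        reclaim(&fill);
        return 1;
    }

    reclaim(&snapshot);              /* release the pre-conversion allocation */
    reclaim(&fill);                  /* release the converted element */
    return 0;
}
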
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 3157e66..032006f 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -2308,10 +2308,10 @@ H5D_set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
*/
if(shrink && H5D_CHUNKED == dset->shared->layout.type &&
(*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) {
- /* Remove excess chunks */
- if(H5D_chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks ")
- } /* end if */
+ /* Remove excess chunks */
+ if(H5D_chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks")
+ } /* end if */
/* Mark the dataspace as dirty, for later writing to the file */
dset->shared->space_dirty = TRUE;
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index fb0c1f4..ad9b737 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -861,7 +861,8 @@ H5D_link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type
HDmemset(coords, 0, sizeof(coords));
/* Look up address of chunk */
- if(H5D_chunk_get_info(io_info->dset, io_info->dxpl_id, coords, &udata) < 0)
+ if(H5D_chunk_lookup(io_info->dset, io_info->dxpl_id, coords,
+ io_info->store->chunk.index, &udata) < 0)
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk info from skipped list")
ctg_store.contig.dset_addr = udata.addr;
@@ -1212,7 +1213,6 @@ if(H5DEBUG(D))
H5D_chunk_ud_t udata; /* B-tree pass-through */
void *chunk; /* Pointer to the data chunk in cache */
uint32_t accessed_bytes; /* Total accessed size in a chunk */
- unsigned idx_hint = 0; /* Cache index hint */
htri_t cacheable; /* Whether the chunk is cacheable */
/* Switch to independent I/O */
@@ -1225,7 +1225,8 @@ if(H5DEBUG(D))
/* Load the chunk into cache. But if the whole chunk is written,
     * simply allocate space instead of loading the chunk.
*/
- if(H5D_chunk_get_info(io_info->dset, io_info->dxpl_id, chunk_info->coords, &udata) < 0)
+ if(H5D_chunk_lookup(io_info->dset, io_info->dxpl_id,
+ chunk_info->coords, chunk_info->index, &udata) < 0)
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk info from skipped list")
/* Load the chunk into cache and lock it. */
@@ -1244,7 +1245,7 @@ if(H5DEBUG(D))
entire_chunk = FALSE;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk, &idx_hint)))
+ if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -1276,7 +1277,7 @@ if(H5DEBUG(D))
} /* end else */
/* Release the cache lock on the chunk. */
- if(chunk && H5D_chunk_unlock(io_info, &udata, (io_info->op_type == H5D_IO_OP_WRITE), idx_hint, chunk, accessed_bytes) < 0)
+ if(chunk && H5D_chunk_unlock(io_info, &udata, (io_info->op_type == H5D_IO_OP_WRITE), chunk, accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
} /* end if */
#else /* !defined(H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS) || !defined(H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS) */
@@ -1441,7 +1442,8 @@ if(H5DEBUG(D)) {
#endif /* H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS */
/* Retrieve the chunk's address */
- if(H5D_chunk_get_info(io_info->dset, io_info->dxpl_id, chunk_info->coords, &udata) < 0)
+ if(H5D_chunk_lookup(io_info->dset, io_info->dxpl_id, chunk_info->coords,
+ chunk_info->index, &udata) < 0)
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk info from skipped list")
/* Independent I/O */
@@ -1449,7 +1451,6 @@ if(H5DEBUG(D)) {
void *chunk; /* Pointer to the data chunk in cache */
H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */
uint32_t accessed_bytes = 0; /* Total accessed size in a chunk */
- unsigned idx_hint = 0; /* Cache index hint */
htri_t cacheable; /* Whether the chunk is cacheable */
/* Switch to independent I/O */
@@ -1472,7 +1473,7 @@ if(H5DEBUG(D)) {
entire_chunk = FALSE;
/* Lock the chunk into the cache */
- if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk, &idx_hint)))
+ if(NULL == (chunk = H5D_chunk_lock(io_info, &udata, entire_chunk)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Set up the storage buffer information for this chunk */
@@ -1505,7 +1506,7 @@ if(H5DEBUG(D)) {
/* Release the cache lock on the chunk. */
if(chunk)
- if(H5D_chunk_unlock(io_info, &udata, (io_info->op_type == H5D_IO_OP_WRITE), idx_hint, chunk, accessed_bytes) < 0)
+ if(H5D_chunk_unlock(io_info, &udata, (io_info->op_type == H5D_IO_OP_WRITE), chunk, accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
} /* end if */
else { /*collective I/O */
@@ -1755,7 +1756,8 @@ if(H5DEBUG(D))
H5D_chunk_ud_t udata; /* User data for querying chunk info */
/* Get address of chunk */
- if(H5D_chunk_get_info(io_info->dset, io_info->dxpl_id, chunk_info->coords, &udata) < 0)
+ if(H5D_chunk_lookup(io_info->dset, io_info->dxpl_id,
+ chunk_info->coords, chunk_info->index, &udata) < 0)
HGOTO_ERROR(H5E_STORAGE, H5E_CANTGET, FAIL, "couldn't get chunk info from skipped list")
chunk_addr = udata.addr;
} /* end if */
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index 0788637..d5a7e45 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -272,6 +272,7 @@ typedef struct H5D_chunk_ud_t {
H5D_chunk_common_ud_t common; /* Common info for B-tree user data (must be first) */
/* Upward */
+ unsigned idx_hint; /*index of chunk in cache, if present */
uint32_t nbytes; /*size of stored data */
unsigned filter_mask; /*excluded filters */
haddr_t addr; /*file address of chunk */
@@ -471,7 +472,6 @@ typedef struct {
/* Typedef for filling a buffer with a fill value */
typedef struct H5D_fill_buf_info_t {
- hbool_t alloc_vl_during_refill; /* Whether to allocate VL-datatype fill buffer during refill */
H5MM_allocate_t fill_alloc_func; /* Routine to call for allocating fill buffer */
void *fill_alloc_info; /* Extra info for allocation routine */
H5MM_free_t fill_free_func; /* Routine to call for freeing fill buffer */
@@ -509,6 +509,7 @@ typedef struct {
typedef struct H5D_rdcc_ent_t {
hbool_t locked; /*entry is locked in cache */
hbool_t dirty; /*needs to be written to disk? */
+ hbool_t deleted; /*chunk about to be deleted (do not flush) */
hsize_t offset[H5O_LAYOUT_NDIMS]; /*chunk name */
uint32_t rd_count; /*bytes remaining to be read */
uint32_t wr_count; /*bytes remaining to be written */
@@ -636,12 +637,12 @@ H5_DLL herr_t H5D_chunk_set_info(const H5D_t *dset);
H5_DLL herr_t H5D_chunk_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
hid_t dapl_id);
H5_DLL hbool_t H5D_chunk_is_space_alloc(const H5O_storage_t *storage);
-H5_DLL herr_t H5D_chunk_get_info(const H5D_t *dset, hid_t dxpl_id,
- const hsize_t *chunk_offset, H5D_chunk_ud_t *udata);
+H5_DLL herr_t H5D_chunk_lookup(const H5D_t *dset, hid_t dxpl_id,
+ const hsize_t *chunk_offset, hsize_t chunk_idx, H5D_chunk_ud_t *udata);
H5_DLL void *H5D_chunk_lock(const H5D_io_info_t *io_info,
- H5D_chunk_ud_t *udata, hbool_t relax, unsigned *idx_hint/*in,out*/);
+ H5D_chunk_ud_t *udata, hbool_t relax);
H5_DLL herr_t H5D_chunk_unlock(const H5D_io_info_t *io_info,
- const H5D_chunk_ud_t *udata, hbool_t dirty, unsigned idx_hint, void *chunk,
+ const H5D_chunk_ud_t *udata, hbool_t dirty, void *chunk,
uint32_t naccessed);
H5_DLL herr_t H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id,
const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t reset);
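
The new H5D_chunk_lookup() prototype above takes a precomputed chunk index, which callers obtain by linearizing the chunk's offset (the H5V_chunk_index() calls elsewhere in this patch). A standalone re-derivation of that row-major linearization, using per-dimension chunk counts and Horner's rule rather than the precomputed down_chunks strides; chunk_index is a hypothetical helper:

#include <stdio.h>
#include <stdint.h>

/* Linearize a chunk offset into a scalar chunk index, row-major:
 * scale each offset down to a chunk coordinate, then fold the coordinates
 * together with the per-dimension chunk counts. */
static uint64_t chunk_index(int rank, const uint64_t offset[],
                            const uint64_t chunk_dim[], const uint64_t nchunks[])
{
    uint64_t idx = 0;
    int i;

    for (i = 0; i < rank; i++)
        idx = idx * nchunks[i] + offset[i] / chunk_dim[i];
    return idx;
}

int main(void)
{
    /* 3 x 4 grid of 10x10 chunks; the chunk starting at (20, 30) is row 2, column 3 */
    const uint64_t offset[2] = {20, 30}, dim[2] = {10, 10}, nchunks[2] = {3, 4};

    printf("chunk index = %llu\n",     /* 2*4 + 3 = 11 */
           (unsigned long long)chunk_index(2, offset, dim, nchunks));
    return 0;
}
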
@@ -649,7 +650,7 @@ H5_DLL herr_t H5D_chunk_allocated(H5D_t *dset, hid_t dxpl_id, hsize_t *nbytes);
H5_DLL herr_t H5D_chunk_allocate(H5D_t *dset, hid_t dxpl_id,
hbool_t full_overwrite, hsize_t old_dim[]);
H5_DLL herr_t H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id,
- const hsize_t *old_dims);
+ const hsize_t *old_dim);
#ifdef H5_HAVE_PARALLEL
H5_DLL herr_t H5D_chunk_addrmap(const H5D_io_info_t *io_info, haddr_t chunk_addr[]);
#endif /* H5_HAVE_PARALLEL */
@@ -681,7 +682,6 @@ H5_DLL herr_t H5D_efl_bh_info(H5F_t *f, hid_t dxpl_id, H5O_efl_t *efl,
H5_DLL herr_t H5D_fill(const void *fill, const H5T_t *fill_type, void *buf,
const H5T_t *buf_type, const H5S_t *space, hid_t dxpl_id);
H5_DLL herr_t H5D_fill_init(H5D_fill_buf_info_t *fb_info, void *caller_fill_buf,
- hbool_t alloc_vl_during_refill,
H5MM_allocate_t alloc_func, void *alloc_info,
H5MM_free_t free_func, void *free_info,
const H5O_fill_t *fill, const H5T_t *dset_type, hid_t dset_type_id,
diff --git a/src/H5Dproxy.c b/src/H5Dproxy.c
index 6d4311c..8a94402 100644
--- a/src/H5Dproxy.c
+++ b/src/H5Dproxy.c
@@ -486,7 +486,7 @@ HDfprintf(stderr, "%s: ent->proxy_addr = %a, dirty = %t\n", FUNC, ent->proxy_add
/* Check whether to mark the proxy as dirty */
if(dirty) {
- if(H5AC_mark_pinned_or_protected_entry_dirty(ent->proxy) < 0)
+ if(H5AC_mark_entry_dirty(ent->proxy) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTMARKDIRTY, FAIL, "can't mark chunk proxy entry in metadata cache as dirty")
} /* end if */
diff --git a/src/H5EAhdr.c b/src/H5EAhdr.c
index 1f6c79c..a7f12a5 100644
--- a/src/H5EAhdr.c
+++ b/src/H5EAhdr.c
@@ -604,7 +604,7 @@ H5EA__hdr_modified(H5EA_hdr_t *hdr))
HDassert(hdr->f);
/* Mark header as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(hdr) < 0)
+ if(H5AC_mark_entry_dirty(hdr) < 0)
H5E_THROW(H5E_CANTMARKDIRTY, "unable to mark extensible array header as dirty")
CATCH
diff --git a/src/H5Edefin.h b/src/H5Edefin.h
index 753031d..887c2bb 100644
--- a/src/H5Edefin.h
+++ b/src/H5Edefin.h
@@ -108,6 +108,7 @@ hid_t H5E_CANTDELETE_g = FAIL; /* Can't delete message */
hid_t H5E_BADITER_g = FAIL; /* Iteration failed */
hid_t H5E_CANTPACK_g = FAIL; /* Can't pack messages */
hid_t H5E_CANTRESET_g = FAIL; /* Can't reset object */
+hid_t H5E_CANTRENAME_g = FAIL; /* Unable to rename object */
/* System level errors */
hid_t H5E_SYSERRSTR_g = FAIL; /* System error message */
@@ -157,7 +158,6 @@ hid_t H5E_PROTECT_g = FAIL; /* Protected metadata error */
hid_t H5E_NOTCACHED_g = FAIL; /* Metadata not currently cached */
hid_t H5E_SYSTEM_g = FAIL; /* Internal error detected */
hid_t H5E_CANTINS_g = FAIL; /* Unable to insert metadata into cache */
-hid_t H5E_CANTRENAME_g = FAIL; /* Unable to rename metadata */
hid_t H5E_CANTPROTECT_g = FAIL; /* Unable to protect metadata */
hid_t H5E_CANTUNPROTECT_g = FAIL; /* Unable to unprotect metadata */
hid_t H5E_CANTPIN_g = FAIL; /* Unable to pin cache entry */
@@ -174,7 +174,7 @@ hid_t H5E_CANTNOTIFY_g = FAIL; /* Unable to notify object about action
hid_t H5E_TRAVERSE_g = FAIL; /* Link traversal failure */
hid_t H5E_NLINKS_g = FAIL; /* Too many soft links in path */
hid_t H5E_NOTREGISTERED_g = FAIL; /* Link class not registered */
-hid_t H5E_CANTMOVE_g = FAIL; /* Move callback returned error */
+hid_t H5E_CANTMOVE_g = FAIL; /* Can't move object */
hid_t H5E_CANTSORT_g = FAIL; /* Can't sort objects */
/* Parallel MPI errors */
diff --git a/src/H5Einit.h b/src/H5Einit.h
index a8ccedc..802c94a 100644
--- a/src/H5Einit.h
+++ b/src/H5Einit.h
@@ -398,6 +398,11 @@ if((msg = H5E_create_msg(cls, H5E_MINOR, "Can't reset object"))==NULL)
HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
if((H5E_CANTRESET_g = H5I_register(H5I_ERROR_MSG, msg, FALSE))<0)
HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
+assert(H5E_CANTRENAME_g==(-1));
+if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to rename object"))==NULL)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
+if((H5E_CANTRENAME_g = H5I_register(H5I_ERROR_MSG, msg, FALSE))<0)
+ HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
/* System level errors */
assert(H5E_SYSERRSTR_g==(-1));
@@ -587,11 +592,6 @@ if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to insert metadata into cache")
HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
if((H5E_CANTINS_g = H5I_register(H5I_ERROR_MSG, msg, FALSE))<0)
HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
-assert(H5E_CANTRENAME_g==(-1));
-if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to rename metadata"))==NULL)
- HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
-if((H5E_CANTRENAME_g = H5I_register(H5I_ERROR_MSG, msg, FALSE))<0)
- HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
assert(H5E_CANTPROTECT_g==(-1));
if((msg = H5E_create_msg(cls, H5E_MINOR, "Unable to protect metadata"))==NULL)
HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
@@ -665,7 +665,7 @@ if((msg = H5E_create_msg(cls, H5E_MINOR, "Link class not registered"))==NULL)
if((H5E_NOTREGISTERED_g = H5I_register(H5I_ERROR_MSG, msg, FALSE))<0)
HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
assert(H5E_CANTMOVE_g==(-1));
-if((msg = H5E_create_msg(cls, H5E_MINOR, "Move callback returned error"))==NULL)
+if((msg = H5E_create_msg(cls, H5E_MINOR, "Can't move object"))==NULL)
HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
if((H5E_CANTMOVE_g = H5I_register(H5I_ERROR_MSG, msg, FALSE))<0)
HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
diff --git a/src/H5Epubgen.h b/src/H5Epubgen.h
index d793096..f6a20f2 100644
--- a/src/H5Epubgen.h
+++ b/src/H5Epubgen.h
@@ -176,6 +176,7 @@ H5_DLLVAR hid_t H5E_CANTSHRINK_g; /* Can't shrink container */
#define H5E_BADITER (H5OPEN H5E_BADITER_g)
#define H5E_CANTPACK (H5OPEN H5E_CANTPACK_g)
#define H5E_CANTRESET (H5OPEN H5E_CANTRESET_g)
+#define H5E_CANTRENAME (H5OPEN H5E_CANTRENAME_g)
H5_DLLVAR hid_t H5E_LINKCOUNT_g; /* Bad object header link count */
H5_DLLVAR hid_t H5E_VERSION_g; /* Wrong version number */
H5_DLLVAR hid_t H5E_ALIGNMENT_g; /* Alignment error */
@@ -184,6 +185,7 @@ H5_DLLVAR hid_t H5E_CANTDELETE_g; /* Can't delete message */
H5_DLLVAR hid_t H5E_BADITER_g; /* Iteration failed */
H5_DLLVAR hid_t H5E_CANTPACK_g; /* Can't pack messages */
H5_DLLVAR hid_t H5E_CANTRESET_g; /* Can't reset object */
+H5_DLLVAR hid_t H5E_CANTRENAME_g; /* Unable to rename object */
/* System level errors */
#define H5E_SYSERRSTR (H5OPEN H5E_SYSERRSTR_g)
@@ -260,7 +262,6 @@ H5_DLLVAR hid_t H5E_NOIDS_g; /* Out of IDs for group */
#define H5E_NOTCACHED (H5OPEN H5E_NOTCACHED_g)
#define H5E_SYSTEM (H5OPEN H5E_SYSTEM_g)
#define H5E_CANTINS (H5OPEN H5E_CANTINS_g)
-#define H5E_CANTRENAME (H5OPEN H5E_CANTRENAME_g)
#define H5E_CANTPROTECT (H5OPEN H5E_CANTPROTECT_g)
#define H5E_CANTUNPROTECT (H5OPEN H5E_CANTUNPROTECT_g)
#define H5E_CANTPIN (H5OPEN H5E_CANTPIN_g)
@@ -280,7 +281,6 @@ H5_DLLVAR hid_t H5E_PROTECT_g; /* Protected metadata error */
H5_DLLVAR hid_t H5E_NOTCACHED_g; /* Metadata not currently cached */
H5_DLLVAR hid_t H5E_SYSTEM_g; /* Internal error detected */
H5_DLLVAR hid_t H5E_CANTINS_g; /* Unable to insert metadata into cache */
-H5_DLLVAR hid_t H5E_CANTRENAME_g; /* Unable to rename metadata */
H5_DLLVAR hid_t H5E_CANTPROTECT_g; /* Unable to protect metadata */
H5_DLLVAR hid_t H5E_CANTUNPROTECT_g; /* Unable to unprotect metadata */
H5_DLLVAR hid_t H5E_CANTPIN_g; /* Unable to pin cache entry */
@@ -302,7 +302,7 @@ H5_DLLVAR hid_t H5E_CANTNOTIFY_g; /* Unable to notify object about action */
H5_DLLVAR hid_t H5E_TRAVERSE_g; /* Link traversal failure */
H5_DLLVAR hid_t H5E_NLINKS_g; /* Too many soft links in path */
H5_DLLVAR hid_t H5E_NOTREGISTERED_g; /* Link class not registered */
-H5_DLLVAR hid_t H5E_CANTMOVE_g; /* Move callback returned error */
+H5_DLLVAR hid_t H5E_CANTMOVE_g; /* Can't move object */
H5_DLLVAR hid_t H5E_CANTSORT_g; /* Can't sort objects */
/* Parallel MPI errors */
diff --git a/src/H5Eterm.h b/src/H5Eterm.h
index 8f9db90..921b3b6 100644
--- a/src/H5Eterm.h
+++ b/src/H5Eterm.h
@@ -109,7 +109,8 @@ H5E_BADMESG_g=
H5E_CANTDELETE_g=
H5E_BADITER_g=
H5E_CANTPACK_g=
-H5E_CANTRESET_g=
+H5E_CANTRESET_g=
+H5E_CANTRENAME_g=
/* System level errors */
H5E_SYSERRSTR_g=
@@ -159,7 +160,6 @@ H5E_PROTECT_g=
H5E_NOTCACHED_g=
H5E_SYSTEM_g=
H5E_CANTINS_g=
-H5E_CANTRENAME_g=
H5E_CANTPROTECT_g=
H5E_CANTUNPROTECT_g=
H5E_CANTPIN_g=
diff --git a/src/H5F.c b/src/H5F.c
index e0d1127..a7f6aa8 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -930,7 +930,7 @@ H5F_new(H5F_file_t *shared, hid_t fcpl_id, hid_t fapl_id, H5FD_t *lf)
* The cache might be created with a different number of elements and
* the access property list should be updated to reflect that.
*/
- if(SUCCEED != H5AC_create(f, &(f->shared->mdc_initCacheCfg)))
+ if(H5AC_create(f, &(f->shared->mdc_initCacheCfg)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to create metadata cache")
/* Create the file's "open object" information */
diff --git a/src/H5FAhdr.c b/src/H5FAhdr.c
index 644b4c9..5cfba0d 100644
--- a/src/H5FAhdr.c
+++ b/src/H5FAhdr.c
@@ -384,7 +384,7 @@ H5FA__hdr_modified(H5FA_hdr_t *hdr))
HDassert(hdr);
/* Mark header as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(hdr) < 0)
+ if(H5AC_mark_entry_dirty(hdr) < 0)
H5E_THROW(H5E_CANTMARKDIRTY, "unable to mark fixed array header as dirty")
CATCH
diff --git a/src/H5FDstdio.c b/src/H5FDstdio.c
index 9928cf9..40befaf 100644
--- a/src/H5FDstdio.c
+++ b/src/H5FDstdio.c
@@ -1037,6 +1037,9 @@ H5FD_stdio_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing)
HFILE filehandle; /* Windows file handle */
LARGE_INTEGER li; /* 64-bit integer for SetFilePointer() call */
+ /* Reset seek offset to beginning of file, so that file isn't re-extended later */
+ rewind(file->fp);
+
/* Map the posix file handle to a Windows file handle */
filehandle = _get_osfhandle(fd);
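
The rewind() added above resets the stdio stream's own notion of the file position before the Win32 truncation, so a later buffered operation cannot silently re-extend the file from a stale offset. A POSIX-flavoured illustration of the same hazard and fix, assuming ftruncate()/fileno() are available; the patched driver performs the truncation with the Win32 file API instead:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    FILE *fp = tmpfile();
    if (!fp)
        return 1;

    fputs("0123456789", fp);           /* stream position is now 10 */
    fflush(fp);

    rewind(fp);                        /* reset the stream position...              */
    if (ftruncate(fileno(fp), 4) < 0)  /* ...before truncating the file to 4 bytes  */
        return 1;

    fputs("ab", fp);                   /* writes at offset 0..1, not from offset 10,
                                        * so the file is not re-extended past EOF    */
    fclose(fp);
    return 0;
}
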
diff --git a/src/H5FS.c b/src/H5FS.c
index 88a17b7..27111a2 100644
--- a/src/H5FS.c
+++ b/src/H5FS.c
@@ -123,7 +123,7 @@ HDfprintf(stderr, "%s: Creating free space manager, nclasses = %Zu\n", FUNC, ncl
/*
* Allocate free space structure
*/
- if(NULL == (fspace = H5FS_new(nclasses, classes, cls_init_udata)))
+ if(NULL == (fspace = H5FS_new(f, nclasses, classes, cls_init_udata)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for free space free list")
/* Initialize creation information for free space manager */
@@ -139,7 +139,7 @@ HDfprintf(stderr, "%s: Creating free space manager, nclasses = %Zu\n", FUNC, ncl
    /* Check if the free space tracker is supposed to be persistent */
if(fs_addr) {
/* Allocate space for the free space header */
- if(HADDR_UNDEF == (fspace->addr = H5MF_alloc(f, H5FD_MEM_FSPACE_HDR, dxpl_id, (hsize_t)H5FS_HEADER_SIZE(f))))
+ if(HADDR_UNDEF == (fspace->addr = H5MF_alloc(f, H5FD_MEM_FSPACE_HDR, dxpl_id, (hsize_t)fspace->hdr_size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "file allocation failed for free space header")
/* Cache the new free space header (pinned) */
@@ -213,6 +213,7 @@ HDfprintf(stderr, "%s: Opening free space manager, fs_addr = %a, nclasses = %Zu\
cache_udata.nclasses = nclasses;
cache_udata.classes = classes;
cache_udata.cls_init_udata = cls_init_udata;
+ cache_udata.addr = fs_addr;
/* Protect the free space header */
if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC_READ)))
@@ -281,6 +282,7 @@ HDfprintf(stderr, "%s: Deleting free space manager, fs_addr = %a\n", FUNC, fs_ad
cache_udata.nclasses = 0;
cache_udata.classes = NULL;
cache_udata.cls_init_udata = NULL;
+ cache_udata.addr = fs_addr;
/* Protect the free space header */
if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC_WRITE)))
@@ -390,7 +392,7 @@ HDfprintf(stderr, "%s: Real sections to store in file\n", FUNC);
fspace->alloc_sect_size = (size_t)fspace->sect_size;
/* Mark free space header as dirty */
- if(H5AC_mark_pinned_or_protected_entry_dirty(fspace) < 0)
+ if(H5AC_mark_entry_dirty(fspace) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTMARKDIRTY, FAIL, "unable to mark free space header as dirty")
} /* end if */
} /* end if */
@@ -447,7 +449,7 @@ HDfprintf(stderr, "%s: Section info went 'go away'\n", FUNC);
fspace->alloc_sect_size = 0;
/* Mark free space header as dirty */
- if(H5AC_mark_pinned_or_protected_entry_dirty(fspace) < 0)
+ if(H5AC_mark_entry_dirty(fspace) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTMARKDIRTY, FAIL, "unable to mark free space header as dirty")
} /* end else */
} /* end if */
@@ -463,7 +465,7 @@ HDfprintf(stderr, "%s: Section info is NOT for file free space\n", FUNC);
fspace->alloc_sect_size = 0;
/* Mark free space header as dirty */
- if(H5AC_mark_pinned_or_protected_entry_dirty(fspace) < 0)
+ if(H5AC_mark_entry_dirty(fspace) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTMARKDIRTY, FAIL, "unable to mark free space header as dirty")
/* Free previous serialized sections disk space */
@@ -513,7 +515,7 @@ HDfprintf(stderr, "%s: Leaving, ret_value = %d, fspace->rc = %u\n", FUNC, ret_va
*-------------------------------------------------------------------------
*/
H5FS_t *
-H5FS_new(size_t nclasses, const H5FS_section_class_t *classes[],
+H5FS_new(const H5F_t *f, size_t nclasses, const H5FS_section_class_t *classes[],
void *cls_init_udata)
{
H5FS_t *fspace = NULL; /* Free space manager */
@@ -558,6 +560,7 @@ H5FS_new(size_t nclasses, const H5FS_section_class_t *classes[],
/* Initialize non-zero information for new free space manager */
fspace->addr = HADDR_UNDEF;
+ fspace->hdr_size = H5FS_HEADER_SIZE(f);
fspace->sect_addr = HADDR_UNDEF;
/* Set return value */
@@ -604,7 +607,7 @@ H5FS_size(const H5F_t *f, const H5FS_t *fspace, hsize_t *meta_size)
HDassert(meta_size);
/* Get the free space size info */
- *meta_size += H5FS_HEADER_SIZE(f) + (fspace->sinfo ? fspace->sect_size : fspace->alloc_sect_size);
+ *meta_size += fspace->hdr_size + (fspace->sinfo ? fspace->sect_size : fspace->alloc_sect_size);
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5FS_size() */
@@ -728,7 +731,7 @@ HDfprintf(stderr, "%s: Marking free space header as dirty\n", FUNC);
/* Check if the free space manager is persistant */
if(H5F_addr_defined(fspace->addr))
/* Mark header as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(fspace) < 0)
+ if(H5AC_mark_entry_dirty(fspace) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTMARKDIRTY, FAIL, "unable to mark free space header as dirty")
done:
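The H5FS.c changes above compute the free space header's on-disk size once, in H5FS_new(), and cache it in fspace->hdr_size; allocation, size queries, and (below) the cache callbacks then reuse the cached value instead of re-evaluating H5FS_HEADER_SIZE(f). A minimal sketch of that caching pattern with hypothetical stand-in types, not the H5FS structures:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the file and free-space manager structures */
typedef struct file_t   { size_t sizeof_addr, sizeof_size; } file_t;
typedef struct fspace_t { size_t hdr_size; /* cached on-disk header size */ } fspace_t;

/* Size computed once from file properties (analogous to a *_HEADER_SIZE(f) macro) */
static size_t header_size(const file_t *f)
{
    return 4 /* magic */ + 1 /* version */ + f->sizeof_addr + f->sizeof_size;
}

static fspace_t *fspace_new(const file_t *f)
{
    fspace_t *fs = calloc(1, sizeof(*fs));
    if (fs)
        fs->hdr_size = header_size(f);   /* cache it; later code uses fs->hdr_size */
    return fs;
}

int main(void)
{
    file_t f = { 8, 8 };
    fspace_t *fs = fspace_new(&f);
    if (!fs)
        return 1;
    printf("cached header size: %zu bytes\n", fs->hdr_size);
    free(fs);
    return 0;
}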
diff --git a/src/H5FScache.c b/src/H5FScache.c
index c0700df..17f8f6a 100644
--- a/src/H5FScache.c
+++ b/src/H5FScache.c
@@ -151,7 +151,6 @@ H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
{
H5FS_t *fspace = NULL; /* Free space header info */
H5FS_hdr_cache_ud_t *udata = (H5FS_hdr_cache_ud_t *)_udata; /* user data for callback */
- size_t size; /* Header size */
H5WB_t *wb = NULL; /* Wrapped buffer for header data */
uint8_t hdr_buf[H5FS_HDR_BUF_SIZE]; /* Buffer for header */
uint8_t *hdr; /* Pointer to header buffer */
@@ -165,29 +164,25 @@ H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Check arguments */
HDassert(f);
- HDassert(H5F_addr_defined(addr));
HDassert(udata);
/* Allocate a new free space manager */
- if(NULL == (fspace = H5FS_new(udata->nclasses, udata->classes, udata->cls_init_udata)))
+ if(NULL == (fspace = H5FS_new(udata->f, udata->nclasses, udata->classes, udata->cls_init_udata)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Set free space manager's internal information */
- fspace->addr = addr;
+ fspace->addr = udata->addr;
/* Wrap the local buffer for serialized header info */
if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, NULL, "can't wrap buffer")
- /* Compute the size of the free space header on disk */
- size = (size_t)H5FS_HEADER_SIZE(udata->f);
-
/* Get a pointer to a buffer that's large enough for header */
- if(NULL == (hdr = (uint8_t *)H5WB_actual(wb, size)))
+ if(NULL == (hdr = (uint8_t *)H5WB_actual(wb, fspace->hdr_size)))
HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, NULL, "can't get actual buffer")
/* Read header from disk */
- if(H5F_block_read(f, H5FD_MEM_FSPACE_HDR, addr, size, dxpl_id, hdr) < 0)
+ if(H5F_block_read(f, H5FD_MEM_FSPACE_HDR, addr, fspace->hdr_size, dxpl_id, hdr) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_READERROR, NULL, "can't read free space header")
p = hdr;
@@ -251,7 +246,7 @@ H5FS_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Metadata checksum */
UINT32DECODE(p, stored_chksum);
- HDassert((size_t)(p - (const uint8_t *)hdr) == size);
+ HDassert((size_t)(p - (const uint8_t *)hdr) == fspace->hdr_size);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -342,17 +337,13 @@ H5FS_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5F
uint8_t *hdr; /* Pointer to header buffer */
uint8_t *p; /* Pointer into raw data buffer */
uint32_t metadata_chksum; /* Computed metadata checksum value */
- size_t size; /* Header size on disk */
/* Wrap the local buffer for serialized header info */
if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't wrap buffer")
- /* Compute the size of the free space header on disk */
- size = (size_t)H5FS_HEADER_SIZE(f);
-
/* Get a pointer to a buffer that's large enough for header */
- if(NULL == (hdr = (uint8_t *)H5WB_actual(wb, size)))
+ if(NULL == (hdr = (uint8_t *)H5WB_actual(wb, fspace->hdr_size)))
HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, FAIL, "can't get actual buffer")
/* Get temporary pointer to header */
@@ -411,8 +402,8 @@ H5FS_cache_hdr_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5F
UINT32ENCODE(p, metadata_chksum);
/* Write the free space header. */
- HDassert((size_t)(p - hdr) == size);
- if(H5F_block_write(f, H5FD_MEM_FSPACE_HDR, addr, size, dxpl_id, hdr) < 0)
+ HDassert((size_t)(p - hdr) == fspace->hdr_size);
+ if(H5F_block_write(f, H5FD_MEM_FSPACE_HDR, addr, fspace->hdr_size, dxpl_id, hdr) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTFLUSH, FAIL, "unable to save free space header to disk")
fspace->cache_info.is_dirty = FALSE;
@@ -467,7 +458,7 @@ H5FS_cache_hdr_dest(H5F_t *f, H5FS_t *fspace)
/* Release the space on disk */
/* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_FSPACE_HDR, H5AC_dxpl_id, fspace->cache_info.addr, (hsize_t)H5FS_HEADER_SIZE(f)) < 0)
+ if(H5MF_xfree(f, H5FD_MEM_FSPACE_HDR, H5AC_dxpl_id, fspace->cache_info.addr, (hsize_t)fspace->hdr_size) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to free free space header")
} /* end if */
@@ -533,7 +524,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5FS_cache_hdr_size(const H5F_t *f, const H5FS_t UNUSED *fspace, size_t *size_ptr)
+H5FS_cache_hdr_size(const H5F_t UNUSED *f, const H5FS_t *fspace, size_t *size_ptr)
{
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5FS_cache_hdr_size)
@@ -543,7 +534,7 @@ H5FS_cache_hdr_size(const H5F_t *f, const H5FS_t UNUSED *fspace, size_t *size_pt
HDassert(size_ptr);
/* Set size value */
- *size_ptr = (size_t)H5FS_HEADER_SIZE(f);
+ *size_ptr = fspace->hdr_size;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5FS_cache_hdr_size() */
@@ -580,17 +571,12 @@ H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Check arguments */
HDassert(f);
- HDassert(H5F_addr_defined(addr));
HDassert(udata);
/* Allocate a new free space section info */
if(NULL == (sinfo = H5FS_sinfo_new(udata->f, udata->fspace)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- /* Sanity check address */
- if(H5F_addr_ne(addr, udata->fspace->sect_addr))
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "incorrect address for free space sections")
-
/* Allocate space for the buffer to serialize the sections into */
H5_ASSIGN_OVERFLOW(/* To: */ old_sect_size, /* From: */ udata->fspace->sect_size, /* From: */ hsize_t, /* To: */ size_t);
if(NULL == (buf = H5FL_BLK_MALLOC(sect_block, (size_t)udata->fspace->sect_size)))
diff --git a/src/H5FSdbg.c b/src/H5FSdbg.c
index 098c25d..9be363e 100644
--- a/src/H5FSdbg.c
+++ b/src/H5FSdbg.c
@@ -116,6 +116,7 @@ H5FS_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int
cache_udata.nclasses = 0;
cache_udata.classes = NULL;
cache_udata.cls_init_udata = NULL;
+ cache_udata.addr = addr;
/*
* Load the free space header.
@@ -257,6 +258,7 @@ H5FS_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, FILE *stream, int
cache_udata.nclasses = 0;
cache_udata.classes = NULL;
cache_udata.cls_init_udata = NULL;
+ cache_udata.addr = fs_addr;
/*
* Load the free space header.
diff --git a/src/H5FSpkg.h b/src/H5FSpkg.h
index 2b0eab4..e22e411 100644
--- a/src/H5FSpkg.h
+++ b/src/H5FSpkg.h
@@ -99,6 +99,7 @@ typedef struct H5FS_hdr_cache_ud_t {
size_t nclasses; /* Number of section classes */
const H5FS_section_class_t **classes; /* Array of section class info */
void *cls_init_udata; /* Pointer to class init user data */
+ haddr_t addr; /* Address of header */
} H5FS_hdr_cache_ud_t;
/* Callback info for loading free space section info into the cache */
@@ -176,6 +177,7 @@ struct H5FS_t {
/* Computed/cached values */
unsigned rc; /* Count of outstanding references to struct */
haddr_t addr; /* Address of free space header on disk */
+ size_t hdr_size; /* Size of free space header on disk */
H5FS_sinfo_t *sinfo; /* Section information */
unsigned sinfo_lock_count; /* # of times the section info has been locked */
hbool_t sinfo_protected; /* Whether the section info was protected when locked */
@@ -219,8 +221,8 @@ H5FL_EXTERN(H5FS_t);
/******************************/
/* Free space manager header routines */
-H5_DLL H5FS_t *H5FS_new(size_t nclasses, const H5FS_section_class_t *classes[],
- void *cls_init_udata);
+H5_DLL H5FS_t *H5FS_new(const H5F_t *f, size_t nclasses,
+ const H5FS_section_class_t *classes[], void *cls_init_udata);
H5_DLL herr_t H5FS_incr(H5FS_t *fspace);
H5_DLL herr_t H5FS_decr(H5FS_t *fspace);
H5_DLL herr_t H5FS_dirty(H5FS_t *fspace);
diff --git a/src/H5FSsection.c b/src/H5FSsection.c
index 68f6840..9fb34df 100644
--- a/src/H5FSsection.c
+++ b/src/H5FSsection.c
@@ -368,7 +368,7 @@ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n"
/* Check if the section info size in the file has changed */
if(fspace->sect_size != fspace->alloc_sect_size)
- cache_flags |= H5AC__SIZE_CHANGED_FLAG | H5AC__DELETED_FLAG | H5AC__TAKE_OWNERSHIP_FLAG;
+ cache_flags |= H5AC__DELETED_FLAG | H5AC__TAKE_OWNERSHIP_FLAG;
} /* end if */
/* Sanity check */
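The H5FSsection.c hunk drops H5AC__SIZE_CHANGED_FLAG from the unprotect flags; this is consistent with the rest of the change set, where size changes are reported to the metadata cache explicitly via H5AC_resize_entry() while the entry is still held. A toy sketch of the two styles, using a hypothetical cache API rather than H5AC:

#include <stdio.h>

/* Hypothetical cache entry and calls -- illustration only, not the H5AC API */
typedef struct entry_t { size_t size; int dirty; } entry_t;

static void cache_resize_entry(entry_t *e, size_t new_size)
{
    e->size = new_size;           /* cache bookkeeping updated immediately */
}

static void cache_unprotect(entry_t *e, unsigned flags)
{
    if (flags & 0x1u)             /* hypothetical DIRTIED flag */
        e->dirty = 1;
}

int main(void)
{
    entry_t e = { 512, 0 };

    /* Old style: signal the size change with a SIZE_CHANGED flag at unprotect time.
     * New style: resize explicitly, then unprotect with only the dirty flag. */
    cache_resize_entry(&e, 1024);
    cache_unprotect(&e, 0x1u);

    printf("entry size %zu, dirty %d\n", e.size, e.dirty);
    return 0;
}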
diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h
index 71d9fef..c9e83cc 100644
--- a/src/H5Fpublic.h
+++ b/src/H5Fpublic.h
@@ -22,7 +22,6 @@
/* Public header files needed by this file */
#include "H5public.h"
#include "H5ACpublic.h"
-#include "H5Cpublic.h"
#include "H5Ipublic.h"
/* When this header is included from a private header, don't make calls to H5check() */
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
index 8a81e66..fd8c692 100644
--- a/src/H5Fsuper.c
+++ b/src/H5Fsuper.c
@@ -588,11 +588,15 @@ H5F_super_init(H5F_t *f, hid_t dxpl_id)
/* Check for non-default free space settings */
if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF) {
- H5O_fsinfo_t fsinfo; /* Free space manager info message */
+ H5FD_mem_t type; /* Memory type for iteration */
+ H5O_fsinfo_t fsinfo; /* Free space manager info message */
/* Write free-space manager info message to superblock extension object header if needed */
fsinfo.strategy = f->shared->fs_strategy;
fsinfo.threshold = f->shared->fs_threshold;
+ for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
+ fsinfo.fs_addr[type-1] = HADDR_UNDEF;
+
if(H5O_msg_create(&ext_loc, H5O_FSINFO_ID, H5O_MSG_FLAG_DONTSHARE, H5O_UPDATE_TIME, &fsinfo, dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to update free-space info header message")
} /* end if */
@@ -657,7 +661,7 @@ H5F_super_dirty(H5F_t *f)
HDassert(f->shared->sblock);
/* Mark superblock dirty in cache, so change to EOA will get encoded */
- if(H5AC_mark_pinned_or_protected_entry_dirty(f->shared->sblock) < 0)
+ if(H5AC_mark_entry_dirty(f->shared->sblock) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
done:
@@ -821,7 +825,7 @@ H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, void *mesg, unsigned id, hbool_
done:
/* Mark superblock dirty in cache, if necessary */
if(sblock_dirty)
- if(H5AC_mark_pinned_or_protected_entry_dirty(f->shared->sblock) < 0)
+ if(H5AC_mark_entry_dirty(f->shared->sblock) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5Gcache.c b/src/H5Gcache.c
index 98052e1..ff758c4 100644
--- a/src/H5Gcache.c
+++ b/src/H5Gcache.c
@@ -124,7 +124,6 @@ static H5G_node_t *
H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
{
H5G_node_t *sym = NULL;
- size_t size;
H5WB_t *wb = NULL; /* Wrapped buffer for node data */
uint8_t node_buf[H5G_NODE_BUF_SIZE]; /* Buffer for node */
uint8_t *node; /* Pointer to node buffer */
@@ -144,19 +143,23 @@ H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
* Initialize variables.
*/
+ /* Allocate symbol table data structures */
+ if(NULL == (sym = H5FL_CALLOC(H5G_node_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ sym->node_size = H5G_NODE_SIZE(f);
+ if(NULL == (sym->entry = H5FL_SEQ_CALLOC(H5G_entry_t, (size_t)(2 * H5F_SYM_LEAF_K(f)))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+
/* Wrap the local buffer for serialized node info */
if(NULL == (wb = H5WB_wrap(node_buf, sizeof(node_buf))))
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, NULL, "can't wrap buffer")
- /* Compute the size of the serialized symbol table node on disk */
- size = H5G_node_size_real(f);
-
/* Get a pointer to a buffer that's large enough for node */
- if(NULL == (node = (uint8_t *)H5WB_actual(wb, size)))
+ if(NULL == (node = (uint8_t *)H5WB_actual(wb, sym->node_size)))
HGOTO_ERROR(H5E_SYM, H5E_NOSPACE, NULL, "can't get actual buffer")
/* Read the serialized symbol table node. */
- if(H5F_block_read(f, H5FD_MEM_BTREE, addr, size, dxpl_id, node) < 0)
+ if(H5F_block_read(f, H5FD_MEM_BTREE, addr, sym->node_size, dxpl_id, node) < 0)
HGOTO_ERROR(H5E_SYM, H5E_READERROR, NULL, "unable to read symbol table node")
/* Get temporary pointer to serialized node */
@@ -174,12 +177,6 @@ H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
/* reserved */
p++;
- /* Allocate symbol table data structures */
- if(NULL == (sym = H5FL_CALLOC(H5G_node_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- if(NULL == (sym->entry = H5FL_SEQ_CALLOC(H5G_entry_t, (size_t)(2 * H5F_SYM_LEAF_K(f)))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
-
/* number of symbols */
UINT16DECODE(p, sym->nsyms);
@@ -237,17 +234,13 @@ H5G_node_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5G_node_
if(sym->cache_info.is_dirty) {
uint8_t *node; /* Pointer to node buffer */
uint8_t *p; /* Pointer into raw data buffer */
- size_t size;
/* Wrap the local buffer for serialized node info */
if(NULL == (wb = H5WB_wrap(node_buf, sizeof(node_buf))))
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "can't wrap buffer")
- /* Compute the size of the serialized symbol table node on disk */
- size = H5G_node_size_real(f);
-
/* Get a pointer to a buffer that's large enough for node */
- if(NULL == (node = (uint8_t *)H5WB_actual(wb, size)))
+ if(NULL == (node = (uint8_t *)H5WB_actual(wb, sym->node_size)))
HGOTO_ERROR(H5E_SYM, H5E_NOSPACE, FAIL, "can't get actual buffer")
/* Get temporary pointer to serialized symbol table node */
@@ -269,10 +262,10 @@ H5G_node_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5G_node_
/* entries */
if(H5G_ent_encode_vec(f, &p, sym->entry, sym->nsyms) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTENCODE, FAIL, "can't serialize")
- HDmemset(p, 0, size - (size_t)(p - node));
+ HDmemset(p, 0, sym->node_size - (size_t)(p - node));
/* Write the serialized symbol table node. */
- if(H5F_block_write(f, H5FD_MEM_BTREE, addr, size, dxpl_id, node) < 0)
+ if(H5F_block_write(f, H5FD_MEM_BTREE, addr, sym->node_size, dxpl_id, node) < 0)
HGOTO_ERROR(H5E_SYM, H5E_WRITEERROR, FAIL, "unable to write symbol table node to the file")
/* Reset the node's dirty flag */
@@ -332,7 +325,7 @@ H5G_node_dest(H5F_t *f, H5G_node_t *sym)
if(sym->cache_info.free_file_space_on_destroy) {
/* Release the space on disk */
/* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_BTREE, H5AC_dxpl_id, sym->cache_info.addr, (hsize_t)H5G_node_size_real(f)) < 0)
+ if(H5MF_xfree(f, H5FD_MEM_BTREE, H5AC_dxpl_id, sym->cache_info.addr, (hsize_t)sym->node_size) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTFREE, FAIL, "unable to free symbol table node")
} /* end if */
@@ -401,7 +394,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5G_node_size(const H5F_t *f, const H5G_node_t UNUSED *sym, size_t *size_ptr)
+H5G_node_size(const H5F_t UNUSED *f, const H5G_node_t *sym, size_t *size_ptr)
{
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5G_node_size)
@@ -411,7 +404,7 @@ H5G_node_size(const H5F_t *f, const H5G_node_t UNUSED *sym, size_t *size_ptr)
HDassert(f);
HDassert(size_ptr);
- *size_ptr = H5G_node_size_real(f);
+ *size_ptr = sym->node_size;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5G_node_size() */
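In H5Gcache.c the load callback now allocates the symbol table node first, records its on-disk size in sym->node_size, and only then decodes the image; the flush, destroy, and size callbacks reuse that cached size. A minimal sketch of the allocate-then-decode pattern with hypothetical structures, not the H5G_node_t layout:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical symbol-table node */
typedef struct node_t {
    size_t   node_size;   /* cached on-disk size */
    unsigned nsyms;       /* number of symbols decoded */
} node_t;

static size_t node_size_on_disk(unsigned leaf_k)
{
    return 4 /* magic */ + 1 + 1 + 2 + (size_t)(2 * leaf_k) * 40 /* entry size */;
}

static node_t *node_load(const unsigned char *image, unsigned leaf_k)
{
    node_t *sym = calloc(1, sizeof(*sym));

    if (!sym)
        return NULL;
    /* Allocate and size the in-memory node *before* decoding, so error paths
     * and later callbacks can rely on sym->node_size being set. */
    sym->node_size = node_size_on_disk(leaf_k);
    sym->nsyms = image[0];   /* toy "decode" step */
    return sym;
}

int main(void)
{
    unsigned char image[16] = { 3 };   /* pretend serialized node */
    node_t *sym = node_load(image, 4);

    if (!sym)
        return 1;
    printf("node_size=%zu nsyms=%u\n", sym->node_size, sym->nsyms);
    free(sym);
    return 0;
}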
diff --git a/src/H5Gnode.c b/src/H5Gnode.c
index 97353af..ef38380 100644
--- a/src/H5Gnode.c
+++ b/src/H5Gnode.c
@@ -55,8 +55,6 @@ typedef struct H5G_node_key_t {
/* Private macros */
-#define H5G_NODE_SIZEOF_HDR(F) (H5_SIZEOF_MAGIC + 4)
-
/* PRIVATE PROTOTYPES */
/* B-tree callbacks */
@@ -238,31 +236,6 @@ H5G_node_debug_key(FILE *stream, int indent, int fwidth, const void *_key,
/*-------------------------------------------------------------------------
- * Function: H5G_node_size_real
- *
- * Purpose: Returns the total size of a symbol table node.
- *
- * Return: Success: Total size of the node in bytes.
- *
- * Failure: Never fails.
- *
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Jun 23 1997
- *
- *-------------------------------------------------------------------------
- */
-size_t
-H5G_node_size_real(const H5F_t *f)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5G_node_size_real);
-
- FUNC_LEAVE_NOAPI(H5G_NODE_SIZEOF_HDR(f) +
- (2 * H5F_SYM_LEAF_K(f)) * H5G_SIZEOF_ENTRY(f));
-} /* end H5G_node_size_real() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5G_node_free
*
* Purpose: Destroy a symbol table node in memory.
@@ -322,7 +295,6 @@ H5G_node_create(H5F_t *f, hid_t dxpl_id, H5B_ins_t UNUSED op, void *_lt_key,
H5G_node_key_t *lt_key = (H5G_node_key_t *)_lt_key;
H5G_node_key_t *rt_key = (H5G_node_key_t *)_rt_key;
H5G_node_t *sym = NULL;
- hsize_t size = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5G_node_create)
@@ -335,9 +307,8 @@ H5G_node_create(H5F_t *f, hid_t dxpl_id, H5B_ins_t UNUSED op, void *_lt_key,
if(NULL == (sym = H5FL_CALLOC(H5G_node_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
- size = H5G_node_size_real(f);
- HDassert(size);
- if(HADDR_UNDEF == (*addr_p = H5MF_alloc(f, H5FD_MEM_BTREE, dxpl_id, size)))
+ sym->node_size = H5G_NODE_SIZE(f);
+ if(HADDR_UNDEF == (*addr_p = H5MF_alloc(f, H5FD_MEM_BTREE, dxpl_id, (hsize_t)sym->node_size)))
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to allocate file space")
if(NULL == (sym->entry = H5FL_SEQ_CALLOC(H5G_entry_t, (size_t)(2 * H5F_SYM_LEAF_K(f)))))
HGOTO_ERROR(H5E_SYM, H5E_CANTALLOC, FAIL, "memory allocation failed")
@@ -1437,7 +1408,7 @@ done:
/*-------------------------------------------------------------------------
* Function: H5G_node_iterate_size
*
- * Purpose: This function gets called by H5B_iterate_btree_size()
+ * Purpose: This function gets called by H5B_iterate_helper()
* to gather storage info for SNODs.
*
* Return: Non-negative on success/Negative on failure
@@ -1459,10 +1430,10 @@ H5G_node_iterate_size(H5F_t *f, hid_t UNUSED dxpl_id, const void UNUSED *_lt_key
HDassert(f);
HDassert(stab_size);
- *stab_size += H5G_node_size_real(f);
+ *stab_size += H5G_NODE_SIZE(f);
FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5G_btree_node_iterate() */
+} /* end H5G_node_iterate_size() */
/*-------------------------------------------------------------------------
@@ -1522,7 +1493,7 @@ H5G_node_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent,
"Dirty:",
sn->cache_info.is_dirty ? "Yes" : "No");
fprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
- "Size of Node (in bytes):", (unsigned)H5G_node_size_real(f));
+ "Size of Node (in bytes):", (unsigned)sn->node_size);
fprintf(stream, "%*s%-*s %u of %u\n", indent, "", fwidth,
"Number of Symbols:",
sn->nsyms, (unsigned)(2 * H5F_SYM_LEAF_K(f)));
diff --git a/src/H5Gpkg.h b/src/H5Gpkg.h
index 15e9254..82aa2e0 100644
--- a/src/H5Gpkg.h
+++ b/src/H5Gpkg.h
@@ -58,6 +58,19 @@
#define H5G_TARGET_EXISTS 0x0008
#define H5G_CRT_INTMD_GROUP 0x0010
+/* Size of a symbol table node on disk */
+#define H5G_NODE_SIZE(f) ( \
+ /* General metadata fields */ \
+ H5_SIZEOF_MAGIC \
+ + 1 /* Version */ \
+ + 1 /* Reserved */ \
+ + 2 /* Number of symbols */ \
+ \
+ /* Entries */ \
+ + ((2 * H5F_SYM_LEAF_K(f)) * H5G_SIZEOF_ENTRY(f)) \
+ )
+
+
/****************************/
/* Package Private Typedefs */
/****************************/
@@ -117,8 +130,9 @@ struct H5G_entry_t {
typedef struct H5G_node_t {
H5AC_info_t cache_info; /* Information for H5AC cache functions, _must_ be */
/* first field in structure */
- unsigned nsyms; /*number of symbols */
- H5G_entry_t *entry; /*array of symbol table entries */
+ size_t node_size; /* Size of node on disk */
+ unsigned nsyms; /* Number of symbols */
+ H5G_entry_t *entry; /* Array of symbol table entries */
} H5G_node_t;
/*
@@ -435,7 +449,6 @@ H5_DLL herr_t H5G_ent_debug(const H5G_entry_t *ent, FILE * stream, int indent,
/* Functions that understand symbol table nodes */
H5_DLL herr_t H5G_node_init(H5F_t *f);
-H5_DLL size_t H5G_node_size_real(const H5F_t *f);
H5_DLL int H5G_node_iterate(H5F_t *f, hid_t dxpl_id, const void *_lt_key, haddr_t addr,
const void *_rt_key, void *_udata);
H5_DLL int H5G_node_sumup(H5F_t *f, hid_t dxpl_id, const void *_lt_key, haddr_t addr,
diff --git a/src/H5Groot.c b/src/H5Groot.c
index 7470853..f754f29 100644
--- a/src/H5Groot.c
+++ b/src/H5Groot.c
@@ -262,7 +262,7 @@ done:
/* Mark superblock dirty in cache, if necessary */
if(sblock_dirty)
- if(H5AC_mark_pinned_or_protected_entry_dirty(f->shared->sblock) < 0)
+ if(H5AC_mark_entry_dirty(f->shared->sblock) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5HF.c b/src/H5HF.c
index 3d90369..cc38f2a 100644
--- a/src/H5HF.c
+++ b/src/H5HF.c
@@ -152,7 +152,6 @@ H5HF_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam)
{
H5HF_t *fh = NULL; /* Pointer to new fractal heap */
H5HF_hdr_t *hdr = NULL; /* The fractal heap header information */
- H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */
haddr_t fh_addr; /* Heap header address */
H5HF_t *ret_value; /* Return value */
@@ -175,13 +174,9 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
if(NULL == (fh = H5FL_MALLOC(H5HF_t)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "memory allocation failed for fractal heap info")
- /* Set up userdata for protect call */
- cache_udata.f = f;
- cache_udata.dxpl_id = dxpl_id;
-
/* Lock the heap header into memory */
- if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, fh_addr, &cache_udata, H5AC_WRITE)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to load fractal heap header")
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC_WRITE)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap header")
/* Point fractal heap wrapper at header and bump it's ref count */
fh->hdr = hdr;
@@ -228,7 +223,6 @@ H5HF_open(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr)
{
H5HF_t *fh = NULL; /* Pointer to new fractal heap */
H5HF_hdr_t *hdr = NULL; /* The fractal heap header information */
- H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */
H5HF_t *ret_value; /* Return value */
FUNC_ENTER_NOAPI(H5HF_open, NULL)
@@ -239,16 +233,12 @@ H5HF_open(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr)
HDassert(f);
HDassert(H5F_addr_defined(fh_addr));
- /* Set up userdata for protect call */
- cache_udata.f = f;
- cache_udata.dxpl_id = dxpl_id;
-
/* Load the heap header into memory */
#ifdef QAK
HDfprintf(stderr, "%s: fh_addr = %a\n", FUNC, fh_addr);
#endif /* QAK */
- if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, fh_addr, &cache_udata, H5AC_READ)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "unable to load fractal heap header")
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC_READ)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap header")
#ifdef QAK
HDfprintf(stderr, "%s: hdr->rc = %u, hdr->fspace = %p\n", FUNC, hdr->rc, hdr->fspace);
#endif /* QAK */
@@ -793,7 +783,6 @@ done:
herr_t
H5HF_close(H5HF_t *fh, hid_t dxpl_id)
{
- H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */
hbool_t pending_delete = FALSE; /* Whether the heap is pending deletion */
haddr_t heap_addr = HADDR_UNDEF; /* Address of heap (for deletion) */
herr_t ret_value = SUCCEED; /* Return value */
@@ -866,13 +855,9 @@ HDfprintf(stderr, "%s; After iterator reset fh->hdr->rc = %Zu\n", FUNC, fh->hdr-
if(pending_delete) {
H5HF_hdr_t *hdr; /* Another pointer to fractal heap header */
- /* Set up userdata for protect call */
- cache_udata.f = fh->f;
- cache_udata.dxpl_id = dxpl_id;
-
/* Lock the heap header into memory */
- if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(fh->f, dxpl_id, H5AC_FHEAP_HDR, heap_addr, &cache_udata, H5AC_WRITE)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap header")
+ if(NULL == (hdr = H5HF_hdr_protect(fh->f, dxpl_id, heap_addr, H5AC_WRITE)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/* Set the shared heap header's file context for this operation */
hdr->f = fh->f;
@@ -907,7 +892,6 @@ herr_t
H5HF_delete(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr)
{
H5HF_hdr_t *hdr = NULL; /* The fractal heap header information */
- H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5HF_delete, FAIL)
@@ -918,16 +902,12 @@ H5HF_delete(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr)
HDassert(f);
HDassert(H5F_addr_defined(fh_addr));
- /* Set up userdata for protect call */
- cache_udata.f = f;
- cache_udata.dxpl_id = dxpl_id;
-
/* Lock the heap header into memory */
#ifdef QAK
HDfprintf(stderr, "%s: fh_addr = %a\n", FUNC, fh_addr);
#endif /* QAK */
- if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, fh_addr, &cache_udata, H5AC_WRITE)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap header")
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC_WRITE)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/* Check for files using shared heap header */
if(hdr->file_rc)
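The H5HF.c hunks replace the repeated cache_udata setup plus H5AC_protect() call with a single helper, H5HF_hdr_protect(), which also stamps the header's address (see H5HFhdr.c below). A minimal sketch of factoring protect boilerplate into such a wrapper, with hypothetical names rather than the H5AC interface:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical header object and low-level protect call -- not the H5AC API */
typedef struct hdr_t { unsigned long long heap_addr; int rc; } hdr_t;

static hdr_t *low_level_protect(unsigned long long addr, int write_access)
{
    hdr_t *hdr;

    (void)write_access;            /* unused in this sketch */
    hdr = calloc(1, sizeof(*hdr));
    if (hdr)
        hdr->heap_addr = addr;     /* normally filled in from the cached image */
    return hdr;
}

/* Wrapper: one place to check arguments, build the user data, and record
 * the address, so every caller shrinks to a single call. */
static hdr_t *hdr_protect(unsigned long long addr, int write_access)
{
    hdr_t *hdr;

    if (addr == 0)                 /* stand-in for an "address defined" check */
        return NULL;
    if (NULL == (hdr = low_level_protect(addr, write_access)))
        return NULL;
    hdr->heap_addr = addr;         /* always stamp the header's address */
    return hdr;
}

int main(void)
{
    hdr_t *hdr = hdr_protect(4096, 1);
    if (!hdr)
        return 1;
    printf("protected header at %llu\n", hdr->heap_addr);
    free(hdr);
    return 0;
}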
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index f02b28c..f8829f2 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -288,9 +288,6 @@ H5HF_cache_hdr_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
if(NULL == (hdr = H5HF_hdr_alloc(udata->f)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- /* Set the heap header's address */
- hdr->heap_addr = addr;
-
/* Wrap the local buffer for serialized header info */
if(NULL == (wb = H5WB_wrap(hdr_buf, sizeof(hdr_buf))))
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "can't wrap buffer")
@@ -745,7 +742,6 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
/* Set block's internal information */
iblock->rc = 0;
iblock->nrows = *udata->nrows;
- iblock->addr = addr;
iblock->nchildren = 0;
/* Wrap the local buffer for serialized indirect block */
@@ -753,7 +749,7 @@ H5HF_cache_iblock_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "can't wrap buffer")
/* Compute size of indirect block */
- iblock->size = H5HF_MAN_INDIRECT_SIZE(hdr, iblock);
+ iblock->size = H5HF_MAN_INDIRECT_SIZE(hdr, iblock->nrows);
/* Get a pointer to a buffer that's large enough for serialized indirect block */
if(NULL == (buf = (uint8_t *)H5WB_actual(wb, iblock->size)))
@@ -1024,8 +1020,8 @@ H5HF_cache_iblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
HDassert(!H5F_addr_eq(iblock->addr, addr));
/* Let the metadata cache know the block moved */
- if(H5AC_rename(f, H5AC_FHEAP_IBLOCK, iblock->addr, addr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRENAME, FAIL, "unable to move indirect block")
+ if(H5AC_move_entry(f, H5AC_FHEAP_IBLOCK, iblock->addr, addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move indirect block")
/* Update the internal address for the block */
iblock->addr = addr;
@@ -1498,8 +1494,8 @@ H5HF_cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
/* Let the metadata cache know, if the block moved */
if(!H5F_addr_eq(hdr->man_dtable.table_addr, addr))
- if(H5AC_rename(f, H5AC_FHEAP_DBLOCK, hdr->man_dtable.table_addr, addr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRENAME, FAIL, "unable to move direct block")
+ if(H5AC_move_entry(f, H5AC_FHEAP_DBLOCK, hdr->man_dtable.table_addr, addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move direct block")
/* Update information about compressed direct block's location & size */
hdr->man_dtable.table_addr = addr;
@@ -1549,8 +1545,8 @@ H5HF_cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
/* Let the metadata cache know, if the block moved */
if(!H5F_addr_eq(par_iblock->ents[par_entry].addr, addr))
- if(H5AC_rename(f, H5AC_FHEAP_DBLOCK, par_iblock->ents[par_entry].addr, addr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRENAME, FAIL, "unable to move direct block")
+ if(H5AC_move_entry(f, H5AC_FHEAP_DBLOCK, par_iblock->ents[par_entry].addr, addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move direct block")
/* Update information about compressed direct block's location & size */
par_iblock->ents[par_entry].addr = addr;
@@ -1585,8 +1581,8 @@ H5HF_cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
HDassert(!H5F_addr_eq(hdr->man_dtable.table_addr, addr));
/* Let the metadata cache know the block moved */
- if(H5AC_rename(f, H5AC_FHEAP_DBLOCK, hdr->man_dtable.table_addr, addr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRENAME, FAIL, "unable to move direct block")
+ if(H5AC_move_entry(f, H5AC_FHEAP_DBLOCK, hdr->man_dtable.table_addr, addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move direct block")
/* Update information about direct block's location */
hdr->man_dtable.table_addr = addr;
@@ -1614,8 +1610,8 @@ H5HF_cache_dblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
HDassert(!H5F_addr_eq(par_iblock->ents[par_entry].addr, addr));
/* Let the metadata cache know the block moved */
- if(H5AC_rename(f, H5AC_FHEAP_DBLOCK, par_iblock->ents[par_entry].addr, addr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRENAME, FAIL, "unable to move direct block")
+ if(H5AC_move_entry(f, H5AC_FHEAP_DBLOCK, par_iblock->ents[par_entry].addr, addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move direct block")
/* Update information about direct block's location */
par_iblock->ents[par_entry].addr = addr;
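The H5HFcache.c hunks track the metadata cache API rename from H5AC_rename() to H5AC_move_entry() and the matching H5E_CANTRENAME to H5E_CANTMOVE error code change; the pattern itself is unchanged: when a block lands at a new file address, the cache entry is re-keyed. A toy sketch of re-keying an address-indexed cache entry (illustration only, not H5AC):

#include <stdio.h>

/* Toy cache keyed by file address */
#define NSLOTS 8
static struct { unsigned long long addr; int used; } slots[NSLOTS];

static int cache_insert(unsigned long long addr)
{
    for (int i = 0; i < NSLOTS; i++)
        if (!slots[i].used) { slots[i].addr = addr; slots[i].used = 1; return 0; }
    return -1;
}

/* "Move" an entry: same object, new key (file address) */
static int cache_move_entry(unsigned long long old_addr, unsigned long long new_addr)
{
    for (int i = 0; i < NSLOTS; i++)
        if (slots[i].used && slots[i].addr == old_addr) { slots[i].addr = new_addr; return 0; }
    return -1;
}

int main(void)
{
    cache_insert(1024);
    if (cache_move_entry(1024, 8192) < 0)
        fprintf(stderr, "unable to move entry\n");
    else
        printf("entry re-keyed to 8192\n");
    return 0;
}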
diff --git a/src/H5HFdbg.c b/src/H5HFdbg.c
index e2ee20c..68b30d9 100644
--- a/src/H5HFdbg.c
+++ b/src/H5HFdbg.c
@@ -188,7 +188,6 @@ herr_t
H5HF_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int fwidth)
{
H5HF_hdr_t *hdr = NULL; /* Fractal heap header info */
- H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5HF_hdr_debug, FAIL)
@@ -202,15 +201,9 @@ H5HF_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
HDassert(indent >= 0);
HDassert(fwidth >= 0);
- /* Set up user data for protect call */
- cache_udata.f = f;
- cache_udata.dxpl_id = dxpl_id;
-
- /*
- * Load the fractal heap header.
- */
- if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, addr, &cache_udata, H5AC_READ)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap header")
+ /* Load the fractal heap header */
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, addr, H5AC_READ)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/* Print opening message */
HDfprintf(stream, "%*sFractal Heap Header...\n", indent, "");
@@ -399,7 +392,6 @@ H5HF_dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream,
{
H5HF_hdr_t *hdr = NULL; /* Fractal heap header info */
H5HF_direct_t *dblock = NULL; /* Fractal heap direct block info */
- H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */
size_t blk_prefix_size; /* Size of prefix for block */
size_t amount_free; /* Amount of free space in block */
uint8_t *marker = NULL; /* Track free space for block */
@@ -418,15 +410,9 @@ H5HF_dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream,
HDassert(H5F_addr_defined(hdr_addr));
HDassert(block_size > 0);
- /* Set up user data for protect call */
- cache_udata.f = f;
- cache_udata.dxpl_id = dxpl_id;
-
- /*
- * Load the fractal heap header.
- */
- if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, hdr_addr, &cache_udata, H5AC_READ)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap header")
+ /* Load the fractal heap header */
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, hdr_addr, H5AC_READ)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/*
* Load the heap direct block
@@ -533,7 +519,6 @@ H5HF_iblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream,
{
H5HF_hdr_t *hdr = NULL; /* Fractal heap header info */
H5HF_indirect_t *iblock = NULL; /* Fractal heap direct block info */
- H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */
hbool_t did_protect; /* Whether we protected the indirect block or not */
char temp_str[64]; /* Temporary string, for formatting */
size_t u, v; /* Local index variable */
@@ -552,15 +537,9 @@ H5HF_iblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream,
HDassert(H5F_addr_defined(hdr_addr));
HDassert(nrows > 0);
- /* Set up user data for protect call */
- cache_udata.f = f;
- cache_udata.dxpl_id = dxpl_id;
-
- /*
- * Load the fractal heap header.
- */
- if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, hdr_addr, &cache_udata, H5AC_READ)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap header")
+ /* Load the fractal heap header */
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, hdr_addr, H5AC_READ)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/*
* Load the heap indirect block
@@ -727,7 +706,6 @@ H5HF_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr,
FILE *stream, int indent, int fwidth)
{
H5HF_hdr_t *hdr = NULL; /* Fractal heap header info */
- H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5HF_sects_debug, FAIL)
@@ -741,15 +719,9 @@ H5HF_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr,
HDassert(indent >= 0);
HDassert(fwidth >= 0);
- /* Set up user data for protect call */
- cache_udata.f = f;
- cache_udata.dxpl_id = dxpl_id;
-
- /*
- * Load the fractal heap header.
- */
- if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, fh_addr, &cache_udata, H5AC_READ)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap header")
+ /* Load the fractal heap header */
+ if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC_READ)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/* Initialize the free space information for the heap */
if(H5HF_space_start(hdr, dxpl_id, FALSE) < 0)
diff --git a/src/H5HFdblock.c b/src/H5HFdblock.c
index a68cc75..b2b3077 100644
--- a/src/H5HFdblock.c
+++ b/src/H5HFdblock.c
@@ -466,6 +466,7 @@ H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr,
*/
if(hdr->filter_len > 0) {
if(par_iblock == NULL) {
+ udata.odi_size = hdr->pline_root_direct_size;
udata.filter_mask = hdr->pline_root_direct_filter_mask;
} /* end if */
else {
@@ -473,11 +474,14 @@ H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr,
HDassert(H5F_addr_eq(par_iblock->ents[par_entry].addr, dblock_addr));
/* Set up parameters to read filtered direct block */
+ udata.odi_size = par_iblock->filt_ents[par_entry].size;
udata.filter_mask = par_iblock->filt_ents[par_entry].filter_mask;
} /* end else */
} /* end if */
- else
+ else {
+ udata.odi_size = dblock_size;
udata.filter_mask = 0;
+ } /* end else */
/* Protect the direct block */
if(NULL == (dblock = (H5HF_direct_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_DBLOCK, dblock_addr, &udata, rw)))
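The H5HFdblock.c hunk fills in udata.odi_size so the protect path knows how many bytes the direct block's on-disk image occupies: the stored filtered size when I/O filters are present, or the plain block size otherwise. A minimal sketch of that selection with hypothetical fields, not the H5HF_dblock_cache_ud_t definition:

#include <stdio.h>

/* Hypothetical user data for reading a direct block */
struct udata { size_t odi_size; unsigned filter_mask; };

static void setup_udata(struct udata *u, int filtered, size_t stored_filtered_size,
                        unsigned stored_mask, size_t dblock_size)
{
    if (filtered) {
        /* Filtered block: use the compressed/checksummed image size recorded
         * by the parent (or by the header, for the root block). */
        u->odi_size    = stored_filtered_size;
        u->filter_mask = stored_mask;
    }
    else {
        /* Unfiltered block: the on-disk image is exactly the block size. */
        u->odi_size    = dblock_size;
        u->filter_mask = 0;
    }
}

int main(void)
{
    struct udata u;

    setup_udata(&u, 1, 812, 0x2u, 4096);
    printf("filtered:   read %zu bytes, mask 0x%x\n", u.odi_size, u.filter_mask);
    setup_udata(&u, 0, 0, 0, 4096);
    printf("unfiltered: read %zu bytes, mask 0x%x\n", u.odi_size, u.filter_mask);
    return 0;
}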
diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c
index 54b4ff9..67bb7b0 100644
--- a/src/H5HFhdr.c
+++ b/src/H5HFhdr.c
@@ -516,6 +516,51 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5HF_hdr_protect
+ *
+ * Purpose: Convenience wrapper around H5AC_protect on a fractal heap header
+ *
+ * Return: Pointer to fractal heap header on success, NULL on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * May 5 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+H5HF_hdr_t *
+H5HF_hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
+{
+ H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */
+ H5HF_hdr_t *hdr; /* Fractal heap header */
+ H5HF_hdr_t *ret_value; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5HF_hdr_protect)
+
+ /* Check arguments */
+ HDassert(f);
+ HDassert(H5F_addr_defined(addr));
+
+ /* Set up userdata for protect call */
+ cache_udata.f = f;
+ cache_udata.dxpl_id = dxpl_id;
+
+ /* Lock the heap header into memory */
+ if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, addr, &cache_udata, rw)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap header")
+
+ /* Set the header's address */
+ hdr->heap_addr = addr;
+
+ /* Set the return value */
+ ret_value = hdr;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF_hdr_protect() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5HF_hdr_incr
*
* Purpose: Increment component reference count on shared heap header
@@ -672,12 +717,12 @@ H5HF_hdr_dirty(H5HF_hdr_t *hdr)
/* Resize pinned header in cache if I/O filter is present. */
if(hdr->filter_len > 0) {
- if(H5AC_resize_pinned_entry(hdr, (size_t)hdr->heap_size) < 0)
+ if(H5AC_resize_entry(hdr, (size_t)hdr->heap_size) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to resize fractal heap header")
} /* end if */
/* Mark header as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(hdr) < 0)
+ if(H5AC_mark_entry_dirty(hdr) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTMARKDIRTY, FAIL, "unable to mark fractal heap header as dirty")
done:
diff --git a/src/H5HFiblock.c b/src/H5HFiblock.c
index d50dee2..7958075 100644
--- a/src/H5HFiblock.c
+++ b/src/H5HFiblock.c
@@ -332,7 +332,7 @@ H5HF_iblock_dirty(H5HF_indirect_t *iblock)
HDassert(iblock);
/* Mark indirect block as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(iblock) < 0)
+ if(H5AC_mark_entry_dirty(iblock) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTMARKDIRTY, FAIL, "unable to mark fractal heap indirect block as dirty")
done:
@@ -545,14 +545,14 @@ H5HF_man_iblock_root_double(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t min_dblock_si
* QAK - 3/14/2006
*/
/* Free previous indirect block disk space */
- old_iblock_size = iblock->size;
if(H5MF_xfree(hdr->f, H5FD_MEM_FHEAP_IBLOCK, dxpl_id, iblock->addr, (hsize_t)iblock->size) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free fractal heap indirect block file space")
} /* end if */
/* Compute size of buffer needed for new indirect block */
iblock->nrows = new_nrows;
- iblock->size = H5HF_MAN_INDIRECT_SIZE(hdr, iblock);
+ old_iblock_size = iblock->size;
+ iblock->size = H5HF_MAN_INDIRECT_SIZE(hdr, iblock->nrows);
/* Allocate [temporary] space for the new indirect block on disk */
if(H5F_USE_TMP_SPACE(hdr->f)) {
@@ -566,14 +566,14 @@ H5HF_man_iblock_root_double(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t min_dblock_si
/* Resize pinned indirect block in the cache, if its changed size */
if(old_iblock_size != iblock->size) {
- if(H5AC_resize_pinned_entry(iblock, (size_t)iblock->size) < 0)
+ if(H5AC_resize_entry(iblock, (size_t)iblock->size) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to resize fractal heap indirect block")
} /* end if */
/* Move object in cache, if it actually was relocated */
if(H5F_addr_ne(iblock->addr, new_addr)) {
- if(H5AC_rename(hdr->f, H5AC_FHEAP_IBLOCK, iblock->addr, new_addr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRENAME, FAIL, "unable to move fractal heap root indirect block")
+ if(H5AC_move_entry(hdr->f, H5AC_FHEAP_IBLOCK, iblock->addr, new_addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move fractal heap root indirect block")
iblock->addr = new_addr;
} /* end if */
@@ -708,7 +708,6 @@ H5HF_man_iblock_root_halve(H5HF_indirect_t *iblock, hid_t dxpl_id)
* QAK - 6/12/2006
*/
/* Free previous indirect block disk space */
- old_size = iblock->size;
if(H5MF_xfree(hdr->f, H5FD_MEM_FHEAP_IBLOCK, dxpl_id, iblock->addr, (hsize_t)iblock->size) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to free fractal heap indirect block file space")
} /* end if */
@@ -721,7 +720,8 @@ H5HF_man_iblock_root_halve(H5HF_indirect_t *iblock, hid_t dxpl_id)
/* Compute size of buffer needed for new indirect block */
old_nrows = iblock->nrows;
iblock->nrows = new_nrows;
- iblock->size = H5HF_MAN_INDIRECT_SIZE(hdr, iblock);
+ old_size = iblock->size;
+ iblock->size = H5HF_MAN_INDIRECT_SIZE(hdr, iblock->nrows);
/* Allocate [temporary] space for the new indirect block on disk */
if(H5F_USE_TMP_SPACE(hdr->f)) {
@@ -735,13 +735,13 @@ H5HF_man_iblock_root_halve(H5HF_indirect_t *iblock, hid_t dxpl_id)
/* Resize pinned indirect block in the cache, if it has changed size */
if(old_size != iblock->size) {
- if(H5AC_resize_pinned_entry(iblock, (size_t)iblock->size) < 0)
+ if(H5AC_resize_entry(iblock, (size_t)iblock->size) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to resize fractal heap indirect block")
} /* end if */
/* Move object in cache, if it actually was relocated */
if(H5F_addr_ne(iblock->addr, new_addr)) {
- if(H5AC_rename(hdr->f, H5AC_FHEAP_IBLOCK, iblock->addr, new_addr) < 0)
+ if(H5AC_move_entry(hdr->f, H5AC_FHEAP_IBLOCK, iblock->addr, new_addr) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTSPLIT, FAIL, "unable to move fractal heap root indirect block")
iblock->addr = new_addr;
} /* end if */
@@ -985,7 +985,7 @@ H5HF_man_iblock_create(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_indirect_t *par_iblo
iblock->max_rows = max_rows;
/* Compute size of buffer needed for indirect block */
- iblock->size = H5HF_MAN_INDIRECT_SIZE(hdr, iblock);
+ iblock->size = H5HF_MAN_INDIRECT_SIZE(hdr, iblock->nrows);
/* Allocate child block entry array */
if(NULL == (iblock->ents = H5FL_SEQ_MALLOC(H5HF_indirect_ent_t, (size_t)(iblock->nrows * hdr->man_dtable.cparam.width))))
@@ -1154,9 +1154,15 @@ H5HF_man_iblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr,
/* Protect the indirect block */
if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_IBLOCK, iblock_addr, &cache_udata, rw)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap indirect block")
+
+ /* Set the indirect block's address */
+ iblock->addr = iblock_addr;
+
+ /* Indicate that the indirect block was protected */
*did_protect = TRUE;
} /* end if */
else
+ /* Indicate that the indirect block was _not_ protected */
*did_protect = FALSE;
/* Set the return value */
diff --git a/src/H5HFpkg.h b/src/H5HFpkg.h
index f862fe0..2746302 100644
--- a/src/H5HFpkg.h
+++ b/src/H5HFpkg.h
@@ -123,18 +123,17 @@
)
/* Size of managed indirect block */
-#define H5HF_MAN_INDIRECT_SIZE(h, i) ( \
+#define H5HF_MAN_INDIRECT_SIZE(h, r) ( \
/* General metadata fields */ \
H5HF_METADATA_PREFIX_SIZE(TRUE) \
\
/* Fractal heap managed, absolutely mapped indirect block specific fields */ \
+ (h)->sizeof_addr /* File address of heap owning the block */ \
+ (h)->heap_off_size /* Offset of the block in the heap */ \
- + (MIN((i)->nrows, (h)->man_dtable.max_direct_rows) * (h)->man_dtable.cparam.width * H5HF_MAN_INDIRECT_CHILD_DIR_ENTRY_SIZE(h)) /* Size of entries for direct blocks */ \
- + ((((i)->nrows > (h)->man_dtable.max_direct_rows) ? ((i)->nrows - (h)->man_dtable.max_direct_rows) : 0) * (h)->man_dtable.cparam.width * (h)->sizeof_addr) /* Size of entries for indirect blocks */ \
+ + (MIN(r, (h)->man_dtable.max_direct_rows) * (h)->man_dtable.cparam.width * H5HF_MAN_INDIRECT_CHILD_DIR_ENTRY_SIZE(h)) /* Size of entries for direct blocks */ \
+ + (((r > (h)->man_dtable.max_direct_rows) ? (r - (h)->man_dtable.max_direct_rows) : 0) * (h)->man_dtable.cparam.width * (h)->sizeof_addr) /* Size of entries for indirect blocks */ \
)
-
/* Compute the # of bytes required to store an offset into a given buffer size */
#define H5HF_SIZEOF_OFFSET_BITS(b) (((b) + 7) / 8)
#define H5HF_SIZEOF_OFFSET_LEN(l) H5HF_SIZEOF_OFFSET_BITS(H5V_log2_of2((unsigned)(l)))
@@ -484,6 +483,13 @@ typedef struct H5HF_iblock_cache_ud_t {
typedef struct H5HF_dblock_cache_ud_t {
H5HF_parent_t par_info; /* Parent info */
H5F_t * f; /* File pointer */
+ size_t odi_size; /* On disk image size of the direct block.
+ * Note that there is no necessary relation
+ * between this value and the actual
+ * direct block size, as compression may
+ * reduce the size of the on disk image,
+ * and checksums may increase it.
+ */
size_t dblock_size; /* size of the direct block, which bears
* no necessary relation to the block
* odi_size -- the size of the on disk
@@ -571,6 +577,8 @@ H5_DLL hsize_t H5HF_dtable_span_size(const H5HF_dtable_t *dtable, unsigned start
/* Heap header routines */
H5_DLL H5HF_hdr_t * H5HF_hdr_alloc(H5F_t *f);
H5_DLL haddr_t H5HF_hdr_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam);
+H5_DLL H5HF_hdr_t *H5HF_hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr,
+ H5AC_protect_t rw);
H5_DLL herr_t H5HF_hdr_finish_init_phase1(H5HF_hdr_t *hdr);
H5_DLL herr_t H5HF_hdr_finish_init_phase2(H5HF_hdr_t *hdr);
H5_DLL herr_t H5HF_hdr_finish_init(H5HF_hdr_t *hdr);
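The H5HFpkg.h change makes H5HF_MAN_INDIRECT_SIZE() a function of a row count rather than of an indirect block pointer, so the root-double/halve code above can size a block for the row count it is about to assign without mutating the struct first. A sketch of the same parameterization, with made-up sizes rather than the fractal heap's real layout:

#include <stdio.h>

/* Hypothetical indirect-block size: prefix + one entry per (row x width).
 * Direct rows carry larger entries than indirect rows; numbers are made up. */
#define PREFIX_SIZE          13u
#define WIDTH                 4u
#define MAX_DIRECT_ROWS       5u
#define DIRECT_ENTRY_SIZE    16u
#define INDIRECT_ENTRY_SIZE   8u

#define MIN_U(a, b) ((a) < (b) ? (a) : (b))

/* Takes the row count directly, so a caller can evaluate it for a new row
 * count before updating any structure fields. */
#define INDIRECT_SIZE(r) ( PREFIX_SIZE \
    + MIN_U((r), MAX_DIRECT_ROWS) * WIDTH * DIRECT_ENTRY_SIZE \
    + (((r) > MAX_DIRECT_ROWS ? (r) - MAX_DIRECT_ROWS : 0u) * WIDTH * INDIRECT_ENTRY_SIZE) )

int main(void)
{
    unsigned old_rows = 4u, new_rows = 8u;

    printf("size for %u rows: %u bytes\n", old_rows, INDIRECT_SIZE(old_rows));
    printf("size for %u rows: %u bytes\n", new_rows, INDIRECT_SIZE(new_rows));
    return 0;
}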
diff --git a/src/H5HG.c b/src/H5HG.c
index e7b21dd..3082618 100644
--- a/src/H5HG.c
+++ b/src/H5HG.c
@@ -99,7 +99,7 @@ static haddr_t H5HG_create(H5F_t *f, hid_t dxpl_id, size_t size);
/* Package Variables */
/*********************/
-/* Declare a free list to manage the H5HG_t struct */
+/* Declare a free list to manage the H5HG_heap_t struct */
H5FL_DEFINE(H5HG_heap_t);
/* Declare a free list to manage sequences of H5HG_obj_t's */
@@ -253,6 +253,45 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5HG_protect
+ *
+ * Purpose: Convenience wrapper around H5AC_protect on a global heap collection
+ *
+ * Return: Pointer to global heap collection on success, NULL on failure
+ *
+ * Programmer: Quincey Koziol
+ * Wednesday, May 5, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+H5HG_heap_t *
+H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
+{
+ H5HG_heap_t *heap; /* Global heap */
+ H5HG_heap_t *ret_value; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT(H5HG_protect)
+
+ /* Check arguments */
+ HDassert(f);
+ HDassert(H5F_addr_defined(addr));
+
+ /* Lock the heap into memory */
+ if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, addr, f, rw)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect global heap")
+
+ /* Set the heap's address */
+ heap->addr = addr;
+
+ /* Set the return value */
+ ret_value = heap;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5HG_protect() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5HG_alloc
*
* Purpose: Given a heap with enough free space, this function will split
@@ -271,36 +310,36 @@ done:
*-------------------------------------------------------------------------
*/
static size_t
-H5HG_alloc (H5F_t *f, H5HG_heap_t *heap, size_t size, unsigned * heap_flags_ptr)
+H5HG_alloc(H5F_t *f, H5HG_heap_t *heap, size_t size, unsigned *heap_flags_ptr)
{
size_t idx;
- uint8_t *p = NULL;
+ uint8_t *p;
size_t need = H5HG_SIZEOF_OBJHDR(f) + H5HG_ALIGN(size);
- size_t ret_value; /* Return value */
+ size_t ret_value; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5HG_alloc);
/* Check args */
- assert (heap);
- assert (heap->obj[0].size>=need);
- assert (heap_flags_ptr);
+ HDassert(heap);
+ HDassert(heap->obj[0].size>=need);
+ HDassert(heap_flags_ptr);
/*
* Find an ID for the new object. ID zero is reserved for the free space
* object.
*/
- if(heap->nused<=H5HG_MAXIDX)
- idx=heap->nused++;
+ if(heap->nused <= H5HG_MAXIDX)
+ idx = heap->nused++;
else {
- for (idx=1; idx<heap->nused; idx++)
- if (NULL==heap->obj[idx].begin)
+ for(idx = 1; idx < heap->nused; idx++)
+ if(NULL == heap->obj[idx].begin)
break;
} /* end else */
HDassert(idx < heap->nused);
/* Check if we need more room to store heap objects */
- if(idx>=heap->nalloc) {
+ if(idx >= heap->nalloc) {
size_t new_alloc; /* New allocation number */
H5HG_obj_t *new_obj; /* New array of object descriptions */
@@ -310,16 +349,16 @@ H5HG_alloc (H5F_t *f, H5HG_heap_t *heap, size_t size, unsigned * heap_flags_ptr)
HDassert(idx < new_alloc);
/* Reallocate array of objects */
- if (NULL==(new_obj = H5FL_SEQ_REALLOC (H5HG_obj_t, heap->obj, new_alloc)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "memory allocation failed")
+ if(NULL == (new_obj = H5FL_SEQ_REALLOC(H5HG_obj_t, heap->obj, new_alloc)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, 0, "memory allocation failed")
/* Clear newly allocated space */
HDmemset(&new_obj[heap->nalloc], 0, (new_alloc - heap->nalloc) * sizeof(heap->obj[0]));
/* Update heap information */
- heap->nalloc=new_alloc;
- heap->obj=new_obj;
- HDassert(heap->nalloc>heap->nused);
+ heap->nalloc = new_alloc;
+ heap->obj = new_obj;
+ HDassert(heap->nalloc > heap->nused);
} /* end if */
/* Initialize the new object */
@@ -333,14 +372,14 @@ H5HG_alloc (H5F_t *f, H5HG_heap_t *heap, size_t size, unsigned * heap_flags_ptr)
H5F_ENCODE_LENGTH (f, p, size);
/* Fix the free space object */
- if (need==heap->obj[0].size) {
+ if(need == heap->obj[0].size) {
/*
* All free space has been exhausted from this collection.
*/
heap->obj[0].size = 0;
heap->obj[0].begin = NULL;
-
- } else if (heap->obj[0].size-need >= H5HG_SIZEOF_OBJHDR (f)) {
+ } /* end if */
+ else if(heap->obj[0].size-need >= H5HG_SIZEOF_OBJHDR (f)) {
/*
* Some free space remains and it's larger than a heap object header,
* so write the new free heap object header to the heap.
@@ -353,8 +392,8 @@ H5HG_alloc (H5F_t *f, H5HG_heap_t *heap, size_t size, unsigned * heap_flags_ptr)
UINT32ENCODE(p, 0); /*reserved*/
H5F_ENCODE_LENGTH (f, p, heap->obj[0].size);
assert(H5HG_ISALIGNED(heap->obj[0].size));
-
- } else {
+ } /* end else-if */
+ else {
/*
* Some free space remains but it's smaller than a heap object header,
* so we don't write the header.
@@ -368,11 +407,11 @@ H5HG_alloc (H5F_t *f, H5HG_heap_t *heap, size_t size, unsigned * heap_flags_ptr)
*heap_flags_ptr |= H5AC__DIRTIED_FLAG;
/* Set the return value */
- ret_value=idx;
+ ret_value = idx;
done:
FUNC_LEAVE_NOAPI(ret_value);
-}
+} /* end H5HG_alloc() */
/*-------------------------------------------------------------------------
@@ -403,11 +442,13 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5HG_extend(H5F_t *f, H5HG_heap_t *heap, size_t need, unsigned *heap_flags_ptr)
+H5HG_extend(H5F_t *f, hid_t dxpl_id, haddr_t addr, size_t need)
{
+ H5HG_heap_t *heap = NULL; /* Pointer to heap to extend */
+ unsigned heap_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting the heap */
size_t old_size; /* Previous size of the heap's chunk */
- uint8_t *new_chunk = NULL; /* Pointer to new chunk information */
- uint8_t *p = NULL; /* Pointer to raw heap info */
+ uint8_t *new_chunk; /* Pointer to new chunk information */
+ uint8_t *p; /* Pointer to raw heap info */
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -415,8 +456,11 @@ H5HG_extend(H5F_t *f, H5HG_heap_t *heap, size_t need, unsigned *heap_flags_ptr)
/* Check args */
HDassert(f);
- HDassert(heap);
- HDassert(heap_flags_ptr);
+ HDassert(H5F_addr_defined(addr));
+
+ /* Protect the heap */
+ if(NULL == (heap = H5HG_protect(f, dxpl_id, addr, H5AC_WRITE)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
/* Re-allocate the heap information in memory */
if(NULL == (new_chunk = H5FL_BLK_REALLOC(gheap_chunk, heap->chunk, (heap->size + need))))
@@ -452,10 +496,17 @@ HDmemset(new_chunk + heap->size, 0, need);
H5F_ENCODE_LENGTH(f, p, heap->obj[0].size);
assert(H5HG_ISALIGNED(heap->obj[0].size));
+ /* Resize the heap in the cache */
+ if(H5AC_resize_entry(heap, heap->size) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to resize global heap in cache")
+
/* Mark the heap as dirty */
- *heap_flags_ptr |= H5AC__DIRTIED_FLAG;
+ heap_flags |= H5AC__DIRTIED_FLAG;
done:
+ if(heap && H5AC_unprotect(f, dxpl_id, H5AC_GHEAP, heap->addr, heap, heap_flags) < 0)
+ HDONE_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "unable to unprotect heap")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HG_extend() */
@@ -521,14 +572,6 @@ H5HG_insert(H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*/
* and protecting and unprotecting all the collections in the global
* heap on a regular basis will skew the replacement policy.
*
- * However, there is a bigger issue -- as best I can tell, we only look
- * for free space in global heap chunks that are in cache. If we can't
- * find any, we allocate a new chunk. This may be a problem in FP mode,
- * as the metadata cache is disabled. Do we allocate a new heap
- * collection for every entry in this case?
- *
- * Note that all this comes from a cursory read of the source. Don't
- * take any of it as gospel.
* JRM - 5/24/04
*/
for(cwfsno = 0; cwfsno < f->shared->ncwfs; cwfsno++)
@@ -557,8 +600,8 @@ H5HG_insert(H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*/
if(extended < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTEXTEND, FAIL, "error trying to extend heap")
else if(extended == TRUE) {
- if(H5HG_extend(f, f->shared->cwfs[cwfsno], new_need, &heap_flags) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "unable to extend global heap collection")
+ if(H5HG_extend(f, dxpl_id, f->shared->cwfs[cwfsno]->addr, new_need) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to extend global heap collection")
addr = f->shared->cwfs[cwfsno]->addr;
found = TRUE;
break;
@@ -590,11 +633,12 @@ H5HG_insert(H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*/
} /* end if */
} /* end else */
HDassert(H5F_addr_defined(addr));
- if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, addr, f, H5AC_WRITE)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load heap")
+ if(NULL == (heap = H5HG_protect(f, dxpl_id, addr, H5AC_WRITE)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
/* Split the free space to make room for the new object */
- idx = H5HG_alloc(f, heap, size, &heap_flags);
+ if(0 == (idx = H5HG_alloc(f, heap, size, &heap_flags)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, FAIL, "unable to allocate global heap object")
/* Copy data into the heap */
if(size > 0) {
@@ -613,7 +657,7 @@ H5HG_insert(H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*/
done:
if(heap && H5AC_unprotect(f, dxpl_id, H5AC_GHEAP, heap->addr, heap, heap_flags) < 0)
- HDONE_ERROR(H5E_HEAP, H5E_PROTECT, FAIL, "unable to unprotect heap.")
+ HDONE_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "unable to unprotect heap.")
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* H5HG_insert() */
@@ -653,8 +697,8 @@ H5HG_read(H5F_t *f, hid_t dxpl_id, H5HG_t *hobj, void *object/*out*/,
HDassert(hobj);
/* Load the heap */
- if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, hobj->addr, f, H5AC_READ)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "unable to load heap")
+ if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC_READ)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect global heap")
HDassert(hobj->idx < heap->nused);
HDassert(heap->obj[hobj->idx].begin);
@@ -692,7 +736,7 @@ H5HG_read(H5F_t *f, hid_t dxpl_id, H5HG_t *hobj, void *object/*out*/,
done:
if(heap && H5AC_unprotect(f, dxpl_id, H5AC_GHEAP, hobj->addr, heap, H5AC__NO_FLAGS_SET) < 0)
- HDONE_ERROR(H5E_HEAP, H5E_PROTECT, NULL, "unable to release object header")
+ HDONE_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, NULL, "unable to release object header")
if(NULL == ret_value && NULL == orig_object && object)
H5MM_free(object);
@@ -735,8 +779,8 @@ H5HG_link(H5F_t *f, hid_t dxpl_id, const H5HG_t *hobj, int adjust)
HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "no write intent on file")
/* Load the heap */
- if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, hobj->addr, f, H5AC_WRITE)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load heap")
+ if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC_WRITE)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
if(adjust != 0) {
HDassert(hobj->idx < heap->nused);
@@ -754,7 +798,7 @@ H5HG_link(H5F_t *f, hid_t dxpl_id, const H5HG_t *hobj, int adjust)
done:
if(heap && H5AC_unprotect(f, dxpl_id, H5AC_GHEAP, hobj->addr, heap, heap_flags) < 0)
- HDONE_ERROR(H5E_HEAP, H5E_PROTECT, FAIL, "unable to release object header")
+ HDONE_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "unable to release object header")
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5HG_link() */
@@ -798,8 +842,8 @@ H5HG_remove (H5F_t *f, hid_t dxpl_id, H5HG_t *hobj)
HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "no write intent on file")
/* Load the heap */
- if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, hobj->addr, f, H5AC_WRITE)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load heap")
+ if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC_WRITE)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
HDassert(hobj->idx < heap->nused);
HDassert(heap->obj[hobj->idx].begin);
@@ -859,7 +903,7 @@ H5HG_remove (H5F_t *f, hid_t dxpl_id, H5HG_t *hobj)
done:
if(heap && H5AC_unprotect(f, dxpl_id, H5AC_GHEAP, hobj->addr, heap, flags) < 0)
- HDONE_ERROR(H5E_HEAP, H5E_PROTECT, FAIL, "unable to release object header")
+ HDONE_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "unable to release object header")
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL);
} /* end H5HG_remove() */
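Taken together, the H5HG.c hunks above converge on one call pattern: callers obtain a collection through the new H5HG_protect() wrapper, accumulate cache flags locally, and always release the collection in the done: block. A minimal sketch of that pattern, using only calls visible in this diff (the function name and body below are illustrative, not part of the patch):

    static herr_t
    example_modify_gheap(H5F_t *f, hid_t dxpl_id, haddr_t addr)
    {
        H5HG_heap_t *heap = NULL;                   /* Protected global heap collection */
        unsigned heap_flags = H5AC__NO_FLAGS_SET;   /* Flags to pass at unprotect time */
        herr_t ret_value = SUCCEED;                 /* Return value */

        FUNC_ENTER_NOAPI_NOINIT(example_modify_gheap)

        /* Protect the collection for writing */
        if(NULL == (heap = H5HG_protect(f, dxpl_id, addr, H5AC_WRITE)))
            HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")

        /* ... modify the collection's chunk in memory ... */

        /* Remember that the cache entry changed */
        heap_flags |= H5AC__DIRTIED_FLAG;

    done:
        /* Always release the collection, passing along the accumulated flags */
        if(heap && H5AC_unprotect(f, dxpl_id, H5AC_GHEAP, heap->addr, heap, heap_flags) < 0)
            HDONE_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "unable to unprotect heap")

        FUNC_LEAVE_NOAPI(ret_value)
    } /* end example_modify_gheap() */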
diff --git a/src/H5HGcache.c b/src/H5HGcache.c
index e234d41..a3cf5b1 100644
--- a/src/H5HGcache.c
+++ b/src/H5HGcache.c
@@ -131,7 +131,6 @@ H5HG_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
/* Read the initial 4k page */
if(NULL == (heap = H5FL_CALLOC(H5HG_heap_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- heap->addr = addr;
heap->shared = f->shared;
if(NULL == (heap->chunk = H5FL_BLK_MALLOC(gheap_chunk, (size_t)H5HG_MINSIZE)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
diff --git a/src/H5HGdbg.c b/src/H5HGdbg.c
index 1fc0133..38b7047 100644
--- a/src/H5HGdbg.c
+++ b/src/H5HGdbg.c
@@ -73,8 +73,8 @@ H5HG_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
HDassert(indent >= 0);
HDassert(fwidth >= 0);
- if(NULL == (h = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, addr, f, H5AC_READ)))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load global heap collection");
+ if(NULL == (h = H5HG_protect(f, dxpl_id, addr, H5AC_READ)))
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap collection");
fprintf(stream, "%*sGlobal Heap Collection...\n", indent, "");
fprintf(stream, "%*s%-*s %d\n", indent, "", fwidth,
diff --git a/src/H5HGpkg.h b/src/H5HGpkg.h
index 7ce18c9..6c60656 100644
--- a/src/H5HGpkg.h
+++ b/src/H5HGpkg.h
@@ -150,6 +150,7 @@ struct H5HG_heap_t {
/* Package Private Prototypes */
/******************************/
H5_DLL herr_t H5HG_free(H5HG_heap_t *heap);
+H5_DLL H5HG_heap_t *H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw);
#endif /* _H5HGpkg_H */
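Only the prototype of H5HG_protect() appears in this hunk; its definition (in H5HG.c) is outside the hunks shown here. Since the H5HGcache.c change above stops setting heap->addr in H5HG_load(), the wrapper presumably protects the collection through H5AC and records the address itself. A hedged sketch of what such a wrapper could look like, not the literal implementation from this patch:

    H5HG_heap_t *
    H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
    {
        H5HG_heap_t *heap;                  /* Global heap collection from the cache */
        H5HG_heap_t *ret_value = NULL;      /* Return value */

        FUNC_ENTER_NOAPI_NOINIT(H5HG_protect)

        /* Sanity checks */
        HDassert(f);
        HDassert(H5F_addr_defined(addr));

        /* Protect the collection through the metadata cache */
        if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, addr, f, rw)))
            HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect global heap")

        /* Record the collection's address, since H5HG_load no longer does */
        heap->addr = addr;

        /* Set the return value */
        ret_value = heap;

    done:
        FUNC_LEAVE_NOAPI(ret_value)
    } /* end H5HG_protect() */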
diff --git a/src/H5HL.c b/src/H5HL.c
index debec41..61c97e1 100644
--- a/src/H5HL.c
+++ b/src/H5HL.c
@@ -210,6 +210,7 @@ H5HL_dblk_realloc(H5F_t *f, hid_t dxpl_id, H5HL_t *heap, size_t new_heap_size)
H5HL_dblk_t *dblk; /* Local heap data block */
haddr_t old_addr; /* Old location of heap data block */
haddr_t new_addr; /* New location of heap data block */
+ size_t old_heap_size; /* Old size of heap data block */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5HL_dblk_realloc)
@@ -220,8 +221,9 @@ H5HL_dblk_realloc(H5F_t *f, hid_t dxpl_id, H5HL_t *heap, size_t new_heap_size)
/* Release old space on disk */
old_addr = heap->dblk_addr;
- H5_CHECK_OVERFLOW(heap->dblk_size, size_t, hsize_t);
- if(H5MF_xfree(f, H5FD_MEM_LHEAP, dxpl_id, old_addr, (hsize_t)heap->dblk_size) < 0)
+ old_heap_size = heap->dblk_size;
+ H5_CHECK_OVERFLOW(old_heap_size, size_t, hsize_t);
+ if(H5MF_xfree(f, H5FD_MEM_LHEAP, dxpl_id, old_addr, (hsize_t)old_heap_size) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "can't release old heap data?")
/* Allocate new space on disk */
@@ -229,25 +231,29 @@ H5HL_dblk_realloc(H5F_t *f, hid_t dxpl_id, H5HL_t *heap, size_t new_heap_size)
if(HADDR_UNDEF == (new_addr = H5MF_alloc(f, H5FD_MEM_LHEAP, dxpl_id, (hsize_t)new_heap_size)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, FAIL, "unable to allocate file space for heap")
+ /* Update heap info */
+ heap->dblk_addr = new_addr;
+ heap->dblk_size = new_heap_size;
+
/* Check if heap data block actually moved in the file */
if(H5F_addr_eq(old_addr, new_addr)) {
/* Check if heap data block is contiguous w/prefix */
if(heap->single_cache_obj) {
/* Sanity check */
- HDassert(H5F_addr_eq(heap->prfx_addr + heap->prfx_size, heap->dblk_addr));
+ HDassert(H5F_addr_eq(heap->prfx_addr + heap->prfx_size, old_addr));
HDassert(heap->prfx);
/* Resize the heap prefix in the cache */
- if(H5AC_resize_pinned_entry(heap->prfx, (size_t)(heap->prfx_size + new_heap_size)) < 0)
+ if(H5AC_resize_entry(heap->prfx, (size_t)(heap->prfx_size + new_heap_size)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to resize heap in cache")
} /* end if */
else {
/* Sanity check */
- HDassert(H5F_addr_ne(heap->prfx_addr + heap->prfx_size, heap->dblk_addr));
+ HDassert(H5F_addr_ne(heap->prfx_addr + heap->prfx_size, old_addr));
HDassert(heap->dblk);
/* Resize the heap data block in the cache */
- if(H5AC_resize_pinned_entry(heap->dblk, (size_t)new_heap_size) < 0)
+ if(H5AC_resize_entry(heap->dblk, (size_t)new_heap_size) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to resize heap in cache")
} /* end else */
} /* end if */
@@ -260,7 +266,7 @@ H5HL_dblk_realloc(H5F_t *f, hid_t dxpl_id, H5HL_t *heap, size_t new_heap_size)
/* Resize current heap prefix */
heap->prfx_size = H5HL_SIZEOF_HDR(f);
- if(H5AC_resize_pinned_entry(heap->prfx, (size_t)heap->prfx_size) < 0)
+ if(H5AC_resize_entry(heap->prfx, (size_t)heap->prfx_size) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to resize heap prefix in cache")
/* Insert data block into cache (pinned) */
@@ -276,20 +282,22 @@ H5HL_dblk_realloc(H5F_t *f, hid_t dxpl_id, H5HL_t *heap, size_t new_heap_size)
/* (ignore [unlikely] case where heap data block ends up
* contiguous w/heap prefix again.
*/
- if(H5AC_resize_pinned_entry(heap->dblk, (size_t)new_heap_size) < 0)
+ if(H5AC_resize_entry(heap->dblk, (size_t)new_heap_size) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to resize heap data block in cache")
/* Relocate the heap data block in the cache */
- if(H5AC_rename(f, H5AC_LHEAP_DBLK, old_addr, new_addr) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRENAME, FAIL, "unable to move heap data block in cache")
+ if(H5AC_move_entry(f, H5AC_LHEAP_DBLK, old_addr, new_addr) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move heap data block in cache")
} /* end else */
} /* end else */
- /* Update heap info*/
- heap->dblk_addr = new_addr;
- heap->dblk_size = new_heap_size;
-
done:
+ if(ret_value < 0) {
+ /* Restore old heap address & size */
+ heap->dblk_addr = old_addr;
+ heap->dblk_size = old_heap_size;
+ } /* end if */
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HL_dblk_realloc() */
@@ -450,6 +458,7 @@ H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
/* Construct the user data for protect callback */
prfx_udata.sizeof_size = H5F_SIZEOF_SIZE(f);
prfx_udata.sizeof_addr = H5F_SIZEOF_ADDR(f);
+ prfx_udata.prfx_addr = addr;
prfx_udata.sizeof_prfx = H5HL_SIZEOF_HDR(f);
prfx_udata.loaded = FALSE;
prfx_udata.free_block = H5HL_FREE_NULL;
@@ -654,12 +663,12 @@ H5HL_dirty(H5HL_t *heap)
/* Sanity check */
HDassert(heap->dblk);
- if(H5AC_mark_pinned_or_protected_entry_dirty(heap->dblk) < 0)
+ if(H5AC_mark_entry_dirty(heap->dblk) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTMARKDIRTY, FAIL, "unable to mark heap data block as dirty")
} /* end if */
/* Mark heap prefix as dirty */
- if(H5AC_mark_pinned_or_protected_entry_dirty(heap->prfx) < 0)
+ if(H5AC_mark_entry_dirty(heap->prfx) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTMARKDIRTY, FAIL, "unable to mark heap prefix as dirty")
done:
@@ -783,12 +792,12 @@ H5HL_insert(H5F_t *f, hid_t dxpl_id, H5HL_t *heap, size_t buf_size, const void *
/* Check for prefix & data block contiguous */
if(heap->single_cache_obj) {
/* Resize prefix+data block */
- if(H5AC_resize_pinned_entry(heap->prfx, (size_t)(heap->prfx_size + new_dblk_size)) < 0)
+ if(H5AC_resize_entry(heap->prfx, (size_t)(heap->prfx_size + new_dblk_size)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, UFAIL, "unable to resize heap prefix in cache")
} /* end if */
else {
/* Resize 'standalone' data block */
- if(H5AC_resize_pinned_entry(heap->dblk, (size_t)new_dblk_size) < 0)
+ if(H5AC_resize_entry(heap->dblk, (size_t)new_dblk_size) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, UFAIL, "unable to resize heap data block in cache")
} /* end else */
@@ -1066,6 +1075,7 @@ H5HL_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr)
/* Construct the user data for protect callback */
prfx_udata.sizeof_size = H5F_SIZEOF_SIZE(f);
prfx_udata.sizeof_addr = H5F_SIZEOF_ADDR(f);
+ prfx_udata.prfx_addr = addr;
prfx_udata.sizeof_prfx = H5HL_SIZEOF_HDR(f);
prfx_udata.loaded = FALSE;
prfx_udata.free_block = H5HL_FREE_NULL;
@@ -1145,6 +1155,7 @@ H5HL_get_size(H5F_t *f, hid_t dxpl_id, haddr_t addr, size_t *size)
/* Construct the user data for protect callback */
prfx_udata.sizeof_size = H5F_SIZEOF_SIZE(f);
prfx_udata.sizeof_addr = H5F_SIZEOF_ADDR(f);
+ prfx_udata.prfx_addr = addr;
prfx_udata.sizeof_prfx = H5HL_SIZEOF_HDR(f);
prfx_udata.loaded = FALSE;
prfx_udata.free_block = H5HL_FREE_NULL;
@@ -1198,6 +1209,7 @@ H5HL_heapsize(H5F_t *f, hid_t dxpl_id, haddr_t addr, hsize_t *heap_size)
/* Construct the user data for protect callback */
prfx_udata.sizeof_size = H5F_SIZEOF_SIZE(f);
prfx_udata.sizeof_addr = H5F_SIZEOF_ADDR(f);
+ prfx_udata.prfx_addr = addr;
prfx_udata.sizeof_prfx = H5HL_SIZEOF_HDR(f);
prfx_udata.loaded = FALSE;
prfx_udata.free_block = H5HL_FREE_NULL;
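The H5HL.c hunks above also track a rename in the metadata-cache client API: H5AC_resize_pinned_entry() becomes H5AC_resize_entry(), H5AC_mark_pinned_or_protected_entry_dirty() becomes H5AC_mark_entry_dirty(), and H5AC_rename() becomes H5AC_move_entry() (with H5E_CANTRENAME replaced by H5E_CANTMOVE). A short sketch of the new calls, with the old names noted, using the same error-handling convention as the code above (entry, new_size, old_addr and new_addr are placeholders):

    /* Resize a pinned or protected entry (was H5AC_resize_pinned_entry) */
    if(H5AC_resize_entry(entry, new_size) < 0)
        HGOTO_ERROR(H5E_HEAP, H5E_CANTRESIZE, FAIL, "unable to resize entry in cache")

    /* Mark a pinned or protected entry dirty (was H5AC_mark_pinned_or_protected_entry_dirty) */
    if(H5AC_mark_entry_dirty(entry) < 0)
        HGOTO_ERROR(H5E_HEAP, H5E_CANTMARKDIRTY, FAIL, "unable to mark entry dirty")

    /* Relocate an entry to a new file address (was H5AC_rename) */
    if(H5AC_move_entry(f, H5AC_LHEAP_DBLK, old_addr, new_addr) < 0)
        HGOTO_ERROR(H5E_HEAP, H5E_CANTMOVE, FAIL, "unable to move entry in cache")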
diff --git a/src/H5HLcache.c b/src/H5HLcache.c
index 7932651..238dce6 100644
--- a/src/H5HLcache.c
+++ b/src/H5HLcache.c
@@ -303,7 +303,7 @@ H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "can't allocate local heap prefix")
/* Store the prefix's address & length */
- heap->prfx_addr = addr;
+ heap->prfx_addr = udata->prfx_addr;
heap->prfx_size = udata->sizeof_prfx;
/* Heap data size */
diff --git a/src/H5HLpkg.h b/src/H5HLpkg.h
index 926ecd3..b7e0ece 100644
--- a/src/H5HLpkg.h
+++ b/src/H5HLpkg.h
@@ -123,6 +123,7 @@ typedef struct H5HL_cache_prfx_ud_t {
/* Downwards */
size_t sizeof_size; /* Size of file sizes */
size_t sizeof_addr; /* Size of file addresses */
+ haddr_t prfx_addr; /* Address of prefix */
size_t sizeof_prfx; /* Size of heap prefix */
/* Upwards */
diff --git a/src/H5MF.c b/src/H5MF.c
index 480f3bb..a8f12bb 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -905,7 +905,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5MF_can_shrink
+ * Function: H5MF_try_shrink
*
* Purpose: Try to shrink the size of a file with a block or absorb it
* into a block aggregator.
diff --git a/src/H5O.c b/src/H5O.c
index c590c50..876174a 100644
--- a/src/H5O.c
+++ b/src/H5O.c
@@ -1489,7 +1489,7 @@ H5O_link_oh(H5F_t *f, int adjust, hid_t dxpl_id, H5O_t *oh, hbool_t *deleted)
oh->nlink += adjust;
/* Mark object header as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(oh) < 0)
+ if(H5AC_mark_entry_dirty(oh) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTMARKDIRTY, FAIL, "unable to mark object header as dirty")
/* Check if the object should be deleted */
@@ -1521,7 +1521,7 @@ H5O_link_oh(H5F_t *f, int adjust, hid_t dxpl_id, H5O_t *oh, hbool_t *deleted)
oh->nlink += adjust;
/* Mark object header as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(oh) < 0)
+ if(H5AC_mark_entry_dirty(oh) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTMARKDIRTY, FAIL, "unable to mark object header as dirty")
} /* end if */
@@ -1660,6 +1660,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
udata.common.mesgs_modified = FALSE;
HDmemset(&cont_msg_info, 0, sizeof(cont_msg_info));
udata.common.cont_msg_info = &cont_msg_info;
+ udata.common.addr = loc->addr;
/* Lock the object header into the cache */
if(NULL == (oh = (H5O_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR, loc->addr, &udata, prot)))
@@ -1696,7 +1697,8 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
/* Bring the chunk into the cache */
/* (which adds to the object header) */
- chk_udata.chunk_size = cont_msg_info.msgs[curr_msg].size;
+ chk_udata.common.addr = cont_msg_info.msgs[curr_msg].addr;
+ chk_udata.size = cont_msg_info.msgs[curr_msg].size;
if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR_CHK, cont_msg_info.msgs[curr_msg].addr, &chk_udata, prot)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header chunk")
@@ -1762,6 +1764,9 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
/* Check for any messages that were modified while being read in */
if(udata.common.mesgs_modified && prot != H5AC_WRITE)
oh->mesgs_modified = TRUE;
+
+ /* Reset the field that contained chunk 0's size during speculative load */
+ oh->chunk0_size = 0;
} /* end if */
/* Take care of loose ends for modifications made while bringing in the
@@ -1772,7 +1777,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
/* (usually through updating the # of object header messages) */
if(oh->prefix_modified) {
/* Mark the header as dirty now */
- if(H5AC_mark_pinned_or_protected_entry_dirty(oh) < 0)
+ if(H5AC_mark_entry_dirty(oh) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTMARKDIRTY, NULL, "unable to mark object header as dirty")
/* Reset flag */
@@ -1798,7 +1803,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header chunk")
/* Unprotect chunk, marking it dirty */
- if(H5O_chunk_unprotect(loc->file, dxpl_id, chk_proxy, H5AC__DIRTIED_FLAG) < 0)
+ if(H5O_chunk_unprotect(loc->file, dxpl_id, chk_proxy, TRUE) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, NULL, "unable to unprotect object header chunk")
} /* end if */
} /* end for */
@@ -1962,7 +1967,7 @@ herr_t
H5O_touch_oh(H5F_t *f, hid_t dxpl_id, H5O_t *oh, hbool_t force)
{
H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that message is in */
- unsigned chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */
+ hbool_t chk_dirtied = FALSE; /* Flag for unprotecting chunk */
time_t now; /* Current time */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2016,7 +2021,7 @@ H5O_touch_oh(H5F_t *f, hid_t dxpl_id, H5O_t *oh, hbool_t force)
/* Mark the message as dirty */
oh->mesg[idx].dirty = TRUE;
- chk_flags |= H5AC__DIRTIED_FLAG;
+ chk_dirtied = TRUE;
} /* end if */
else {
/* XXX: For now, update access time & change fields in the object header */
@@ -2024,14 +2029,14 @@ H5O_touch_oh(H5F_t *f, hid_t dxpl_id, H5O_t *oh, hbool_t force)
oh->atime = oh->ctime = now;
/* Mark object header as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(oh) < 0)
+ if(H5AC_mark_entry_dirty(oh) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTMARKDIRTY, FAIL, "unable to mark object header as dirty")
} /* end else */
} /* end if */
done:
/* Release chunk */
- if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_flags) < 0)
+ if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_dirtied) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
FUNC_LEAVE_NOAPI(ret_value)
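In H5O.c the per-chunk bookkeeping changes from an unsigned flags word to a single hbool_t: callers set chk_dirtied when they touch the chunk and hand it to H5O_chunk_unprotect(), which translates it to H5AC__DIRTIED_FLAG internally (see the H5Ochunk.c hunks further down). A condensed sketch of the pattern that the hunks above, and those in H5Oalloc.c and H5Oattribute.c below, now share, with illustrative variable names:

    H5O_chunk_proxy_t *chk_proxy = NULL;    /* Chunk that the message lives in */
    hbool_t chk_dirtied = FALSE;            /* Whether the chunk was modified */

    /* Protect the chunk holding the message */
    if(NULL == (chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, mesg->chunkno)))
        HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk")

    /* ... modify the message in the chunk's image ... */
    mesg->dirty = TRUE;
    chk_dirtied = TRUE;

done:
    /* Release the chunk, dirtying it in the cache only if it actually changed */
    if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_dirtied) < 0)
        HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")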
diff --git a/src/H5Oalloc.c b/src/H5Oalloc.c
index 64d2f03..a06740c 100644
--- a/src/H5Oalloc.c
+++ b/src/H5Oalloc.c
@@ -59,8 +59,8 @@
/********************/
static herr_t H5O_add_gap(H5F_t *f, H5O_t *oh, unsigned chunkno,
- unsigned *chk_flags, unsigned idx, uint8_t *new_gap_loc, size_t new_gap_size);
-static herr_t H5O_eliminate_gap(H5O_t *oh, unsigned *chk_flags,
+ hbool_t *chk_dirtied, unsigned idx, uint8_t *new_gap_loc, size_t new_gap_size);
+static herr_t H5O_eliminate_gap(H5O_t *oh, hbool_t *chk_dirtied,
H5O_mesg_t *mesg, uint8_t *new_gap_loc, size_t new_gap_size);
static herr_t H5O_alloc_null(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned null_idx,
const H5O_msg_class_t *new_type, void *new_native, size_t new_size);
@@ -108,7 +108,7 @@ H5FL_EXTERN(H5O_cont_t);
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_add_gap(H5F_t *f, H5O_t *oh, unsigned chunkno, unsigned *chk_flags,
+H5O_add_gap(H5F_t *f, H5O_t *oh, unsigned chunkno, hbool_t *chk_dirtied,
unsigned idx, uint8_t *new_gap_loc, size_t new_gap_size)
{
hbool_t merged_with_null; /* Whether the gap was merged with a null message */
@@ -120,7 +120,7 @@ H5O_add_gap(H5F_t *f, H5O_t *oh, unsigned chunkno, unsigned *chk_flags,
/* check args */
HDassert(oh);
HDassert(oh->version > H5O_VERSION_1);
- HDassert(chk_flags);
+ HDassert(chk_dirtied);
HDassert(new_gap_loc);
HDassert(new_gap_size);
@@ -148,7 +148,7 @@ if(chunkno > 0) {
HDassert(oh->chunk[chunkno].gap == 0);
/* Eliminate the gap in the chunk */
- if(H5O_eliminate_gap(oh, chk_flags, &oh->mesg[u], new_gap_loc, new_gap_size) < 0)
+ if(H5O_eliminate_gap(oh, chk_dirtied, &oh->mesg[u], new_gap_loc, new_gap_size) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't eliminate gap in chunk")
/* Set flag to indicate that the gap was handled */
@@ -205,7 +205,7 @@ if(chunkno > 0) {
oh->chunk[chunkno].gap = new_gap_size;
/* Mark the chunk as modified */
- *chk_flags |= H5AC__DIRTIED_FLAG;
+ *chk_dirtied = TRUE;
} /* end if */
done:
@@ -234,18 +234,18 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5O_eliminate_gap(H5O_t *oh, unsigned *chk_flags, H5O_mesg_t *mesg,
+H5O_eliminate_gap(H5O_t *oh, hbool_t *chk_dirtied, H5O_mesg_t *mesg,
uint8_t *gap_loc, size_t gap_size)
{
uint8_t *move_start, *move_end; /* Pointers to area of messages to move */
- hbool_t null_before_gap; /* Flag whether the null message is before the gap or not */
+ hbool_t null_before_gap; /* Flag whether the null message is before the gap or not */
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5O_eliminate_gap)
/* check args */
HDassert(oh);
HDassert(oh->version > H5O_VERSION_1);
- HDassert(chk_flags);
+ HDassert(chk_dirtied);
HDassert(mesg);
HDassert(gap_loc);
HDassert(gap_size);
@@ -314,7 +314,7 @@ H5O_eliminate_gap(H5O_t *oh, unsigned *chk_flags, H5O_mesg_t *mesg,
/* Mark null message as dirty */
mesg->dirty = TRUE;
- *chk_flags |= H5AC__DIRTIED_FLAG;
+ *chk_dirtied = TRUE;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5O_eliminate_gap() */
@@ -339,7 +339,7 @@ H5O_alloc_null(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned null_idx,
const H5O_msg_class_t *new_type, void *new_native, size_t new_size)
{
H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that message is in */
- unsigned chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */
+ hbool_t chk_dirtied = FALSE; /* Flag for unprotecting chunk */
H5O_mesg_t *alloc_msg; /* Pointer to null message to allocate out of */
herr_t ret_value = SUCCEED; /* Return value */
@@ -367,7 +367,7 @@ H5O_alloc_null(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned null_idx,
alloc_msg->raw_size = new_size;
/* Add the gap to the chunk */
- if(H5O_add_gap(f, oh, alloc_msg->chunkno, &chk_flags, null_idx, alloc_msg->raw + alloc_msg->raw_size, gap_size) < 0)
+ if(H5O_add_gap(f, oh, alloc_msg->chunkno, &chk_dirtied, null_idx, alloc_msg->raw + alloc_msg->raw_size, gap_size) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert gap in chunk")
} /* end if */
else {
@@ -393,14 +393,14 @@ H5O_alloc_null(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned null_idx,
/* Mark the message as dirty */
null_msg->dirty = TRUE;
- chk_flags |= H5AC__DIRTIED_FLAG;
+ chk_dirtied = TRUE;
/* Check for gap in new null message's chunk */
if(oh->chunk[null_msg->chunkno].gap > 0) {
unsigned null_chunkno = null_msg->chunkno; /* Chunk w/gap */
/* Eliminate the gap in the chunk */
- if(H5O_eliminate_gap(oh, &chk_flags, null_msg,
+ if(H5O_eliminate_gap(oh, &chk_dirtied, null_msg,
((oh->chunk[null_chunkno].image + oh->chunk[null_chunkno].size) - (H5O_SIZEOF_CHKSUM_OH(oh) + oh->chunk[null_chunkno].gap)),
oh->chunk[null_chunkno].gap) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTREMOVE, FAIL, "can't eliminate gap in chunk")
@@ -417,11 +417,11 @@ H5O_alloc_null(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned null_idx,
/* Mark the new message as dirty */
alloc_msg->dirty = TRUE;
- chk_flags |= H5AC__DIRTIED_FLAG;
+ chk_dirtied = TRUE;
done:
/* Release chunk */
- if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_flags) < 0)
+ if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_dirtied) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
FUNC_LEAVE_NOAPI(ret_value)
@@ -507,8 +507,8 @@ static htri_t
H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno,
size_t size, int *msg_idx)
{
- H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that message is in */
- unsigned chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */
+ H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that message is in */
+ hbool_t chk_dirtied = FALSE; /* Flag for unprotecting chunk */
size_t delta; /* Change in chunk's size */
size_t aligned_size = H5O_ALIGN_OH(oh, size);
uint8_t *old_image; /* Old address of chunk's image in memory */
@@ -583,6 +583,10 @@ H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno,
} /* end if */
} /* end if */
+ /* Protect chunk */
+ if(NULL == (chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, chunkno)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk")
+
/* Determine whether the chunk can be extended */
extended = H5MF_try_extend(f, dxpl_id, H5FD_MEM_OHDR, oh->chunk[chunkno].addr,
(hsize_t)(oh->chunk[chunkno].size), (hsize_t)(delta + extra_prfx_size));
@@ -591,17 +595,13 @@ H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno,
else if(extended == FALSE) /* can't extend -- we are done */
HGOTO_DONE(FALSE)
- /* Protect chunk */
- if(NULL == (chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, chunkno)))
- HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk")
-
/* Adjust object header prefix flags */
if(adjust_size_flags) {
oh->flags &= (uint8_t)~H5O_HDR_CHUNK0_SIZE;
oh->flags |= new_size_flags;
/* Mark object header as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(oh) < 0)
+ if(H5AC_mark_entry_dirty(oh) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTMARKDIRTY, FAIL, "unable to mark object header as dirty")
} /* end if */
@@ -632,7 +632,7 @@ H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno,
/* Mark the extended message as dirty */
oh->mesg[extend_msg].dirty = TRUE;
- chk_flags |= H5AC__DIRTIED_FLAG;
+ chk_dirtied = TRUE;
/* Allocate more memory space for chunk's image */
old_image = oh->chunk[chunkno].image;
@@ -663,7 +663,7 @@ H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno,
if(chunkno > 0 && (H5O_CONT_ID == oh->mesg[u].type->id) &&
(((H5O_cont_t *)(oh->mesg[u].native))->chunkno == chunkno)) {
H5O_chunk_proxy_t *chk_proxy2 = NULL; /* Chunk that continuation message is in */
- unsigned chk_flags2 = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */
+ hbool_t chk_dirtied2 = FALSE; /* Flag for unprotecting chunk */
unsigned cont_chunkno = oh->mesg[u].chunkno; /* Chunk # for continuation message */
/* Protect chunk containing continuation message */
@@ -676,23 +676,24 @@ H5O_alloc_extend_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno,
/* Flag continuation message as dirty */
oh->mesg[u].dirty = TRUE;
- chk_flags2 |= H5AC__DIRTIED_FLAG;
+ chk_dirtied2 = TRUE;
/* Release chunk containing continuation message */
- if(H5O_chunk_unprotect(f, dxpl_id, chk_proxy2, chk_flags2) < 0)
+ if(H5O_chunk_unprotect(f, dxpl_id, chk_proxy2, chk_dirtied2) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
} /* end if */
} /* end for */
- /* Mark the chunk size in the cache as changed */
- chk_flags |= H5AC__SIZE_CHANGED_FLAG;
+ /* Resize the chunk in the cache */
+ if(H5O_chunk_resize(oh, chk_proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTRESIZE, FAIL, "unable to resize object header chunk")
/* Set return value */
*msg_idx = extend_msg;
done:
/* Release chunk */
- if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_flags) < 0)
+ if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_dirtied) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
FUNC_LEAVE_NOAPI(ret_value)
@@ -993,7 +994,7 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size)
oh->chunk[chunkno - 1].gap = 0;
/* Release chunk, marking it dirty */
- if(H5O_chunk_unprotect(f, dxpl_id, chk_proxy, H5AC__DIRTIED_FLAG) < 0)
+ if(H5O_chunk_unprotect(f, dxpl_id, chk_proxy, TRUE) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
} else if(found_null < 0) {
/* Move message (that will be replaced with continuation message)
@@ -1059,7 +1060,7 @@ H5O_alloc_new_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, size_t size)
null_msg->dirty = TRUE;
/* Release chunk, marking it dirty */
- if(H5O_chunk_unprotect(f, dxpl_id, chk_proxy, H5AC__DIRTIED_FLAG) < 0)
+ if(H5O_chunk_unprotect(f, dxpl_id, chk_proxy, TRUE) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
} /* end if */
HDassert(found_null >= 0);
@@ -1171,7 +1172,7 @@ H5O_alloc(H5F_t *f, hid_t dxpl_id, H5O_t *oh, const H5O_msg_class_t *type,
HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't split null message")
/* Mark object header as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(oh) < 0)
+ if(H5AC_mark_entry_dirty(oh) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTMARKDIRTY, FAIL, "unable to mark object header as dirty")
/* Set return value */
@@ -1200,9 +1201,9 @@ herr_t
H5O_release_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_mesg_t *mesg,
hbool_t adj_link)
{
- H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that message is in */
- unsigned chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that message is in */
+ hbool_t chk_dirtied = FALSE; /* Flag for unprotecting chunk */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5O_release_mesg, FAIL)
@@ -1235,12 +1236,12 @@ H5O_release_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_mesg_t *mesg,
/* Mark the message as modified */
mesg->dirty = TRUE;
- chk_flags |= H5AC__DIRTIED_FLAG;
+ chk_dirtied = TRUE;
/* Check if chunk has a gap currently */
if(oh->chunk[mesg->chunkno].gap) {
/* Eliminate the gap in the chunk */
- if(H5O_eliminate_gap(oh, &chk_flags, mesg,
+ if(H5O_eliminate_gap(oh, &chk_dirtied, mesg,
((oh->chunk[mesg->chunkno].image + oh->chunk[mesg->chunkno].size) - (H5O_SIZEOF_CHKSUM_OH(oh) + oh->chunk[mesg->chunkno].gap)),
oh->chunk[mesg->chunkno].gap) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTREMOVE, FAIL, "can't eliminate gap in chunk")
@@ -1248,7 +1249,7 @@ H5O_release_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, H5O_mesg_t *mesg,
done:
/* Release chunk, if not already done */
- if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_flags) < 0)
+ if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_dirtied) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
FUNC_LEAVE_NOAPI(ret_value)
@@ -1274,7 +1275,7 @@ H5O_move_cont(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned cont_u)
H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that continuation message is in */
H5O_mesg_t *cont_msg; /* Pointer to the continuation message */
unsigned deleted_chunkno; /* Chunk # to delete */
- unsigned chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */
+ hbool_t chk_dirtied = FALSE; /* Flag for unprotecting chunk */
htri_t ret_value = TRUE; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5O_move_cont)
@@ -1353,7 +1354,7 @@ H5O_move_cont(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned cont_u)
curr_msg->raw = move_start + H5O_SIZEOF_MSGHDR_OH(oh);
curr_msg->chunkno = cont_chunkno;
curr_msg->dirty = TRUE;
- chk_flags |= H5AC__DIRTIED_FLAG;
+ chk_dirtied = TRUE;
/* Adjust location to move messages to */
move_start += move_size;
@@ -1370,13 +1371,13 @@ H5O_move_cont(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned cont_u)
cont_msg->raw_size = gap_size - H5O_SIZEOF_MSGHDR_OH(oh);
cont_msg->raw = move_start + H5O_SIZEOF_MSGHDR_OH(oh);
cont_msg->dirty = TRUE;
- chk_flags |= H5AC__DIRTIED_FLAG;
+ chk_dirtied = TRUE;
} /* end if */
else {
/* Check if there is space that should be a gap */
if(gap_size > 0) {
/* Convert remnant into gap in chunk */
- if(H5O_add_gap(f, oh, cont_chunkno, &chk_flags, cont_u, move_start, gap_size) < 0)
+ if(H5O_add_gap(f, oh, cont_chunkno, &chk_dirtied, cont_u, move_start, gap_size) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert gap in chunk")
} /* end if */
@@ -1396,7 +1397,7 @@ H5O_move_cont(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned cont_u)
if(curr_msg->type->id == H5O_NULL_ID) {
/* Release any information/memory for message */
H5O_msg_free_mesg(curr_msg);
- chk_flags |= H5AC__DIRTIED_FLAG;
+ chk_dirtied = TRUE;
/* Remove from message list */
if(v < (oh->nmesgs - 1))
@@ -1417,7 +1418,7 @@ H5O_move_cont(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned cont_u)
done:
/* Release chunk, if not already done */
- if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_flags) < 0)
+ if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_dirtied) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
FUNC_LEAVE_NOAPI(ret_value)
@@ -1503,7 +1504,7 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
curr_msg->dirty = TRUE;
/* Release chunk, marking it dirty */
- if(H5O_chunk_unprotect(f, dxpl_id, null_chk_proxy, H5AC__DIRTIED_FLAG) < 0)
+ if(H5O_chunk_unprotect(f, dxpl_id, null_chk_proxy, TRUE) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
/* Set the flag to indicate that the null message
@@ -1543,10 +1544,10 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
for(v = 0, null_msg = &oh->mesg[0]; v < oh->nmesgs; v++, null_msg++) {
if(H5O_NULL_ID == null_msg->type->id && curr_msg->chunkno > null_msg->chunkno
&& curr_msg->raw_size <= null_msg->raw_size) {
- H5O_chunk_proxy_t *null_chk_proxy; /* Chunk that null message is in */
- H5O_chunk_proxy_t *curr_chk_proxy; /* Chunk that message is in */
- unsigned null_chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting null chunk */
- unsigned curr_chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting curr chunk */
+ H5O_chunk_proxy_t *null_chk_proxy; /* Chunk that null message is in */
+ H5O_chunk_proxy_t *curr_chk_proxy; /* Chunk that message is in */
+ hbool_t null_chk_dirtied = FALSE; /* Flag for unprotecting null chunk */
+ hbool_t curr_chk_dirtied = FALSE; /* Flag for unprotecting curr chunk */
unsigned old_chunkno; /* Old message information */
uint8_t *old_raw;
@@ -1566,7 +1567,7 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
/* Point non-null message at null message's space */
curr_msg->chunkno = null_msg->chunkno;
curr_msg->raw = null_msg->raw;
- curr_chk_flags |= H5AC__DIRTIED_FLAG;
+ curr_chk_dirtied = TRUE;
/* Change information for null message */
if(curr_msg->raw_size == null_msg->raw_size) {
@@ -1577,23 +1578,23 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
/* Mark null message dirty */
null_msg->dirty = TRUE;
- null_chk_flags |= H5AC__DIRTIED_FLAG;
+ null_chk_dirtied = TRUE;
/* Release current chunk, marking it dirty */
- if(H5O_chunk_unprotect(f, dxpl_id, curr_chk_proxy, curr_chk_flags) < 0)
+ if(H5O_chunk_unprotect(f, dxpl_id, curr_chk_proxy, curr_chk_dirtied) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
/* Check for gap in null message's chunk */
if(oh->chunk[old_chunkno].gap > 0) {
/* Eliminate the gap in the chunk */
- if(H5O_eliminate_gap(oh, &null_chk_flags, null_msg,
+ if(H5O_eliminate_gap(oh, &null_chk_dirtied, null_msg,
((oh->chunk[old_chunkno].image + oh->chunk[old_chunkno].size) - (H5O_SIZEOF_CHKSUM_OH(oh) + oh->chunk[old_chunkno].gap)),
oh->chunk[old_chunkno].gap) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTREMOVE, FAIL, "can't eliminate gap in chunk")
} /* end if */
/* Release null chunk, marking it dirty */
- if(H5O_chunk_unprotect(f, dxpl_id, null_chk_proxy, null_chk_flags) < 0)
+ if(H5O_chunk_unprotect(f, dxpl_id, null_chk_proxy, null_chk_dirtied) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
} /* end if */
else {
@@ -1608,10 +1609,10 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
/* Mark null message dirty */
null_msg->dirty = TRUE;
- null_chk_flags |= H5AC__DIRTIED_FLAG;
+ null_chk_dirtied = TRUE;
/* Add the gap to the chunk */
- if(H5O_add_gap(f, oh, null_msg->chunkno, &null_chk_flags, v, null_msg->raw + null_msg->raw_size, gap_size) < 0)
+ if(H5O_add_gap(f, oh, null_msg->chunkno, &null_chk_dirtied, v, null_msg->raw + null_msg->raw_size, gap_size) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "can't insert gap in chunk")
/* Re-use message # for new null message taking place of non-null message */
@@ -1624,7 +1625,7 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
/* Mark null message dirty */
null_msg->dirty = TRUE;
- null_chk_flags |= H5AC__DIRTIED_FLAG;
+ null_chk_dirtied = TRUE;
/* Create new null message for previous location of non-null message */
if(oh->nmesgs >= oh->alloc_nmesgs) {
@@ -1640,7 +1641,7 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
} /* end else */
/* Release null message's chunk, marking it dirty */
- if(H5O_chunk_unprotect(f, dxpl_id, null_chk_proxy, null_chk_flags) < 0)
+ if(H5O_chunk_unprotect(f, dxpl_id, null_chk_proxy, null_chk_dirtied) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
/* Initialize new null message to take over non-null message's location */
@@ -1652,19 +1653,19 @@ H5O_move_msgs_forward(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
/* Mark new null message dirty */
oh->mesg[new_null_msg].dirty = TRUE;
- curr_chk_flags |= H5AC__DIRTIED_FLAG;
+ curr_chk_dirtied = TRUE;
/* Check for gap in new null message's chunk */
if(oh->chunk[old_chunkno].gap > 0) {
/* Eliminate the gap in the chunk */
- if(H5O_eliminate_gap(oh, &curr_chk_flags, &oh->mesg[new_null_msg],
+ if(H5O_eliminate_gap(oh, &curr_chk_dirtied, &oh->mesg[new_null_msg],
((oh->chunk[old_chunkno].image + oh->chunk[old_chunkno].size) - (H5O_SIZEOF_CHKSUM_OH(oh) + oh->chunk[old_chunkno].gap)),
oh->chunk[old_chunkno].gap) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTREMOVE, FAIL, "can't eliminate gap in chunk")
} /* end if */
/* Release new null message's chunk, marking it dirty */
- if(H5O_chunk_unprotect(f, dxpl_id, curr_chk_proxy, curr_chk_flags) < 0)
+ if(H5O_chunk_unprotect(f, dxpl_id, curr_chk_proxy, curr_chk_dirtied) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
} /* end else */
@@ -1790,7 +1791,7 @@ H5O_merge_null(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
curr_msg->dirty = TRUE;
/* Release new null message's chunk, marking it dirty */
- if(H5O_chunk_unprotect(f, dxpl_id, curr_chk_proxy, H5AC__DIRTIED_FLAG) < 0)
+ if(H5O_chunk_unprotect(f, dxpl_id, curr_chk_proxy, TRUE) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
/* Remove second message from list of messages */
@@ -2263,20 +2264,24 @@ H5O_alloc_shrink_chunk(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned chunkno)
curr_msg->dirty = TRUE;
/* Release chunk, marking it dirty */
- if(H5O_chunk_unprotect(f, dxpl_id, cont_chk_proxy, H5AC__DIRTIED_FLAG) < 0)
+ if(H5O_chunk_unprotect(f, dxpl_id, cont_chk_proxy, TRUE) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
} /* end if */
} /* end for */
HDassert(new_size <= old_size);
+ /* Resize the chunk in the cache */
+ if(H5O_chunk_resize(oh, chk_proxy) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTRESIZE, FAIL, "unable to resize object header chunk")
+
/* Free the unused space in the file */
if(H5MF_xfree(f, H5FD_MEM_OHDR, dxpl_id, chunk->addr + new_size, (hsize_t)(old_size - new_size)) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to shrink object header chunk")
done:
/* Release chunk, marking it dirty */
- if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, H5AC__DIRTIED_FLAG) < 0)
+ if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, TRUE) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to unprotect object header chunk")
FUNC_LEAVE_NOAPI(ret_value)
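Two structural changes run through the H5Oalloc.c hunks above: the chunk is now protected before H5MF_try_extend() is attempted, and the old H5AC__SIZE_CHANGED_FLAG at unprotect time is replaced by an explicit H5O_chunk_resize() call once the chunk's size field has been updated. Roughly, the extend path now follows this sequence (a paraphrase of H5O_alloc_extend_chunk built from the hunks above, not a verbatim excerpt):

    /* Protect the chunk before trying to grow its file allocation */
    if(NULL == (chk_proxy = H5O_chunk_protect(f, dxpl_id, oh, chunkno)))
        HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk")

    /* Ask the file space manager for more room at the end of the chunk */
    extended = H5MF_try_extend(f, dxpl_id, H5FD_MEM_OHDR, oh->chunk[chunkno].addr,
            (hsize_t)(oh->chunk[chunkno].size), (hsize_t)(delta + extra_prfx_size));
    if(extended < 0)
        HGOTO_ERROR(H5E_OHDR, H5E_CANTEXTEND, FAIL, "error trying to extend chunk")
    else if(extended == FALSE)
        HGOTO_DONE(FALSE)

    /* ... grow the chunk's image, update oh->chunk[chunkno].size, mark messages dirty ... */
    chk_dirtied = TRUE;

    /* Push the new size into the cache (replaces H5AC__SIZE_CHANGED_FLAG) */
    if(H5O_chunk_resize(oh, chk_proxy) < 0)
        HGOTO_ERROR(H5E_OHDR, H5E_CANTRESIZE, FAIL, "unable to resize object header chunk")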
diff --git a/src/H5Oattribute.c b/src/H5Oattribute.c
index 8b846c7..9f0f189 100644
--- a/src/H5Oattribute.c
+++ b/src/H5Oattribute.c
@@ -833,8 +833,8 @@ H5O_attr_write_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/,
{
H5O_iter_wrt_t *udata = (H5O_iter_wrt_t *)_udata; /* Operator user data */
H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that message is in */
- unsigned chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */
- herr_t ret_value = H5_ITER_CONT; /* Return value */
+ hbool_t chk_dirtied = FALSE; /* Flag for unprotecting chunk */
+ herr_t ret_value = H5_ITER_CONT; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5O_attr_write_cb)
@@ -862,10 +862,10 @@ H5O_attr_write_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/,
/* Mark the message as modified */
mesg->dirty = TRUE;
- chk_flags |= H5AC__DIRTIED_FLAG;
+ chk_dirtied = TRUE;
/* Release chunk */
- if(H5O_chunk_unprotect(udata->f, udata->dxpl_id, chk_proxy, chk_flags) < 0)
+ if(H5O_chunk_unprotect(udata->f, udata->dxpl_id, chk_proxy, chk_dirtied) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to unprotect object header chunk")
chk_proxy = NULL;
@@ -886,7 +886,7 @@ H5O_attr_write_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/,
done:
/* Release chunk, if not already done */
- if(chk_proxy && H5O_chunk_unprotect(udata->f, udata->dxpl_id, chk_proxy, chk_flags) < 0)
+ if(chk_proxy && H5O_chunk_unprotect(udata->f, udata->dxpl_id, chk_proxy, chk_dirtied) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to unprotect object header chunk")
FUNC_LEAVE_NOAPI(ret_value)
@@ -1035,8 +1035,8 @@ H5O_attr_rename_mod_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/,
{
H5O_iter_ren_t *udata = (H5O_iter_ren_t *)_udata; /* Operator user data */
H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that message is in */
- unsigned chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */
- herr_t ret_value = H5_ITER_CONT; /* Return value */
+ hbool_t chk_dirtied = FALSE; /* Flag for unprotecting chunk */
+ herr_t ret_value = H5_ITER_CONT; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5O_attr_rename_mod_cb)
@@ -1063,10 +1063,10 @@ H5O_attr_rename_mod_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/,
/* Mark the message as modified */
mesg->dirty = TRUE;
- chk_flags |= H5AC__DIRTIED_FLAG;
+ chk_dirtied = TRUE;
/* Release chunk */
- if(H5O_chunk_unprotect(udata->f, udata->dxpl_id, chk_proxy, chk_flags) < 0)
+ if(H5O_chunk_unprotect(udata->f, udata->dxpl_id, chk_proxy, chk_dirtied) < 0)
HGOTO_ERROR(H5E_ATTR, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to unprotect object header chunk")
chk_proxy = NULL;
@@ -1130,7 +1130,7 @@ H5O_attr_rename_mod_cb(H5O_t *oh, H5O_mesg_t *mesg/*in,out*/,
done:
/* Release chunk, if not already done */
- if(chk_proxy && H5O_chunk_unprotect(udata->f, udata->dxpl_id, chk_proxy, chk_flags) < 0)
+ if(chk_proxy && H5O_chunk_unprotect(udata->f, udata->dxpl_id, chk_proxy, chk_dirtied) < 0)
HDONE_ERROR(H5E_ATTR, H5E_CANTUNPROTECT, H5_ITER_ERROR, "unable to unprotect object header chunk")
FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5Ocache.c b/src/H5Ocache.c
index f2d5907..9e909a6 100644
--- a/src/H5Ocache.c
+++ b/src/H5Ocache.c
@@ -333,7 +333,7 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
buf = read_buf;
/* Parse the first chunk */
- if(H5O_chunk_deserialize(oh, addr, oh->chunk0_size, buf, &(udata->common), &oh->cache_info.is_dirty) < 0)
+ if(H5O_chunk_deserialize(oh, udata->common.addr, oh->chunk0_size, buf, &(udata->common), &oh->cache_info.is_dirty) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't deserialize first object header chunk")
/* Note that we've loaded the object header from the file */
@@ -618,7 +618,10 @@ H5O_size(const H5F_t UNUSED *f, const H5O_t *oh, size_t *size_ptr)
HDassert(size_ptr);
/* Report the object header's prefix+first chunk length */
- *size_ptr = (size_t)H5O_SIZEOF_HDR(oh) + oh->chunk0_size;
+ if(oh->chunk0_size)
+ *size_ptr = H5O_SIZEOF_HDR(oh) + oh->chunk0_size;
+ else
+ *size_ptr = oh->chunk[0].size;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5O_size() */
@@ -665,11 +668,11 @@ H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't wrap buffer")
/* Get a pointer to a buffer that's large enough for serialized header */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, udata->chunk_size)))
+ if(NULL == (buf = (uint8_t *)H5WB_actual(wb, udata->size)))
HGOTO_ERROR(H5E_OHDR, H5E_NOSPACE, NULL, "can't get actual buffer")
/* Read rest of the raw data */
- if(H5F_block_read(f, H5FD_MEM_OHDR, addr, udata->chunk_size, dxpl_id, buf) < 0)
+ if(H5F_block_read(f, H5FD_MEM_OHDR, addr, udata->size, dxpl_id, buf) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_READERROR, NULL, "unable to read object header continuation chunk")
/* Check if we are still decoding the object header */
@@ -680,7 +683,7 @@ H5O_cache_chk_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
HDassert(udata->common.cont_msg_info);
/* Parse the chunk */
- if(H5O_chunk_deserialize(udata->oh, addr, udata->chunk_size, buf, &(udata->common), &chk_proxy->cache_info.is_dirty) < 0)
+ if(H5O_chunk_deserialize(udata->oh, udata->common.addr, udata->size, buf, &(udata->common), &chk_proxy->cache_info.is_dirty) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't deserialize object header chunk")
/* Set the fields for the chunk proxy */
@@ -969,6 +972,7 @@ H5O_chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len, const uint8_t *image,
/* Check arguments */
HDassert(oh);
+ HDassert(H5F_addr_defined(addr));
HDassert(image);
HDassert(udata->f);
HDassert(udata->cont_msg_info);
diff --git a/src/H5Ochunk.c b/src/H5Ochunk.c
index 4cacca3..79c4442 100644
--- a/src/H5Ochunk.c
+++ b/src/H5Ochunk.c
@@ -181,7 +181,7 @@ H5O_chunk_protect(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
HDmemset(&chk_udata, 0, sizeof(chk_udata));
chk_udata.oh = oh;
chk_udata.chunkno = idx;
- chk_udata.chunk_size = oh->chunk[idx].size;
+ chk_udata.size = oh->chunk[idx].size;
/* Get the chunk proxy */
if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC_WRITE)))
@@ -216,7 +216,7 @@ done:
*/
herr_t
H5O_chunk_unprotect(H5F_t *f, hid_t dxpl_id, H5O_chunk_proxy_t *chk_proxy,
- unsigned chk_flags)
+ hbool_t dirtied)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -225,26 +225,15 @@ H5O_chunk_unprotect(H5F_t *f, hid_t dxpl_id, H5O_chunk_proxy_t *chk_proxy,
/* check args */
HDassert(f);
HDassert(chk_proxy);
- HDassert(!(chk_flags & (unsigned)~(H5AC__DIRTIED_FLAG | H5AC__SIZE_CHANGED_FLAG)));
/* Check for releasing first chunk */
if(0 == chk_proxy->chunkno) {
- /* Check for resizing the first chunk */
- if(chk_flags & H5AC__SIZE_CHANGED_FLAG) {
- /* Resize object header in cache */
- if(H5AC_resize_pinned_entry(chk_proxy->oh, chk_proxy->oh->chunk[0].size) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTRESIZE, FAIL, "unable to resize chunk in cache")
- } /* end if */
/* Check for dirtying the first chunk */
- else if(chk_flags & H5AC__DIRTIED_FLAG) {
+ if(dirtied) {
/* Mark object header as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(chk_proxy->oh) < 0)
+ if(H5AC_mark_entry_dirty(chk_proxy->oh) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTMARKDIRTY, FAIL, "unable to mark object header as dirty")
} /* end else/if */
- else {
- /* Sanity check */
- HDassert(0 && "Unknown chunk proxy flag(s)?!?");
- } /* end else */
/* Decrement reference count of object header */
if(H5O_dec_rc(chk_proxy->oh) < 0)
@@ -254,8 +243,8 @@ H5O_chunk_unprotect(H5F_t *f, hid_t dxpl_id, H5O_chunk_proxy_t *chk_proxy,
chk_proxy = H5FL_FREE(H5O_chunk_proxy_t, chk_proxy);
} /* end if */
else {
- /* Release the chunk proxy from the cache, marking it dirty */
- if(H5AC_unprotect(f, dxpl_id, H5AC_OHDR_CHK, chk_proxy->oh->chunk[chk_proxy->chunkno].addr, chk_proxy, chk_flags) < 0)
+ /* Release the chunk proxy from the cache, possibly marking it dirty */
+ if(H5AC_unprotect(f, dxpl_id, H5AC_OHDR_CHK, chk_proxy->oh->chunk[chk_proxy->chunkno].addr, chk_proxy, (dirtied ? H5AC__DIRTIED_FLAG : H5AC__NO_FLAGS_SET)) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header chunk")
} /* end else */
@@ -265,6 +254,48 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5O_chunk_resize
+ *
+ * Purpose: Resize an object header chunk
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Quincey Koziol
+ * koziol@hdfgroup.org
+ * May 6 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5O_chunk_resize(H5O_t *oh, H5O_chunk_proxy_t *chk_proxy)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5O_chunk_resize, FAIL)
+
+ /* check args */
+ HDassert(oh);
+ HDassert(chk_proxy);
+
+ /* Check for resizing first chunk */
+ if(0 == chk_proxy->chunkno) {
+ /* Resize object header in cache */
+ if(H5AC_resize_entry(oh, oh->chunk[0].size) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTRESIZE, FAIL, "unable to resize chunk in cache")
+ } /* end if */
+ else {
+ /* Resize chunk in cache */
+ if(H5AC_resize_entry(chk_proxy, oh->chunk[chk_proxy->chunkno].size) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTRESIZE, FAIL, "unable to resize chunk in cache")
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O_chunk_resize() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5O_chunk_update_idx
*
* Purpose: Update the chunk index for a chunk proxy
@@ -298,7 +329,7 @@ H5O_chunk_update_idx(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
HDmemset(&chk_udata, 0, sizeof(chk_udata));
chk_udata.oh = oh;
chk_udata.chunkno = idx;
- chk_udata.chunk_size = oh->chunk[idx].size;
+ chk_udata.size = oh->chunk[idx].size;
/* Get the chunk proxy */
if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC_WRITE)))
@@ -350,7 +381,7 @@ H5O_chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
HDmemset(&chk_udata, 0, sizeof(chk_udata));
chk_udata.oh = oh;
chk_udata.chunkno = idx;
- chk_udata.chunk_size = oh->chunk[idx].size;
+ chk_udata.size = oh->chunk[idx].size;
/* Get the chunk proxy */
if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC_WRITE)))
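A usage note on the new H5O_chunk_resize() added above: callers adjust oh->chunk[n].size first and then call the routine with the still-protected chunk proxy; for chunk 0 it resizes the pinned object header entry itself, while for later chunks it resizes the proxy entry (compare the H5Oalloc.c hunks earlier in this patch). The call site then looks roughly like:

    /* oh->chunk[chk_proxy->chunkno].size has just been changed by the caller */
    if(H5O_chunk_resize(oh, chk_proxy) < 0)
        HGOTO_ERROR(H5E_OHDR, H5E_CANTRESIZE, FAIL, "unable to resize object header chunk")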
diff --git a/src/H5Ofill.c b/src/H5Ofill.c
index ffea9e6..ebe1eb5 100644
--- a/src/H5Ofill.c
+++ b/src/H5Ofill.c
@@ -965,6 +965,7 @@ H5O_fill_convert(H5O_fill_t *fill, H5T_t *dset_type, hbool_t *fill_changed, hid_
/* Update the fill message */
if(buf != fill->buf) {
+ H5T_vlen_reclaim_elmt(fill->buf, fill->type, dxpl_id);
H5MM_xfree(fill->buf);
fill->buf = buf;
} /* end if */
diff --git a/src/H5Omessage.c b/src/H5Omessage.c
index c39db61..0dd4565 100644
--- a/src/H5Omessage.c
+++ b/src/H5Omessage.c
@@ -1328,7 +1328,7 @@ done:
HDONE_ERROR(H5E_OHDR, H5E_CANTUPDATE, FAIL, "unable to update time on object")
/* Mark object header as dirty in cache */
- if(H5AC_mark_pinned_or_protected_entry_dirty(oh) < 0)
+ if(H5AC_mark_entry_dirty(oh) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTMARKDIRTY, FAIL, "unable to mark object header as dirty")
} /* end if */
@@ -1956,7 +1956,7 @@ H5O_copy_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx,
{
H5O_chunk_proxy_t *chk_proxy = NULL; /* Chunk that message is in */
H5O_mesg_t *idx_msg = &oh->mesg[idx]; /* Pointer to message to modify */
- unsigned chk_flags = H5AC__NO_FLAGS_SET; /* Flags for unprotecting chunk */
+ hbool_t chk_dirtied = FALSE; /* Flag for unprotecting chunk */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT(H5O_copy_mesg)
@@ -1984,10 +1984,10 @@ H5O_copy_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx,
/* Mark the message as modified */
idx_msg->dirty = TRUE;
- chk_flags |= H5AC__DIRTIED_FLAG;
+ chk_dirtied = TRUE;
/* Release chunk */
- if(H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_flags) < 0)
+ if(H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_dirtied) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header chunk")
chk_proxy = NULL;
@@ -1998,7 +1998,7 @@ H5O_copy_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx,
done:
/* Release chunk, if not already released */
- if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_flags) < 0)
+ if(chk_proxy && H5O_chunk_unprotect(f, dxpl_id, chk_proxy, chk_dirtied) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header chunk")
FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5Opkg.h b/src/H5Opkg.h
index 9351f50..6ff20b2 100644
--- a/src/H5Opkg.h
+++ b/src/H5Opkg.h
@@ -350,6 +350,7 @@ typedef struct H5O_common_cache_ud_t {
unsigned merged_null_msgs; /* Number of null messages merged together */
hbool_t mesgs_modified; /* Whether any messages were modified when the object header was deserialized */
H5O_cont_msgs_t *cont_msg_info; /* Pointer to continuation messages to work on */
+ haddr_t addr; /* Address of the prefix or chunk */
} H5O_common_cache_ud_t;
/* Callback information for loading object header prefix from disk */
@@ -373,7 +374,7 @@ typedef struct H5O_chk_cache_ud_t {
hbool_t decoding; /* Whether the object header is being decoded */
H5O_t *oh; /* Object header for this chunk */
unsigned chunkno; /* Index of chunk being brought in (for re-loads) */
- size_t chunk_size; /* Chunk size */
+ size_t size; /* Size of chunk in the file */
H5O_common_cache_ud_t common; /* Common object header cache callback info */
} H5O_chk_cache_ud_t;
@@ -558,8 +559,9 @@ H5_DLL herr_t H5O_chunk_add(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx);
H5_DLL H5O_chunk_proxy_t *H5O_chunk_protect(H5F_t *f, hid_t dxpl_id, H5O_t *oh,
unsigned idx);
H5_DLL herr_t H5O_chunk_unprotect(H5F_t *f, hid_t dxpl_id,
- H5O_chunk_proxy_t *chk_proxy, unsigned chk_flags);
+ H5O_chunk_proxy_t *chk_proxy, hbool_t chk_dirtied);
H5_DLL herr_t H5O_chunk_update_idx(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx);
+H5_DLL herr_t H5O_chunk_resize(H5O_t *oh, H5O_chunk_proxy_t *chk_proxy);
H5_DLL herr_t H5O_chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx);
/* Collect storage info for btree and heap */
diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c
index 21fbdee..9624989 100644
--- a/src/H5Pdcpl.c
+++ b/src/H5Pdcpl.c
@@ -1377,7 +1377,7 @@ H5Pset_scaleoffset(hid_t plist_id, H5Z_SO_scale_type_t scale_type, int scale_fac
HGOTO_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list")
if(scale_factor < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "scale factor must be > 0")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "scale factor must be >= 0")
if(scale_type!=H5Z_SO_FLOAT_DSCALE && scale_type!=H5Z_SO_FLOAT_ESCALE && scale_type!=H5Z_SO_INT)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid scale type")
diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c
index 07b66e7..10922da 100644
--- a/src/H5Pfapl.c
+++ b/src/H5Pfapl.c
@@ -1330,10 +1330,6 @@ done:
* Programmer: J. Mainzer
* Thursday, April 7, 2005
*
- * Modifications:
- *
- * Done.
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1382,10 +1378,6 @@ done:
* Programmer: J. Mainzer
* Thursday, April 7, 2005
*
- * Modifications:
- *
- * None.
- *
*-------------------------------------------------------------------------
*/
herr_t
diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h
index 1c0bb61..09eb796 100644
--- a/src/H5Ppublic.h
+++ b/src/H5Ppublic.h
@@ -24,7 +24,7 @@
/* Public headers needed by this file */
#include "H5public.h"
-#include "H5Cpublic.h"
+#include "H5ACpublic.h"
#include "H5Dpublic.h"
#include "H5Fpublic.h"
#include "H5FDpublic.h"
diff --git a/src/H5SM.c b/src/H5SM.c
index 19e5f8a..53d8b9f 100755
--- a/src/H5SM.c
+++ b/src/H5SM.c
@@ -124,14 +124,12 @@ H5SM_init(H5F_t *f, H5P_genplist_t * fc_plist, const H5O_loc_t *ext_loc, hid_t d
H5O_shmesg_table_t sohm_table; /* SOHM message for superblock extension */
H5SM_master_table_t *table = NULL; /* SOHM master table for file */
haddr_t table_addr = HADDR_UNDEF; /* Address of SOHM master table in file */
- unsigned num_indexes; /* Number of SOHM indices */
unsigned list_max, btree_min; /* Phase change limits for SOHM indices */
unsigned index_type_flags[H5O_SHMESG_MAX_NINDEXES]; /* Messages types stored in each index */
unsigned minsizes[H5O_SHMESG_MAX_NINDEXES]; /* Message size sharing threshhold for each index */
unsigned type_flags_used; /* Message type flags used, for sanity checking */
- hsize_t table_size; /* Size of SOHM master table in file */
unsigned x; /* Local index variable */
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_TAG(H5SM_init, dxpl_id, H5AC__SOHM_TAG, FAIL)
@@ -139,39 +137,38 @@ H5SM_init(H5F_t *f, H5P_genplist_t * fc_plist, const H5O_loc_t *ext_loc, hid_t d
/* File should not already have a SOHM table */
HDassert(f->shared->sohm_addr == HADDR_UNDEF);
+ /* Initialize master table */
+ if(NULL == (table = H5FL_MALLOC(H5SM_master_table_t)))
+ HGOTO_ERROR(H5E_SOHM, H5E_CANTALLOC, FAIL, "memory allocation failed for SOHM table")
+ table->num_indexes = f->shared->sohm_nindexes;
+ table->table_size = H5SM_TABLE_SIZE(f);
+
/* Get information from fcpl */
- if(H5P_get(fc_plist, H5F_CRT_SHMSG_NINDEXES_NAME, &num_indexes)<0)
- HGOTO_ERROR(H5E_SOHM, H5E_CANTGET, FAIL, "can't get number of indexes")
- if(H5P_get(fc_plist, H5F_CRT_SHMSG_INDEX_TYPES_NAME, &index_type_flags)<0)
+ if(H5P_get(fc_plist, H5F_CRT_SHMSG_INDEX_TYPES_NAME, &index_type_flags) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_CANTGET, FAIL, "can't get SOHM type flags")
- if(H5P_get(fc_plist, H5F_CRT_SHMSG_LIST_MAX_NAME, &list_max)<0)
+ if(H5P_get(fc_plist, H5F_CRT_SHMSG_LIST_MAX_NAME, &list_max) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_CANTGET, FAIL, "can't get SOHM list maximum")
- if(H5P_get(fc_plist, H5F_CRT_SHMSG_BTREE_MIN_NAME, &btree_min)<0)
+ if(H5P_get(fc_plist, H5F_CRT_SHMSG_BTREE_MIN_NAME, &btree_min) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_CANTGET, FAIL, "can't get SOHM btree minimum")
if(H5P_get(fc_plist, H5F_CRT_SHMSG_INDEX_MINSIZE_NAME, &minsizes) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_CANTGET, FAIL, "can't get SOHM message min sizes")
/* Verify that values are valid */
- if(num_indexes > H5O_SHMESG_MAX_NINDEXES)
+ if(table->num_indexes > H5O_SHMESG_MAX_NINDEXES)
HGOTO_ERROR(H5E_SOHM, H5E_BADRANGE, FAIL, "number of indexes in property list is too large")
/* Check that type flags weren't duplicated anywhere */
type_flags_used = 0;
- for(x = 0; x < num_indexes; ++x) {
+ for(x = 0; x < table->num_indexes; ++x) {
if(index_type_flags[x] & type_flags_used)
HGOTO_ERROR(H5E_SOHM, H5E_BADVALUE, FAIL, "the same shared message type flag is assigned to more than one index")
type_flags_used |= index_type_flags[x];
} /* end for */
- /* Initialize master table */
- if(NULL == (table = H5FL_MALLOC(H5SM_master_table_t)))
- HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, FAIL, "memory allocation failed for SOHM table")
-
- /* Set version and number of indexes in table and in superblock.
+ /* Check that the number of indexes in the table and in the superblock makes sense.
* Right now we just use one byte to hold the number of indexes.
*/
- HDassert(num_indexes < 256);
- table->num_indexes = num_indexes;
+ HDassert(table->num_indexes < 256);
/* Check that list and btree cutoffs make sense. There can't be any
* values greater than the list max but less than the btree min; the
@@ -188,8 +185,7 @@ H5SM_init(H5F_t *f, H5P_genplist_t * fc_plist, const H5O_loc_t *ext_loc, hid_t d
/* Initialize all of the indexes, but don't allocate space for them to
* hold messages until we actually need to write to them.
*/
- for(x = 0; x < table->num_indexes; x++)
- {
+ for(x = 0; x < table->num_indexes; x++) {
table->indexes[x].btree_min = btree_min;
table->indexes[x].list_max = list_max;
table->indexes[x].mesg_types = index_type_flags[x];
@@ -203,11 +199,13 @@ H5SM_init(H5F_t *f, H5P_genplist_t * fc_plist, const H5O_loc_t *ext_loc, hid_t d
table->indexes[x].index_type = H5SM_LIST;
else
table->indexes[x].index_type = H5SM_BTREE;
+
+ /* Compute the size of a list index for this SOHM index */
+ table->indexes[x].list_size = H5SM_LIST_SIZE(f, list_max);
} /* end for */
/* Allocate space for the table on disk */
- table_size = H5SM_TABLE_SIZE(f) + (table->num_indexes * H5SM_INDEX_HEADER_SIZE(f));
- if(HADDR_UNDEF == (table_addr = H5MF_alloc(f, H5FD_MEM_SOHM_TABLE, dxpl_id, table_size)))
+ if(HADDR_UNDEF == (table_addr = H5MF_alloc(f, H5FD_MEM_SOHM_TABLE, dxpl_id, (hsize_t)table->table_size)))
HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, FAIL, "file allocation failed for SOHM table")
/* Cache the new table */
@@ -233,7 +231,7 @@ H5SM_init(H5F_t *f, H5P_genplist_t * fc_plist, const H5O_loc_t *ext_loc, hid_t d
done:
if(ret_value < 0) {
if(table_addr != HADDR_UNDEF)
- H5MF_xfree(f, H5FD_MEM_SOHM_TABLE, dxpl_id, table_addr, (hsize_t)H5SM_TABLE_SIZE(f));
+ H5MF_xfree(f, H5FD_MEM_SOHM_TABLE, dxpl_id, table_addr, (hsize_t)table->table_size);
if(table != NULL)
table = H5FL_FREE(H5SM_master_table_t, table);
} /* end if */
@@ -356,7 +354,12 @@ H5SM_type_shared(H5F_t *f, unsigned type_id, hid_t dxpl_id)
/* Look up the master SOHM table */
if(H5F_addr_defined(f->shared->sohm_addr)) {
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_READ)))
+ H5SM_table_cache_ud_t cache_udata; /* User-data for callback */
+
+ /* Set up user data for callback */
+ cache_udata.f = f;
+
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, &cache_udata, H5AC_READ)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
} /* end if */
else
@@ -396,6 +399,7 @@ herr_t
H5SM_get_fheap_addr(H5F_t *f, hid_t dxpl_id, unsigned type_id, haddr_t *fheap_addr)
{
H5SM_master_table_t *table = NULL; /* Shared object master table */
+ H5SM_table_cache_ud_t cache_udata; /* User-data for callback */
ssize_t index_num; /* Which index */
herr_t ret_value = SUCCEED; /* Return value */
@@ -405,8 +409,11 @@ H5SM_get_fheap_addr(H5F_t *f, hid_t dxpl_id, unsigned type_id, haddr_t *fheap_ad
HDassert(f);
HDassert(fheap_addr);
+ /* Set up user data for callback */
+ cache_udata.f = f;
+
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, &cache_udata, H5AC_READ)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Look up index for message type */
@@ -619,7 +626,6 @@ H5SM_create_list(H5F_t *f, H5SM_index_header_t *header, hid_t dxpl_id)
{
H5SM_list_t *list = NULL; /* List of messages */
hsize_t x; /* Counter variable */
- hsize_t size = 0; /* Size of list on disk */
size_t num_entries; /* Number of messages to create in list */
haddr_t addr = HADDR_UNDEF; /* Address of the list on disk */
haddr_t ret_value;
@@ -632,22 +638,20 @@ H5SM_create_list(H5F_t *f, H5SM_index_header_t *header, hid_t dxpl_id)
num_entries = header->list_max;
/* Allocate list in memory */
- if((list = H5FL_MALLOC(H5SM_list_t)) == NULL)
+ if(NULL == (list = H5FL_MALLOC(H5SM_list_t)))
HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, HADDR_UNDEF, "file allocation failed for SOHM list")
- if((list->messages = (H5SM_sohm_t *)H5FL_ARR_MALLOC(H5SM_sohm_t, num_entries)) == NULL)
+ if(NULL == (list->messages = (H5SM_sohm_t *)H5FL_ARR_CALLOC(H5SM_sohm_t, num_entries)))
HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, HADDR_UNDEF, "file allocation failed for SOHM list")
/* Initialize messages in list */
- HDmemset(list->messages, 0, sizeof(H5SM_sohm_t) * num_entries);
- for(x=0; x<num_entries; x++)
+ for(x = 0; x < num_entries; x++)
list->messages[x].location = H5SM_NO_LOC;
/* Point list at header passed in */
list->header = header;
/* Allocate space for the list on disk */
- size = H5SM_LIST_SIZE(f, num_entries);
- if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_SOHM_INDEX, dxpl_id, size)))
+ if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_SOHM_INDEX, dxpl_id, (hsize_t)header->list_size)))
HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, HADDR_UNDEF, "file allocation failed for SOHM list")
/* Put the list into the cache */
@@ -665,7 +669,7 @@ done:
list = H5FL_FREE(H5SM_list_t, list);
} /* end if */
if(addr != HADDR_UNDEF)
- H5MF_xfree(f, H5FD_MEM_SOHM_INDEX, dxpl_id, addr, size);
+ H5MF_xfree(f, H5FD_MEM_SOHM_INDEX, dxpl_id, addr, (hsize_t)header->list_size);
} /* end if */
FUNC_LEAVE_NOAPI_TAG(ret_value, HADDR_UNDEF)
@@ -931,7 +935,12 @@ H5SM_can_share(H5F_t *f, hid_t dxpl_id, H5SM_master_table_t *table,
if(table)
my_table = table;
else {
- if(NULL == (my_table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_READ)))
+ H5SM_table_cache_ud_t cache_udata; /* User-data for callback */
+
+ /* Set up user data for callback */
+ cache_udata.f = f;
+
+ if(NULL == (my_table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, &cache_udata, H5AC_READ)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
} /* end if */
@@ -1021,6 +1030,7 @@ H5SM_try_share(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, unsigned type_id,
void *mesg, unsigned *mesg_flags)
{
H5SM_master_table_t *table = NULL;
+ H5SM_table_cache_ud_t cache_udata; /* User-data for callback */
unsigned cache_flags = H5AC__NO_FLAGS_SET;
ssize_t index_num;
htri_t tri_ret;
@@ -1036,8 +1046,11 @@ H5SM_try_share(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, unsigned type_id,
if(tri_ret == FALSE)
HGOTO_DONE(FALSE)
+ /* Set up user data for callback */
+ cache_udata.f = f;
+
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_WRITE)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, &cache_udata, H5AC_WRITE)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* "complex" sharing checks */
@@ -1416,6 +1429,7 @@ H5SM_delete(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, H5O_shared_t *sh_mesg)
{
H5SM_master_table_t *table = NULL;
unsigned cache_flags = H5AC__NO_FLAGS_SET;
+ H5SM_table_cache_ud_t cache_udata; /* User-data for callback */
ssize_t index_num;
void *mesg_buf = NULL;
void *native_mesg = NULL;
@@ -1431,8 +1445,11 @@ H5SM_delete(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, H5O_shared_t *sh_mesg)
/* Get message type */
type_id = sh_mesg->msg_type_id;
+ /* Set up user data for callback */
+ cache_udata.f = f;
+
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_WRITE)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, &cache_udata, H5AC_WRITE)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Find the correct index and try to delete from it */
@@ -1846,6 +1863,7 @@ H5SM_get_info(const H5O_loc_t *ext_loc, H5P_genplist_t *fc_plist, hid_t dxpl_id)
if((status = H5O_msg_exists(ext_loc, H5O_SHMESG_ID, dxpl_id)) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_CANTGET, FAIL, "unable to read object header")
if(status) {
+ H5SM_table_cache_ud_t cache_udata; /* User-data for callback */
unsigned index_flags[H5O_SHMESG_MAX_NINDEXES]; /* Message flags for each index */
unsigned minsizes[H5O_SHMESG_MAX_NINDEXES]; /* Minimum message size for each index */
unsigned sohm_l2b; /* SOHM list-to-btree cutoff */
@@ -1867,8 +1885,11 @@ H5SM_get_info(const H5O_loc_t *ext_loc, H5P_genplist_t *fc_plist, hid_t dxpl_id)
HDassert(H5F_addr_defined(shared->sohm_addr));
HDassert(shared->sohm_nindexes > 0 && shared->sohm_nindexes <= H5O_SHMESG_MAX_NINDEXES);
+ /* Set up user data for callback */
+ cache_udata.f = f;
+
/* Read the rest of the SOHM table information from the cache */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, shared->sohm_addr, f, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, shared->sohm_addr, &cache_udata, H5AC_READ)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Get index conversion limits */
@@ -2008,6 +2029,7 @@ H5SM_get_refcount(H5F_t *f, hid_t dxpl_id, unsigned type_id,
H5HF_t *fheap = NULL; /* Fractal heap that contains shared messages */
H5B2_t *bt2 = NULL; /* v2 B-tree handle for index */
H5SM_master_table_t *table = NULL; /* SOHM master table */
+ H5SM_table_cache_ud_t tbl_cache_udata; /* User-data for callback */
H5SM_list_t *list = NULL; /* SOHM index list for message type (if in list form) */
H5SM_index_header_t *header=NULL; /* Index header for message type */
H5SM_mesg_key_t key; /* Key for looking up message */
@@ -2024,8 +2046,11 @@ H5SM_get_refcount(H5F_t *f, hid_t dxpl_id, unsigned type_id,
HDassert(sh_mesg);
HDassert(ref_count);
+ /* Set up user data for callback */
+ tbl_cache_udata.f = f;
+
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, &tbl_cache_udata, H5AC_READ)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Find the correct index and find the message in it */
@@ -2056,15 +2081,15 @@ H5SM_get_refcount(H5F_t *f, hid_t dxpl_id, unsigned type_id,
/* Try to find the message in the index */
if(header->index_type == H5SM_LIST) {
- H5SM_list_cache_ud_t cache_udata; /* User-data for metadata cache callback */
+ H5SM_list_cache_ud_t lst_cache_udata; /* User-data for metadata cache callback */
size_t list_pos; /* Position of the message in the list */
/* Set up user data for metadata cache callback */
- cache_udata.f = f;
- cache_udata.header = header;
+ lst_cache_udata.f = f;
+ lst_cache_udata.header = header;
/* If the index is stored as a list, get it from the cache */
- if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC_READ)))
+ if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &lst_cache_udata, H5AC_READ)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index")
/* Find the message in the list */
@@ -2388,6 +2413,7 @@ H5SM_table_debug(H5F_t *f, hid_t dxpl_id, haddr_t table_addr,
unsigned table_vers, unsigned num_indexes)
{
H5SM_master_table_t *table = NULL; /* SOHM master table */
+ H5SM_table_cache_ud_t cache_udata; /* User-data for callback */
unsigned x; /* Counter variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2417,8 +2443,11 @@ H5SM_table_debug(H5F_t *f, hid_t dxpl_id, haddr_t table_addr,
if(num_indexes == 0 || num_indexes > H5O_SHMESG_MAX_NINDEXES)
HGOTO_ERROR(H5E_SOHM, H5E_BADVALUE, FAIL, "number of indexes must be between 1 and H5O_SHMESG_MAX_NINDEXES")
+ /* Set up user data for callback */
+ cache_udata.f = f;
+
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, table_addr, f, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, table_addr, &cache_udata, H5AC_READ)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
HDfprintf(stream, "%*sShared Message Master Table...\n", indent, "");
@@ -2563,6 +2592,7 @@ herr_t
H5SM_ih_size(H5F_t *f, hid_t dxpl_id, hsize_t *hdr_size, H5_ih_info_t *ih_info)
{
H5SM_master_table_t *table = NULL; /* SOHM master table */
+ H5SM_table_cache_ud_t cache_udata; /* User-data for callback */
H5HF_t *fheap = NULL; /* Fractal heap handle */
H5B2_t *bt2 = NULL; /* v2 B-tree handle for index */
unsigned u; /* Local index variable */
@@ -2576,12 +2606,15 @@ H5SM_ih_size(H5F_t *f, hid_t dxpl_id, hsize_t *hdr_size, H5_ih_info_t *ih_info)
HDassert(hdr_size);
HDassert(ih_info);
+ /* Set up user data for callback */
+ cache_udata.f = f;
+
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, &cache_udata, H5AC_READ)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Get SOHM header size */
- *hdr_size = H5SM_TABLE_SIZE(f) + (table->num_indexes * H5SM_INDEX_HEADER_SIZE(f));
+ *hdr_size = table->table_size;
/* Loop over all the indices for shared messages */
for(u = 0; u < table->num_indexes; u++) {
@@ -2603,7 +2636,7 @@ H5SM_ih_size(H5F_t *f, hid_t dxpl_id, hsize_t *hdr_size, H5_ih_info_t *ih_info)
} /* end if */
else {
HDassert(table->indexes[u].index_type == H5SM_LIST);
- ih_info->index_size += H5SM_LIST_SIZE(f, table->indexes[u].list_max);
+ ih_info->index_size += table->indexes[u].list_size;
} /* end else */
/* Check for heap for this index */
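[Editor's note] The H5SM.c hunks above all make the same mechanical change: H5AC_protect() no longer receives the bare H5F_t * as callback user data, but a pointer to the new H5SM_table_cache_ud_t. A minimal sketch of that caller-side pattern follows; it is not part of the patch, the helper name is made up, and error reporting is left to the caller.

static H5SM_master_table_t *
example_protect_sohm_table(H5F_t *f, hid_t dxpl_id)
{
    H5SM_table_cache_ud_t cache_udata;      /* User-data for cache callback */
    H5SM_master_table_t *ret_value = NULL;  /* Return value */

    /* The table 'load' callback only needs the file pointer, so the former
     * bare H5F_t * argument becomes a one-field user-data struct */
    cache_udata.f = f;

    /* Protect the master table through the metadata cache, read-only */
    ret_value = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE,
            f->shared->sohm_addr, &cache_udata, H5AC_READ);

    return ret_value;   /* NULL on failure; the caller raises the error */
}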
diff --git a/src/H5SMcache.c b/src/H5SMcache.c
index cc22dcd..4a9338b 100644
--- a/src/H5SMcache.c
+++ b/src/H5SMcache.c
@@ -119,7 +119,6 @@ static H5SM_master_table_t *
H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void UNUSED *udata)
{
H5SM_master_table_t *table = NULL;
- size_t size; /* Size of SOHM master table on disk */
H5WB_t *wb = NULL; /* Wrapped buffer for table data */
uint8_t tbl_buf[H5SM_TBL_BUF_SIZE]; /* Buffer for table */
uint8_t *buf; /* Reading buffer */
@@ -151,17 +150,17 @@ H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void UNUSED *udata)
if(NULL == (wb = H5WB_wrap(tbl_buf, sizeof(tbl_buf))))
HGOTO_ERROR(H5E_SOHM, H5E_CANTINIT, NULL, "can't wrap buffer")
- /* Compute the size of the SOHM table header on disk. This is the "table" itself
- * plus each index within the table
+ /* Compute the size of the SOHM table header on disk. This is the "table"
+ * itself plus each index within the table
*/
- size = H5SM_TABLE_SIZE(f) + (table->num_indexes * H5SM_INDEX_HEADER_SIZE(f));
+ table->table_size = H5SM_TABLE_SIZE(f);
/* Get a pointer to a buffer that's large enough for serialized table */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
+ if(NULL == (buf = (uint8_t *)H5WB_actual(wb, table->table_size)))
HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "can't get actual buffer")
/* Read header from disk */
- if(H5F_block_read(f, H5FD_MEM_SOHM_TABLE, addr, size, dxpl_id, buf) < 0)
+ if(H5F_block_read(f, H5FD_MEM_SOHM_TABLE, addr, table->table_size, dxpl_id, buf) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_READERROR, NULL, "can't read SOHM table")
/* Get temporary pointer to serialized table */
@@ -172,11 +171,6 @@ H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void UNUSED *udata)
HGOTO_ERROR(H5E_SOHM, H5E_CANTLOAD, NULL, "bad SOHM table signature")
p += H5_SIZEOF_MAGIC;
- /* Don't count the checksum in the table size yet, since it comes after
- * all of the index headers
- */
- HDassert((size_t)(p - (const uint8_t *)buf) == H5SM_TABLE_SIZE(f) - H5SM_SIZEOF_CHECKSUM);
-
/* Allocate space for the index headers in memory*/
if(NULL == (table->indexes = (H5SM_index_header_t *)H5FL_ARR_MALLOC(H5SM_index_header_t, (size_t)table->num_indexes)))
HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "memory allocation failed for SOHM indexes")
@@ -210,16 +204,19 @@ H5SM_table_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void UNUSED *udata)
/* Address of the index's heap */
H5F_addr_decode(f, &p, &(table->indexes[x].heap_addr));
+
+ /* Compute the size of a list index for this SOHM index */
+ table->indexes[x].list_size = H5SM_LIST_SIZE(f, table->indexes[x].list_max);
} /* end for */
/* Read in checksum */
UINT32DECODE(p, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - (const uint8_t *)buf) == size);
+ HDassert((size_t)(p - (const uint8_t *)buf) == table->table_size);
/* Compute checksum on entire header */
- computed_chksum = H5_checksum_metadata(buf, (size - H5SM_SIZEOF_CHECKSUM), 0);
+ computed_chksum = H5_checksum_metadata(buf, (table->table_size - H5SM_SIZEOF_CHECKSUM), 0);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -270,7 +267,6 @@ H5SM_table_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5SM_ma
if(table->cache_info.is_dirty) {
uint8_t *buf; /* Temporary buffer */
uint8_t *p; /* Pointer into raw data buffer */
- size_t size; /* Header size on disk */
uint32_t computed_chksum; /* Computed metadata checksum value */
size_t x; /* Counter variable */
@@ -283,11 +279,8 @@ H5SM_table_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5SM_ma
if(NULL == (wb = H5WB_wrap(tbl_buf, sizeof(tbl_buf))))
HGOTO_ERROR(H5E_SOHM, H5E_CANTINIT, FAIL, "can't wrap buffer")
- /* Encode the master table and all of the index headers as one big blob */
- size = H5SM_TABLE_SIZE(f) + (H5SM_INDEX_HEADER_SIZE(f) * table->num_indexes);
-
/* Get a pointer to a buffer that's large enough for serialized table */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
+ if(NULL == (buf = (uint8_t *)H5WB_actual(wb, table->table_size)))
HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, FAIL, "can't get actual buffer")
/* Get temporary pointer to buffer for serialized table */
@@ -328,12 +321,12 @@ H5SM_table_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5SM_ma
} /* end for */
/* Compute checksum on buffer */
- computed_chksum = H5_checksum_metadata(buf, (size - H5SM_SIZEOF_CHECKSUM), 0);
+ computed_chksum = H5_checksum_metadata(buf, (table->table_size - H5SM_SIZEOF_CHECKSUM), 0);
UINT32ENCODE(p, computed_chksum);
/* Write the table to disk */
- HDassert((size_t)(p - buf) == size);
- if(H5F_block_write(f, H5FD_MEM_SOHM_TABLE, addr, size, dxpl_id, buf) < 0)
+ HDassert((size_t)(p - buf) == table->table_size);
+ if(H5F_block_write(f, H5FD_MEM_SOHM_TABLE, addr, table->table_size, dxpl_id, buf) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_CANTFLUSH, FAIL, "unable to save sohm table to disk")
table->cache_info.is_dirty = FALSE;
@@ -433,7 +426,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5SM_table_size(const H5F_t *f, const H5SM_master_table_t *table, size_t *size_ptr)
+H5SM_table_size(const H5F_t UNUSED *f, const H5SM_master_table_t *table, size_t *size_ptr)
{
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5SM_table_size)
@@ -443,7 +436,7 @@ H5SM_table_size(const H5F_t *f, const H5SM_master_table_t *table, size_t *size_p
HDassert(size_ptr);
/* Set size value */
- *size_ptr = H5SM_TABLE_SIZE(f) + (table->num_indexes * H5SM_INDEX_HEADER_SIZE(f));
+ *size_ptr = table->table_size;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5SM_table_size() */
@@ -467,7 +460,6 @@ H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
H5SM_list_t *list; /* The SOHM list being read in */
H5SM_list_cache_ud_t *udata = (H5SM_list_cache_ud_t *)_udata; /* User data for callback */
H5SM_bt2_ctx_t ctx; /* Message encoding context */
- size_t size; /* Size of SOHM list on disk */
H5WB_t *wb = NULL; /* Wrapped buffer for list index data */
uint8_t lst_buf[H5SM_LST_BUF_SIZE]; /* Buffer for list index */
uint8_t *buf; /* Reading buffer */
@@ -497,15 +489,12 @@ H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
if(NULL == (wb = H5WB_wrap(lst_buf, sizeof(lst_buf))))
HGOTO_ERROR(H5E_SOHM, H5E_CANTINIT, NULL, "can't wrap buffer")
- /* Compute the size of the SOHM list on disk */
- size = H5SM_LIST_SIZE(udata->f, udata->header->num_messages);
-
/* Get a pointer to a buffer that's large enough for serialized list index */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
+ if(NULL == (buf = (uint8_t *)H5WB_actual(wb, udata->header->list_size)))
HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, NULL, "can't get actual buffer")
/* Read list from disk */
- if(H5F_block_read(f, H5FD_MEM_SOHM_INDEX, addr, size, dxpl_id, buf) < 0)
+ if(H5F_block_read(f, H5FD_MEM_SOHM_INDEX, addr, udata->header->list_size, dxpl_id, buf) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_READERROR, NULL, "can't read SOHM list")
/* Get temporary pointer to serialized list index */
@@ -528,10 +517,10 @@ H5SM_list_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
UINT32DECODE(p, stored_chksum);
/* Sanity check */
- HDassert((size_t)(p - buf) == size);
+ HDassert((size_t)(p - buf) <= udata->header->list_size);
/* Compute checksum on entire header */
- computed_chksum = H5_checksum_metadata(buf, (size - H5SM_SIZEOF_CHECKSUM), 0);
+ computed_chksum = H5_checksum_metadata(buf, ((size_t)(p - buf) - H5SM_SIZEOF_CHECKSUM), 0);
/* Verify checksum */
if(stored_chksum != computed_chksum)
@@ -589,7 +578,6 @@ H5SM_list_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5SM_lis
H5SM_bt2_ctx_t ctx; /* Message encoding context */
uint8_t *buf; /* Temporary buffer */
uint8_t *p; /* Pointer into raw data buffer */
- size_t size; /* Header size on disk */
uint32_t computed_chksum; /* Computed metadata checksum value */
size_t mesgs_written; /* Number of messages written to list */
size_t x; /* Local index variable */
@@ -598,10 +586,8 @@ H5SM_list_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5SM_lis
if(NULL == (wb = H5WB_wrap(lst_buf, sizeof(lst_buf))))
HGOTO_ERROR(H5E_SOHM, H5E_CANTINIT, FAIL, "can't wrap buffer")
- size = H5SM_LIST_SIZE(f, list->header->num_messages);
-
/* Get a pointer to a buffer that's large enough for serialized list index */
- if(NULL == (buf = (uint8_t *)H5WB_actual(wb, size)))
+ if(NULL == (buf = (uint8_t *)H5WB_actual(wb, list->header->list_size)))
HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, FAIL, "can't get actual buffer")
/* Get temporary pointer to buffer for serialized list index */
@@ -626,12 +612,12 @@ H5SM_list_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5SM_lis
HDassert(mesgs_written == list->header->num_messages);
/* Compute checksum on buffer */
- computed_chksum = H5_checksum_metadata(buf, (size - H5SM_SIZEOF_CHECKSUM), 0);
+ computed_chksum = H5_checksum_metadata(buf, (size_t)(p - buf), 0);
UINT32ENCODE(p, computed_chksum);
/* Write the list to disk */
- HDassert((size_t)(p - buf) == size);
- if(H5F_block_write(f, H5FD_MEM_SOHM_INDEX, addr, size, dxpl_id, buf) < 0)
+ HDassert((size_t)(p - buf) <= list->header->list_size);
+ if(H5F_block_write(f, H5FD_MEM_SOHM_INDEX, addr, list->header->list_size, dxpl_id, buf) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_CANTFLUSH, FAIL, "unable to save sohm table to disk")
list->cache_info.is_dirty = FALSE;
@@ -681,7 +667,7 @@ H5SM_list_dest(H5F_t *f, H5SM_list_t* list)
if(list->cache_info.free_file_space_on_destroy) {
/* Release the space on disk */
/* (XXX: Nasty usage of internal DXPL value! -QAK) */
- if(H5MF_xfree(f, H5FD_MEM_SOHM_INDEX, H5AC_dxpl_id, list->cache_info.addr, (hsize_t)H5SM_LIST_SIZE(f, list->header->list_max)) < 0)
+ if(H5MF_xfree(f, H5FD_MEM_SOHM_INDEX, H5AC_dxpl_id, list->cache_info.addr, (hsize_t)list->header->list_size) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_NOSPACE, FAIL, "unable to free shared message list")
} /* end if */
@@ -754,7 +740,7 @@ H5SM_list_size(const H5F_t UNUSED *f, const H5SM_list_t *list, size_t *size_ptr)
HDassert(size_ptr);
/* Set size value */
- *size_ptr = H5SM_LIST_SIZE(f, list->header->list_max);
+ *size_ptr = list->header->list_size;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5SM_list_size() */
diff --git a/src/H5SMpkg.h b/src/H5SMpkg.h
index 3aab317..84c2bf4 100755
--- a/src/H5SMpkg.h
+++ b/src/H5SMpkg.h
@@ -62,11 +62,6 @@
+ MAX(H5SM_HEAP_LOC_SIZE, H5SM_OH_LOC_SIZE(f)) /* Entry */ \
)
-#define H5SM_TABLE_SIZE(f) ( \
- (unsigned)H5_SIZEOF_MAGIC /* Signature */ \
- + (unsigned)H5SM_SIZEOF_CHECKSUM /* Checksum */ \
- )
-
#define H5SM_INDEX_HEADER_SIZE(f) ( \
(unsigned)1 /* Whether index is a list or B-tree */ \
+ (unsigned)1 /* Version of index format */ \
@@ -77,10 +72,26 @@
+ H5F_SIZEOF_ADDR(f) /* Address of heap */ \
)
+/* Format overhead for all SOHM tree metadata in the file */
+#define H5SM_METADATA_PREFIX_SIZE ( \
+ H5_SIZEOF_MAGIC /* Signature */ \
+ + H5SM_SIZEOF_CHECKSUM /* Checksum */ \
+ )
+
+#define H5SM_TABLE_SIZE(f) ( \
+ /* General metadata fields */ \
+ H5SM_METADATA_PREFIX_SIZE \
+ \
+ /* Indices */ \
+ + ((f)->shared->sohm_nindexes * H5SM_INDEX_HEADER_SIZE(f)) \
+ )
+
#define H5SM_LIST_SIZE(f, num_mesg) ( \
- (unsigned) H5_SIZEOF_MAGIC /* Signature */ \
- + (H5SM_SOHM_ENTRY_SIZE(f) * num_mesg) /* Message entries */ \
- + (unsigned)H5SM_SIZEOF_CHECKSUM /* Checksum */ \
+ /* General metadata fields */ \
+ H5SM_METADATA_PREFIX_SIZE \
+ \
+ /* Message entries */ \
+ + (H5SM_SOHM_ENTRY_SIZE(f) * num_mesg) \
)
#define H5SM_B2_NODE_SIZE 512
@@ -154,6 +165,7 @@ typedef enum {
/* Typedef for a SOHM index header */
typedef struct {
+/* Stored */
unsigned mesg_types; /* Bit flag vector of message types */
size_t min_mesg_size; /* number of messages being tracked */
size_t list_max; /* >= this many messages, index with a B-tree */
@@ -162,6 +174,9 @@ typedef struct {
H5SM_index_type_t index_type; /* Is the index a list or a B-tree? */
haddr_t index_addr; /* Address of the actual index (list or B-tree) */
haddr_t heap_addr; /* Address of the fheap used to store shared messages */
+
+/* Not stored */
+ size_t list_size; /* Size of list index on disk */
} H5SM_index_header_t;
/* Typedef for a SOHM list */
@@ -173,12 +188,12 @@ typedef struct {
H5SM_sohm_t *messages; /* Actual list, stored as an array */
} H5SM_list_t;
-
/* Typedef for shared object header message master table */
struct H5SM_master_table_t {
/* Information for H5AC cache functions, _must_ be first field in structure */
H5AC_info_t cache_info;
+ size_t table_size; /* Size of table on disk */
unsigned num_indexes; /* Number of indexes */
H5SM_index_header_t *indexes; /* Array of num_indexes indexes */
};
@@ -219,9 +234,14 @@ typedef struct H5SM_bt2_ctx_t {
uint8_t sizeof_addr; /* Size of file addresses */
} H5SM_bt2_ctx_t;
-/* Callback info for loading a shared message index into the cache */
+/* Callback info for loading a shared message table index into the cache */
+typedef struct H5SM_table_cache_ud_t {
+ H5F_t *f; /* File that shared message index stored as a table is in */
+} H5SM_table_cache_ud_t;
+
+/* Callback info for loading a shared message list index into the cache */
typedef struct H5SM_list_cache_ud_t {
- H5F_t *f; /* File that shared message index stored as a list is in */
+ H5F_t *f; /* File that shared message index stored as a list is in */
H5SM_index_header_t *header; /* Index header for this list */
} H5SM_list_cache_ud_t;
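[Editor's note] The reworked macros above factor the magic-plus-checksum prefix into H5SM_METADATA_PREFIX_SIZE and fold the per-index headers into H5SM_TABLE_SIZE(), which is what the cached table_size and per-index list_size fields hold. A hypothetical sanity-check helper in that spirit, assuming only the macros and structs declared above (the function name is made up, not part of the patch):

static void
example_check_sohm_sizes(const H5F_t *f, const H5SM_master_table_t *table)
{
    unsigned u;     /* Local index variable */

    /* The cached value set in H5SM_init()/H5SM_table_load() is the table
     * macro evaluated once: prefix + one header per index */
    HDassert(table->table_size == H5SM_TABLE_SIZE(f));

    /* Each index caches the size of its list form: prefix + one entry
     * per message slot, up to the list cutoff */
    for(u = 0; u < table->num_indexes; u++)
        HDassert(table->indexes[u].list_size ==
                H5SM_LIST_SIZE(f, table->indexes[u].list_max));
}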
diff --git a/src/H5SMtest.c b/src/H5SMtest.c
index 48f8ff9..5f4a89b 100644
--- a/src/H5SMtest.c
+++ b/src/H5SMtest.c
@@ -91,10 +91,14 @@ H5SM_get_mesg_count_test(H5F_t *f, hid_t dxpl_id, unsigned type_id,
/* Check for shared messages being enabled */
if(H5F_addr_defined(f->shared->sohm_addr)) {
H5SM_index_header_t *header; /* Index header for message type */
+ H5SM_table_cache_ud_t cache_udata; /* User-data for callback */
ssize_t index_num; /* Table index for message type */
+ /* Set up user data for callback */
+ cache_udata.f = f;
+
/* Look up the master SOHM table */
- if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, f, H5AC_READ)))
+ if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, f->shared->sohm_addr, &cache_udata, H5AC_READ)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Find the correct index for this message type */
diff --git a/src/H5Tconv.c b/src/H5Tconv.c
index be09488..6069577 100644
--- a/src/H5Tconv.c
+++ b/src/H5Tconv.c
@@ -241,7 +241,7 @@
#define H5T_CONV_sS(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
HDcompile_assert(sizeof(ST)<=sizeof(DT)); \
- H5T_CONV(H5T_CONV_xX, long long, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_xX, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
#define H5T_CONV_sU_CORE(S,D,ST,DT,D_MIN,D_MAX) { \
@@ -266,7 +266,7 @@
#define H5T_CONV_sU(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
HDcompile_assert(sizeof(ST)<=sizeof(DT)); \
- H5T_CONV(H5T_CONV_sU, long long, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_sU, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
#define H5T_CONV_uS_CORE(S,D,ST,DT,D_MIN,D_MAX) { \
@@ -291,17 +291,17 @@
#define H5T_CONV_uS(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
HDcompile_assert(sizeof(ST)<=sizeof(DT)); \
- H5T_CONV(H5T_CONV_uS, long long, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_uS, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
#define H5T_CONV_uU(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
HDcompile_assert(sizeof(ST)<=sizeof(DT)); \
- H5T_CONV(H5T_CONV_xX, long long, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_xX, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
#define H5T_CONV_Ss(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
HDcompile_assert(sizeof(ST)>=sizeof(DT)); \
- H5T_CONV(H5T_CONV_Xx, long long, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_Xx, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
#define H5T_CONV_Su_CORE(S,D,ST,DT,D_MIN,D_MAX) { \
@@ -337,17 +337,17 @@
#define H5T_CONV_Su(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
HDcompile_assert(sizeof(ST)>=sizeof(DT)); \
- H5T_CONV(H5T_CONV_Su, long long, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_Su, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
#define H5T_CONV_Us(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
HDcompile_assert(sizeof(ST)>=sizeof(DT)); \
- H5T_CONV(H5T_CONV_Ux, long long, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_Ux, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
#define H5T_CONV_Uu(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
HDcompile_assert(sizeof(ST)>=sizeof(DT)); \
- H5T_CONV(H5T_CONV_Ux, long long, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_Ux, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
#define H5T_CONV_su_CORE(S,D,ST,DT,D_MIN,D_MAX) { \
@@ -374,7 +374,7 @@
#define H5T_CONV_su(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
HDcompile_assert(sizeof(ST)==sizeof(DT)); \
- H5T_CONV(H5T_CONV_su, long long, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_su, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
#define H5T_CONV_us_CORE(S,D,ST,DT,D_MIN,D_MAX) { \
@@ -401,12 +401,12 @@
#define H5T_CONV_us(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
HDcompile_assert(sizeof(ST)==sizeof(DT)); \
- H5T_CONV(H5T_CONV_us, long long, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_us, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
#define H5T_CONV_fF(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
HDcompile_assert(sizeof(ST)<=sizeof(DT)); \
- H5T_CONV(H5T_CONV_xX, long double, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_xX, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
/* Same as H5T_CONV_Xx_CORE, except that instead of using D_MAX and D_MIN
@@ -445,7 +445,7 @@
#define H5T_CONV_Ff(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
HDcompile_assert(sizeof(ST)>=sizeof(DT)); \
- H5T_CONV(H5T_CONV_Ff, long double, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_Ff, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
#define H5T_HI_LO_BIT_SET(TYP, V, LO, HI) { \
@@ -536,7 +536,7 @@
}
#define H5T_CONV_xF(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
- H5T_CONV(H5T_CONV_xF, long double, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_xF, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
/* Quincey added the condition branch (else if (*(S) != (ST)((DT)(*(S))))).
@@ -589,7 +589,7 @@
}
#define H5T_CONV_Fx(STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
- H5T_CONV(H5T_CONV_Fx, long double, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
+ H5T_CONV(H5T_CONV_Fx, STYPE, DTYPE, ST, DT, D_MIN, D_MAX) \
}
/* Since all "no exception" cores do the same thing (assign the value in the
@@ -601,7 +601,7 @@
}
/* The main part of every integer hardware conversion macro */
-#define H5T_CONV(GUTS,ATYPE,STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
+#define H5T_CONV(GUTS,STYPE,DTYPE,ST,DT,D_MIN,D_MAX) { \
size_t elmtno; /*element number */ \
size_t sprec; /*source precision */ \
size_t dprec; /*destination precision */ \
diff --git a/src/H5Tprivate.h b/src/H5Tprivate.h
index afa6ceb..c70eea0 100644
--- a/src/H5Tprivate.h
+++ b/src/H5Tprivate.h
@@ -129,6 +129,7 @@ H5_DLL herr_t H5T_convert(H5T_path_t *tpath, hid_t src_id, hid_t dst_id,
size_t nelmts, size_t buf_stride, size_t bkg_stride, void *buf, void *bkg,
hid_t dset_xfer_plist);
H5_DLL herr_t H5T_vlen_reclaim(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, void *_op_data);
+H5_DLL herr_t H5T_vlen_reclaim_elmt(void *elem, H5T_t *dt, hid_t dxpl_id);
H5_DLL herr_t H5T_vlen_get_alloc_info(hid_t dxpl_id, H5T_vlen_alloc_info_t **vl_alloc_info);
H5_DLL htri_t H5T_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc);
H5_DLL htri_t H5T_is_sensible(const H5T_t *dt);
diff --git a/src/H5Tvlen.c b/src/H5Tvlen.c
index 8a6ee05..95f4086 100644
--- a/src/H5Tvlen.c
+++ b/src/H5Tvlen.c
@@ -1303,3 +1303,44 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5T_vlen_get_alloc_info() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5T_vlen_reclaim_elmt
+ *
+ * Purpose: Alternative method to reclaim any VL data for a buffer element.
+ *
+ * Use this function when the datatype is already available, but
+ * the allocation info is needed from the dxpl_id before jumping
+ * into recursion.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mike McGreevy
+ * May 11, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5T_vlen_reclaim_elmt(void *elem, H5T_t *dt, hid_t dxpl_id)
+{
+ H5T_vlen_alloc_info_t _vl_alloc_info; /* VL allocation info buffer */
+ H5T_vlen_alloc_info_t *vl_alloc_info = &_vl_alloc_info; /* VL allocation info */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(H5T_vlen_reclaim_elmt, FAIL)
+
+ /* Sanity checks */
+ HDassert(elem);
+ HDassert(dt);
+
+ /* Get VL allocation info */
+ if(H5T_vlen_get_alloc_info(dxpl_id, &vl_alloc_info) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTGET, FAIL, "unable to retrieve VL allocation info")
+
+ /* Recurse on buffer to free dynamic fields */
+ ret_value = H5T_vlen_reclaim_recurse(elem, dt, vl_alloc_info->free_func, vl_alloc_info->free_info);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5T_vlen_reclaim_elmt() */
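[Editor's note] A sketch of how the new routine might be called from inside the library, assuming a caller that already holds the H5T_t and a single in-memory element. The function name and the choice of error code are illustrative only, not taken from the patch.

static herr_t
example_free_one_elmt(void *elmt_buf, H5T_t *dt, hid_t dxpl_id)
{
    herr_t ret_value = SUCCEED;     /* Return value */

    FUNC_ENTER_NOAPI_NOINIT(example_free_one_elmt)

    /* Reclaim any VL data inside the single element; the new routine looks
     * up the free function/info from the DXPL before recursing */
    if(H5T_vlen_reclaim_elmt(elmt_buf, dt, dxpl_id) < 0)
        HGOTO_ERROR(H5E_DATATYPE, H5E_CANTFREE, FAIL, "unable to reclaim VL element")

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end example_free_one_elmt() */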
diff --git a/src/H5api_adpt.h b/src/H5api_adpt.h
index 740eab5..dbd8d94 100644
--- a/src/H5api_adpt.h
+++ b/src/H5api_adpt.h
@@ -21,6 +21,272 @@
#ifndef H5API_ADPT_H
#define H5API_ADPT_H
+/* This will only be defined if HDF5 was built with CMake */
+#ifdef H5_BUILT_AS_DYNAMIC_LIB
+
+#if defined (hdf5_EXPORTS)
+ #define _HDF5DLL_
+#else
+ #define _HDF5USEDLL_
+#endif
+
+#if defined (hdf5_test_EXPORTS)
+ #define _HDF5TESTDLL_
+#else
+ #define _HDF5TESTUSEDLL_
+#endif
+
+#if defined (hdf5_tools_EXPORTS)
+ #define _HDF5TOOLSDLL_
+#else
+ #define _HDF5TOOLSUSEDLL_
+#endif
+
+#if defined (hdf5_cpp_EXPORTS)
+ #define HDF5_CPPDLL_EXPORTS
+#else
+ #define HDF5CPP_USEDLL
+#endif
+
+#if defined (hdf5_hl_EXPORTS)
+ #define _HDF5_HLDLL_EXPORTS_
+#else
+ #define _HDF5USEHLDLL_
+#endif
+
+#if defined (hdf5_hl_cpp_EXPORTS)
+ #define HDF5_HL_CPPDLL_EXPORTS
+#else
+ #define HDF5USE_HLCPPDLL
+#endif
+
+#if defined (hdf5_f90cstub_EXPORTS)
+ #define HDF5FORT_CSTUB_DLL_EXPORTS
+#else
+ #define HDF5FORT_CSTUB_USEDLL
+#endif
+
+#if defined (hdf5_test_f90cstub_EXPORTS)
+ #define HDF5FORTTEST_CSTUB_DLL_EXPORTS
+#endif
+
+#if defined (hdf5_hl_f90cstub_EXPORTS)
+ #define HDF5_HL_F90CSTUBDLL_EXPORTS
+#endif
+
+#if defined(hdf5_EXPORTS)
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5_DLL __declspec(dllexport)
+ #define H5_DLLVAR extern __declspec(dllexport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5_DLL __attribute__ ((visibility("default")))
+ #define H5_DLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#else
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5_DLL __declspec(dllimport)
+ #define H5_DLLVAR __declspec(dllimport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5_DLL __attribute__ ((visibility("default")))
+ #define H5_DLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#endif
+
+#ifndef H5_DLL
+ #define H5_DLL
+ #define H5_DLLVAR extern
+#endif /* _HDF5DLL_ */
+
+#if defined(hdf5_test_EXPORTS)
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5TEST_DLL __declspec(dllexport)
+ #define H5TEST_DLLVAR extern __declspec(dllexport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5TEST_DLL __attribute__ ((visibility("default")))
+ #define H5TEST_DLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#else
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5TEST_DLL __declspec(dllimport)
+ #define H5TEST_DLLVAR __declspec(dllimport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5TEST_DLL __attribute__ ((visibility("default")))
+ #define H5TEST_DLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#endif
+
+#ifndef H5TEST_DLL
+ #define H5TEST_DLL
+ #define H5TEST_DLLVAR extern
+#endif /* H5TEST_DLL */
+
+#if defined(hdf5_tools_EXPORTS)
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5TOOLS_DLL __declspec(dllexport)
+ #define H5TOOLS_DLLVAR extern __declspec(dllexport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5TOOLS_DLL __attribute__ ((visibility("default")))
+ #define H5TOOLS_DLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#else
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5TOOLS_DLL __declspec(dllimport)
+ #define H5TOOLS_DLLVAR __declspec(dllimport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5TOOLS_DLL __attribute__ ((visibility("default")))
+ #define H5TOOLS_DLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#endif
+
+#ifndef H5TOOLS_DLL
+ #define H5TOOLS_DLL
+ #define H5TOOLS_DLLVAR extern
+#endif /* H5TOOLS_DLL */
+
+#if defined(hdf5_cpp_EXPORTS)
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5_DLLCPP __declspec(dllexport)
+ #define H5_DLLCPPVAR extern __declspec(dllexport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5_DLLCPP __attribute__ ((visibility("default")))
+ #define H5_DLLCPPVAR extern __attribute__ ((visibility("default")))
+ #endif
+#else
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5_DLLCPP __declspec(dllimport)
+ #define H5_DLLCPPVAR __declspec(dllimport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5_DLLCPP __attribute__ ((visibility("default")))
+ #define H5_DLLCPPVAR extern __attribute__ ((visibility("default")))
+ #endif
+#endif
+
+#ifndef H5_DLLCPP
+ #define H5_DLLCPP
+ #define H5_DLLCPPVAR extern
+#endif /* H5_DLLCPP */
+
+#if defined(hdf5_hl_EXPORTS)
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5_HLDLL __declspec(dllexport)
+ #define H5_HLDLLVAR extern __declspec(dllexport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5_HLDLL __attribute__ ((visibility("default")))
+ #define H5_HLDLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#else
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5_HLDLL __declspec(dllimport)
+ #define H5_HLDLLVAR __declspec(dllimport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5_HLDLL __attribute__ ((visibility("default")))
+ #define H5_HLDLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#endif
+
+#ifndef H5_HLDLL
+ #define H5_HLDLL
+ #define H5_HLDLLVAR extern
+#endif /* H5_HLDLL */
+
+#if defined(hdf5_hl_cpp_EXPORTS)
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5_HLCPPDLL __declspec(dllexport)
+ #define H5_HLCPPDLLVAR extern __declspec(dllexport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5_HLCPPDLL __attribute__ ((visibility("default")))
+ #define H5_HLCPPDLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#else
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5_HLCPPDLL __declspec(dllimport)
+ #define H5_HLCPPDLLVAR __declspec(dllimport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5_HLCPPDLL __attribute__ ((visibility("default")))
+ #define H5_HLCPPDLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#endif
+
+#ifndef H5_HLCPPDLL
+ #define H5_HLCPPDLL
+ #define H5_HLCPPDLLVAR extern
+#endif /* H5_HLCPPDLL */
+
+#if defined(hdf5_f90cstub_EXPORTS)
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5_FCDLL __declspec(dllexport)
+ #define H5_FCDLLVAR extern __declspec(dllexport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5_FCDLL __attribute__ ((visibility("default")))
+ #define H5_FCDLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#else
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5_FCDLL __declspec(dllimport)
+ #define H5_FCDLLVAR __declspec(dllimport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5_FCDLL __attribute__ ((visibility("default")))
+ #define H5_FCDLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#endif
+
+#ifndef H5_FCDLL
+ #define H5_FCDLL
+ #define H5_FCDLLVAR extern
+#endif /* H5_FCDLL */
+
+#if defined(hdf5_f90Ctest_EXPORTS)
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5_FCTESTDLL __declspec(dllexport)
+ #define H5_FCTESTDLLVAR extern __declspec(dllexport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5_FCTESTDLL __attribute__ ((visibility("default")))
+ #define H5_FCTESTDLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#else
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define H5_FCTESTDLL __declspec(dllimport)
+ #define H5_FCTESTDLLVAR __declspec(dllimport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define H5_FCTESTDLL __attribute__ ((visibility("default")))
+ #define H5_FCTESTDLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#endif
+
+#ifndef H5_FCTESTDLL
+ #define H5_FCTESTDLL
+ #define H5_FCTESTDLLVAR extern
+#endif /* H5_FCTESTDLL */
+
+#if defined(hdf5_hl_f90cstub_EXPORTS)
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define HDF5_HL_F90CSTUBDLL __declspec(dllexport)
+ #define HDF5_HL_F90CSTUBDLLVAR extern __declspec(dllexport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define HDF5_HL_F90CSTUBDLL __attribute__ ((visibility("default")))
+ #define HDF5_HL_F90CSTUBDLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#else
+ #if defined (_MSC_VER) /* MSVC Compiler Case */
+ #define HDF5_HL_F90CSTUBDLL __declspec(dllimport)
+ #define HDF5_HL_F90CSTUBDLLVAR __declspec(dllimport)
+ #elif (__GNUC__ >= 4) /* GCC 4.x has support for visibility options */
+ #define HDF5_HL_F90CSTUBDLL __attribute__ ((visibility("default")))
+ #define HDF5_HL_F90CSTUBDLLVAR extern __attribute__ ((visibility("default")))
+ #endif
+#endif
+
+#ifndef HDF5_HL_F90CSTUBDLL
+ #define HDF5_HL_F90CSTUBDLL
+ #define HDF5_HL_F90CSTUBDLLVAR extern
+#endif /* HDF5_HL_F90CSTUBDLL */
+
+#else
+/* This is the original preprocessor code defined by The HDF Group, which should
+ * still work with the VS projects maintained by The HDF Group.
+ * This will be removed after the next release.
+ */
+
#if defined(_WIN32)
#if defined(_HDF5DLL_)
@@ -47,6 +313,18 @@
#define H5TEST_DLLVAR extern
#endif /* _HDF5TESTDLL_ */
+#if defined(_HDF5TOOLSDLL_)
+#pragma warning(disable: 4273) /* Disable the dll linkage warnings */
+#define H5TOOLS_DLL __declspec(dllexport)
+#define H5TOOLS_DLLVAR extern __declspec(dllexport)
+#elif defined(_HDF5TOOLSUSEDLL_)
+#define H5TOOLS_DLL __declspec(dllimport)
+#define H5TOOLS_DLLVAR __declspec(dllimport)
+#else
+#define H5TOOLS_DLL
+#define H5TOOLS_DLLVAR extern
+#endif /* _HDF5TOOLSDLL_ */
+
#if defined(_HDF5_HLDLL_EXPORTS_)
#pragma warning(disable: 4273) /* Disable the dll linkage warnings */
#define H5_HLDLL __declspec(dllexport)
@@ -117,6 +395,8 @@
#define H5_DLLCPP
#define H5TEST_DLL
#define H5TEST_DLLVAR extern
+#define H5TOOLS_DLL
+#define H5TOOLS_DLLVAR extern
#define H5_FCDLL
#define H5_FCDLLVAR extern
#define H5_FCTESTDLL
@@ -125,3 +405,4 @@
#endif /* H5API_ADPT_H */
+#endif /* H5_BUILT_AS_DYNAMIC_LIB */
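[Editor's note] Under the new CMake branch, declarations pick up their export/import storage class through these macros. A minimal sketch of the intended use, not part of the patch, with made-up identifiers (H5Xexample_call, H5X_example_g):

H5_DLL herr_t H5Xexample_call(hid_t loc_id);    /* exported/imported function */
H5_DLLVAR hid_t H5X_example_g;                  /* exported/imported global */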
diff --git a/src/H5config.h.in b/src/H5config.h.in
index 8210f38..5c9e1f2 100644
--- a/src/H5config.h.in
+++ b/src/H5config.h.in
@@ -676,6 +676,12 @@
# endif
#endif
+/* Number of bits in a file offset, on hosts where this is settable. */
+#undef _FILE_OFFSET_BITS
+
+/* Define for large files, on AIX-style hosts. */
+#undef _LARGE_FILES
+
/* Define to empty if `const' does not conform to ANSI C. */
#undef const
diff --git a/src/H5detect.c b/src/H5detect.c
index 4490fb5..29955a1 100644
--- a/src/H5detect.c
+++ b/src/H5detect.c
@@ -119,9 +119,9 @@ static volatile int nd_g = 0, na_g = 0;
static void print_results(int nd, detected_t *d, int na, malign_t *m);
static void iprint(detected_t *);
static int byte_cmp(int, const void *, const void *);
-static int bit_cmp(int, int *, void *, void *);
+static int bit_cmp(int, int *, volatile void *, volatile void *);
static void fix_order(int, int, int, int *, const char **);
-static int imp_bit(int, int *, void *, void *);
+static int imp_bit(int, int *, volatile void *, volatile void *);
static unsigned long find_bias(int, int, int *, void *);
static void precision (detected_t*);
static void print_header(void);
@@ -279,6 +279,9 @@ precision (detected_t *d)
* absence of implicit mantissa bit, and exponent bias and
* initializes a detected_t structure with those properties.
*
+ * Note: 'volatile' is used for the variables below to prevent the
+ * compiler from optimizing them away.
+ *
* Return: void
*
* Programmer: Robb Matzke
@@ -927,11 +930,11 @@ byte_cmp(int n, const void *_a, const void *_b)
*-------------------------------------------------------------------------
*/
static int
-bit_cmp(int nbytes, int *perm, void *_a, void *_b)
+bit_cmp(int nbytes, int *perm, volatile void *_a, volatile void *_b)
{
int i, j;
- unsigned char *a = (unsigned char *) _a;
- unsigned char *b = (unsigned char *) _b;
+ volatile unsigned char *a = (volatile unsigned char *) _a;
+ volatile unsigned char *b = (volatile unsigned char *) _b;
unsigned char aa, bb;
for (i = 0; i < nbytes; i++) {
@@ -1048,10 +1051,10 @@ fix_order(int n, int first, int last, int *perm, const char **mesg)
*-------------------------------------------------------------------------
*/
static int
-imp_bit(int n, int *perm, void *_a, void *_b)
+imp_bit(int n, int *perm, volatile void *_a, volatile void *_b)
{
- unsigned char *a = (unsigned char *) _a;
- unsigned char *b = (unsigned char *) _b;
+ volatile unsigned char *a = (volatile unsigned char *) _a;
+ volatile unsigned char *b = (volatile unsigned char *) _b;
int changed, major, minor;
int msmb; /*most significant mantissa bit */
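[Editor's note] The H5detect.c change volatile-qualifies the probe buffers so the compiler must actually perform the byte reads instead of folding the comparisons away. A stripped-down illustration of the same idiom, not taken from the patch (the function name is made up):

static int
example_first_differing_byte(int nbytes, volatile void *_a, volatile void *_b)
{
    volatile unsigned char *a = (volatile unsigned char *)_a;
    volatile unsigned char *b = (volatile unsigned char *)_b;
    int i;      /* Local index variable */

    for(i = 0; i < nbytes; i++)
        if(a[i] != b[i])
            return i;       /* index of the first byte that differs */

    return -1;              /* buffers are identical */
}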
diff --git a/src/H5err.txt b/src/H5err.txt
index 6ab1351..71d27a6 100644
--- a/src/H5err.txt
+++ b/src/H5err.txt
@@ -162,7 +162,6 @@ MINOR, CACHE, H5E_PROTECT, Protected metadata error
MINOR, CACHE, H5E_NOTCACHED, Metadata not currently cached
MINOR, CACHE, H5E_SYSTEM, Internal error detected
MINOR, CACHE, H5E_CANTINS, Unable to insert metadata into cache
-MINOR, CACHE, H5E_CANTRENAME, Unable to rename metadata
MINOR, CACHE, H5E_CANTPROTECT, Unable to protect metadata
MINOR, CACHE, H5E_CANTUNPROTECT, Unable to unprotect metadata
MINOR, CACHE, H5E_CANTPIN, Unable to pin cache entry
@@ -197,6 +196,7 @@ MINOR, OHDR, H5E_CANTDELETE, Can't delete message
MINOR, OHDR, H5E_BADITER, Iteration failed
MINOR, OHDR, H5E_CANTPACK, Can't pack messages
MINOR, OHDR, H5E_CANTRESET, Can't reset object
+MINOR, OHDR, H5E_CANTRENAME, Unable to rename object
# Group related errors
MINOR, GROUP, H5E_CANTOPENOBJ, Can't open object
@@ -225,7 +225,7 @@ MINOR, PLIST, H5E_DUPCLASS, Duplicate class name in parent class
MINOR, LINK, H5E_TRAVERSE, Link traversal failure
MINOR, LINK, H5E_NLINKS, Too many soft links in path
MINOR, LINK, H5E_NOTREGISTERED, Link class not registered
-MINOR, LINK, H5E_CANTMOVE, Move callback returned error
+MINOR, LINK, H5E_CANTMOVE, Can't move object
MINOR, LINK, H5E_CANTSORT, Can't sort objects
# Parallel MPI errors
diff --git a/src/H5private.h b/src/H5private.h
index e18bec6..5b8d123 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -142,7 +142,11 @@
#ifdef _WIN32
-#define VC_EXTRALEAN /*Exclude rarely-used stuff from Windows headers */
+#ifdef H5_HAVE_WINSOCK_H
+#include <winsock2.h>
+#endif
+
+#define WIN32_LEAN_AND_MEAN /*Exclude rarely-used stuff from Windows headers */
#include <windows.h>
#include <direct.h> /* For _getcwd() */
diff --git a/src/H5public.h b/src/H5public.h
index d17de1b..e07c4e3 100644
--- a/src/H5public.h
+++ b/src/H5public.h
@@ -71,10 +71,10 @@ extern "C" {
/* Version numbers */
#define H5_VERS_MAJOR 1 /* For major interface/format changes */
#define H5_VERS_MINOR 9 /* For minor interface/format changes */
-#define H5_VERS_RELEASE 69 /* For tweaks, bug-fixes, or development */
+#define H5_VERS_RELEASE 73 /* For tweaks, bug-fixes, or development */
#define H5_VERS_SUBRELEASE "FA_a4" /* For pre-releases like snap0 */
/* Empty string for real releases. */
-#define H5_VERS_INFO "HDF5 library version: 1.9.69-FA_a4" /* Full version string */
+#define H5_VERS_INFO "HDF5 library version: 1.9.73-FA_a4" /* Full version string */
#define H5check() H5check_version(H5_VERS_MAJOR,H5_VERS_MINOR, \
H5_VERS_RELEASE)
diff --git a/src/Makefile.in b/src/Makefile.in
index 7ffd5a5..0e3d148 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -276,12 +276,12 @@ INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
INSTRUMENT = @INSTRUMENT@
INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@
+LARGEFILE = @LARGEFILE@
LD = @LD@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
-LINUX_LFS = @LINUX_LFS@
LIPO = @LIPO@
LL_PATH = @LL_PATH@
LN_S = @LN_S@
@@ -445,7 +445,7 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog
# Add libtool shared library version numbers to the HDF5 library
# See libtool versioning documentation online.
LT_VERS_INTERFACE = 6
-LT_VERS_REVISION = 59
+LT_VERS_REVISION = 63
LT_VERS_AGE = 0
H5detect_CFLAGS = -g $(AM_CFLAGS)
diff --git a/src/libhdf5.settings.in b/src/libhdf5.settings.in
index a3032e5..ba233e8 100644
--- a/src/libhdf5.settings.in
+++ b/src/libhdf5.settings.in
@@ -65,4 +65,4 @@ Clear file buffers before write: @CLEARFILEBUF@
GPFS: @GPFS@
Strict File Format Checks: @STRICT_FORMAT_CHECKS@
Optimization Instrumentation: @INSTRUMENT@
- Linux Large File Support (LFS): @LINUX_LFS@
+ Large File Support (LFS): @LARGEFILE@