75 files changed, 8706 insertions, 860 deletions
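For orientation, the commit below adds F2003-based high-level table routines (h5tbmake_table_ptr_c / h5tbread_table_c and their Fortran wrappers, exposed through the h5tbmake_table_f generic and h5tbread_table_f). The following is a minimal usage sketch only, assuming an HDF5 build that includes these new interfaces; the point_t type, file name, field names, and the 4/8-byte member widths are illustrative assumptions that mirror the pattern of the commit's own tsttable.F90 test, not part of the change itself.

! Sketch: write and read a packed table through the new C_PTR-based interfaces.
PROGRAM tb_ptr_example
  USE HDF5
  USE H5TB
  USE ISO_C_BINDING
  IMPLICIT NONE

  TYPE point_t                                  ! hypothetical record layout
     INTEGER          :: id
     DOUBLE PRECISION :: x
  END TYPE point_t

  INTEGER(hsize_t), PARAMETER :: nfields = 2, nrecords = 4
  TYPE(point_t), DIMENSION(1:nrecords), TARGET :: wbuf, rbuf
  CHARACTER(LEN=2), DIMENSION(1:nfields) :: field_names = (/"id", "x "/)
  INTEGER(size_t),  DIMENSION(1:nfields) :: offsets, sizes
  INTEGER(hid_t),   DIMENSION(1:nfields) :: types
  INTEGER(hid_t)   :: file_id
  INTEGER(size_t)  :: rec_size
  INTEGER          :: err
  TYPE(C_PTR)      :: wptr, rptr

  CALL h5open_f(err)
  CALL h5fcreate_f("tb_example.h5", H5F_ACC_TRUNC_F, file_id, err)

  ! Describe the in-memory record: stride, member offsets, member sizes, HDF5 types.
  rec_size   = H5OFFSETOF(C_LOC(wbuf(1)), C_LOC(wbuf(2)))
  offsets(1) = H5OFFSETOF(C_LOC(wbuf(1)), C_LOC(wbuf(1)%id))
  offsets(2) = H5OFFSETOF(C_LOC(wbuf(1)), C_LOC(wbuf(1)%x))
  sizes      = (/ INT(4, size_t), INT(8, size_t) /)      ! assumed member widths
  types(1)   = h5kind_to_type(KIND(wbuf(1)%id), H5_INTEGER_KIND)
  types(2)   = h5kind_to_type(KIND(wbuf(1)%x),  H5_REAL_KIND)

  wbuf(:)%id = 0
  wbuf(:)%x  = 0.0d0
  wptr = C_LOC(wbuf(1))

  ! C_PTR variant of h5tbmake_table_f: create the table and write all records
  ! in one call (C_NULL_PTR means "no fill values").
  CALL h5tbmake_table_f("Example", file_id, "points", nfields, nrecords, rec_size, &
       field_names, offsets, types, INT(4, hsize_t), C_NULL_PTR, 0, wptr, err)

  ! New F2003 reader: read the whole table back into a matching buffer.
  rptr = C_LOC(rbuf(1))
  CALL h5tbread_table_f(file_id, "points", nfields, rec_size, offsets, sizes, rptr, err)

  CALL h5fclose_f(file_id, err)
  CALL h5close_f(err)
END PROGRAM tb_ptr_example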
diff --git a/CMakeInstallation.cmake b/CMakeInstallation.cmake index bb5f046..8dcaf85 100644 --- a/CMakeInstallation.cmake +++ b/CMakeInstallation.cmake @@ -530,6 +530,7 @@ The HDF5 data model, file format, API, library, and tools are open and distribut ) cpack_add_component (configinstall DISPLAY_NAME "HDF5 CMake files" + HIDDEN DEPENDS libraries GROUP Development INSTALL_TYPES Full Developer User @@ -500,12 +500,15 @@ ./src/H5CSprivate.h ./src/H5D.c ./src/H5Dbtree.c +./src/H5Dbtree2.c ./src/H5Dchunk.c ./src/H5Dcompact.c ./src/H5Dcontig.c ./src/H5Ddbg.c ./src/H5Ddeprec.c +./src/H5Dearray.c ./src/H5Defl.c +./src/H5Dfarray.c ./src/H5Dfill.c ./src/H5Dint.c ./src/H5Dio.c diff --git a/config/cmake/CTestCustom.cmake b/config/cmake/CTestCustom.cmake index 41cb488..85f7d27 100644 --- a/config/cmake/CTestCustom.cmake +++ b/config/cmake/CTestCustom.cmake @@ -46,6 +46,7 @@ set (CTEST_CUSTOM_MEMCHECK_IGNORE H5DUMP-clearall-objects H5DUMP_PACKED_BITS-clearall-objects H5DUMP-XML-clearall-objects + H5DUMP_VDS-clearall-objects ######### tools/h5import ######### H5IMPORT-clear-objects ######### tools/h5jam ######### diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake index 49e9a05..ec0aecf 100644 --- a/config/cmake/ConfigureChecks.cmake +++ b/config/cmake/ConfigureChecks.cmake @@ -3,7 +3,10 @@ #----------------------------------------------------------------------------- set (HDF_PREFIX "H5") include (${HDF_RESOURCES_EXT_DIR}/ConfigureChecks.cmake) -include (${CMAKE_ROOT}/Modules/TestForSTDNamespace.cmake) + +if (HDF5_ENABLE_USING_MEMCHECKER) + set (H5_USING_MEMCHECKER 1) +endif (HDF5_ENABLE_USING_MEMCHECKER) #----------------------------------------------------------------------------- # Option for --enable-strict-format-checks diff --git a/fortran/src/H5_buildiface.F90 b/fortran/src/H5_buildiface.F90 index bd6ecc3..4b00d80 100644 --- a/fortran/src/H5_buildiface.F90 +++ b/fortran/src/H5_buildiface.F90 @@ -397,7 +397,7 @@ PROGRAM H5_buildiface WRITE(11,'(A)') ' INTEGER(HID_T) , INTENT(IN) :: attr_id' WRITE(11,'(A)') ' INTEGER(HID_T) , INTENT(IN) :: memtype_id' WRITE(11,'(A)') ' INTEGER(HSIZE_T) , INTENT(IN), DIMENSION(*) :: dims' - WRITE(11,'(A)') ' INTEGER(KIND='//TRIM(ADJUSTL(chr2))//'),INTENT(INOUT)'//TRIM(rank_dim_line(j))//', TARGET :: buf' + WRITE(11,'(A)') ' INTEGER(KIND='//TRIM(ADJUSTL(chr2))//'),INTENT(IN)'//TRIM(rank_dim_line(j))//', TARGET :: buf' WRITE(11,'(A)') ' INTEGER , INTENT(OUT) :: hdferr' WRITE(11,'(A)') ' TYPE(C_PTR) :: f_ptr' diff --git a/hl/fortran/src/H5LTf90proto.h b/hl/fortran/src/H5LTf90proto.h index 20d043e..77f941e 100644 --- a/hl/fortran/src/H5LTf90proto.h +++ b/hl/fortran/src/H5LTf90proto.h @@ -311,6 +311,38 @@ h5tbmake_table_c(size_t_f *namelen1, HDF5_HL_F90CSTUBDLL int_f +h5tbread_table_c(hid_t_f *loc_id, + _fcd name, + size_t_f *namelen, + hsize_t_f *nfields, + size_t_f *dst_size, + size_t_f *dst_offset, + size_t_f *dst_sizes, + void *dst_buf); + + +HDF5_HL_F90CSTUBDLL +int_f +h5tbmake_table_ptr_c(size_t_f *namelen1, + _fcd name1, + hid_t_f *loc_id, + size_t_f *namelen, + _fcd name, + hsize_t_f *nfields, + hsize_t_f *nrecords, + size_t_f *type_size, + size_t_f *field_offset, + hid_t_f *field_types, + hsize_t_f *chunk_size, + void *fill_data, + int_f *compress, + size_t_f *char_len_field_names, /* field_names lenghts */ + size_t_f *max_char_size_field_names, /* char len of fields */ + char *field_names, /* field_names */ + void *data); + +HDF5_HL_F90CSTUBDLL +int_f h5tbwrite_field_name_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, diff 
--git a/hl/fortran/src/H5LTff.F90 b/hl/fortran/src/H5LTff.F90 index d36d92c..18c36f0 100644 --- a/hl/fortran/src/H5LTff.F90 +++ b/hl/fortran/src/H5LTff.F90 @@ -110,7 +110,7 @@ MODULE H5LT_CONST CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: dtype ! flag indicating the datatype of the ! the buffer: ! R=Real, D=DOUBLE, I=Interger - INTEGER(size_t) :: SizeOf_buf ! Sizeof the buf datatype + INTEGER(size_t), INTENT(in) :: SizeOf_buf ! Sizeof the buf data type END FUNCTION h5ltget_attribute_c END INTERFACE @@ -1059,14 +1059,16 @@ CONTAINS CHARACTER(LEN=*), INTENT(in) :: dset_name ! name of the dataset CHARACTER(LEN=*), INTENT(in) :: attr_name ! name of the attribute TYPE(C_PTR) :: buf ! data buffer - CHARACTER(LEN=*), INTENT(in) :: buf_type ! + CHARACTER(LEN=*), INTENT(in) :: buf_type ! valid data types are: + ! CHARACTER, INTEGER or REAL + ! NOTE: only the first character matters and is case insensitive INTEGER(size_t), INTENT(in) :: size ! size of attribute array - INTEGER :: errcode ! error code - INTEGER(size_t) :: namelen ! name length - INTEGER(size_t) :: attrlen ! name length + INTEGER(size_t), INTENT(in) :: SizeOf_buf_type ! size of buf's data type + INTEGER, INTENT(out) :: errcode ! error code - CHARACTER(KIND=C_CHAR) :: buf_type_uppercase - INTEGER(size_t) :: SizeOf_buf_type + INTEGER(size_t) :: namelen ! name length + INTEGER(size_t) :: attrlen ! name length + CHARACTER(KIND=C_CHAR) :: buf_type_uppercase namelen = LEN(dset_name) attrlen = LEN(attr_name) @@ -1316,13 +1318,15 @@ CONTAINS INTEGER(hid_t), INTENT(in) :: loc_id ! file or group identifier CHARACTER(LEN=*), INTENT(in) :: dset_name ! name of the dataset CHARACTER(LEN=*), INTENT(in) :: attr_name ! name of the attribute - INTEGER, INTENT(out) :: errcode ! error code - CHARACTER(LEN=*), INTENT(in) :: buf_type - TYPE(C_PTR) :: buf! data buffer + TYPE(C_PTR) :: buf ! data buffer + CHARACTER(LEN=*), INTENT(in) :: buf_type ! valid data types are: + ! CHARACTER, INTEGER or REAL + ! NOTE: only the first character matters and is case insensitive + INTEGER(size_t), INTENT(in) :: SizeOf_buf_type ! size of buf's data type + INTEGER, INTENT(out) :: errcode ! error code INTEGER(size_t) :: namelen ! name length - INTEGER(size_t) :: attrlen ! name length + INTEGER(size_t) :: attrlen ! 
attr length CHARACTER(KIND=C_CHAR) :: buf_type_uppercase - INTEGER(size_t) :: SizeOf_buf_type namelen = LEN(dset_name) attrlen = LEN(attr_name) diff --git a/hl/fortran/src/H5TBfc.c b/hl/fortran/src/H5TBfc.c index 99a7800..2bb7c3b 100644 --- a/hl/fortran/src/H5TBfc.c +++ b/hl/fortran/src/H5TBfc.c @@ -37,21 +37,12 @@ *------------------------------------------------------------------------- */ int_f -h5tbmake_table_c(size_t_f *namelen1, - _fcd name1, - hid_t_f *loc_id, - size_t_f *namelen, - _fcd name, - hsize_t_f *nfields, - hsize_t_f *nrecords, - size_t_f *type_size, - size_t_f *field_offset, - hid_t_f *field_types, - hsize_t_f *chunk_size, - int_f *compress, - size_t_f *char_len_field_names, /* field_names lenghts */ - size_t_f *max_char_size_field_names, /* char len of fields */ - char *field_names) /* field_names */ +h5tbmake_table_c(size_t_f *namelen1, _fcd name1, hid_t_f *loc_id, size_t_f *namelen, _fcd name, + hsize_t_f *nfields, hsize_t_f *nrecords, size_t_f *type_size, size_t_f *field_offset, + hid_t_f *field_types, hsize_t_f *chunk_size, int_f *compress, + size_t_f *char_len_field_names, /* field_names lenghts */ + size_t_f *max_char_size_field_names, /* char len of fields */ + char *field_names) /* field_names */ { char *c_name = NULL; char *c_name1 = NULL; @@ -101,7 +92,6 @@ h5tbmake_table_c(size_t_f *namelen1, HGOTO_DONE(FAIL) HDmemcpy(c_field_names[i], tmp_p, (size_t)char_len_field_names[i]); c_field_names[i][char_len_field_names[i]] = '\0'; - tmp_p = tmp_p + *max_char_size_field_names; } /* end for */ @@ -136,6 +126,177 @@ done: } /* end h5tbmake_table_c() */ /*------------------------------------------------------------------------- +* Function: h5tbmake_table_ptr_c +* +* Purpose: Call H5TBmake_table using F2003 features +* +* Return: Success: 0, Failure: -1 +* +* Programmer: M. Scot Breitenfeld +* +* Date: Sept. 
10, 2015 +* +* Comments: +* +*------------------------------------------------------------------------- +*/ +int_f +h5tbmake_table_ptr_c(size_t_f *namelen1, _fcd name1, hid_t_f *loc_id, size_t_f *namelen, + _fcd name, hsize_t_f *nfields, hsize_t_f *nrecords, size_t_f *type_size, + size_t_f *field_offset, hid_t_f *field_types, hsize_t_f *chunk_size, + void *fill_data, int_f *compress, + size_t_f *char_len_field_names, /* field_names lenghts */ + size_t_f *max_char_size_field_names, /* char len of fields */ + char *field_names, + void *data) /* field_names */ +{ + char *c_name = NULL; + char *c_name1 = NULL; + hsize_t num_elem; + hsize_t i; + hsize_t c_nfields = (hsize_t)*nfields; + size_t *c_field_offset = NULL; + hid_t *c_field_types = NULL; + char **c_field_names = NULL; + char *tmp = NULL, *tmp_p; + int_f ret_value = 0; + + num_elem = (hsize_t)*nfields; + + /* + * convert FORTRAN name to C name + */ + if(NULL == (c_name = (char *)HD5f2cstring(name, (size_t)*namelen))) + HGOTO_DONE(FAIL) + if(NULL == (c_name1 = (char *)HD5f2cstring(name1, (size_t)*namelen1))) + HGOTO_DONE(FAIL) + if(NULL == (c_field_offset = (size_t *)HDmalloc(sizeof(size_t) * (size_t)c_nfields))) + HGOTO_DONE(FAIL) + if(NULL == (c_field_types = (hid_t *)HDmalloc(sizeof(hid_t) * (size_t)c_nfields))) + HGOTO_DONE(FAIL) + + for(i = 0; i < num_elem; i++) { + c_field_offset[i] = (size_t)field_offset[i]; + c_field_types[i] = field_types[i]; + } /* end for */ + + /* + * allocate array of character pointers + */ + if(NULL == (c_field_names = (char **)HDcalloc((size_t)num_elem, sizeof(char *)))) + HGOTO_DONE(FAIL) + + /* copy data to long C string */ + if(NULL == (tmp = (char *)HD5f2cstring(field_names, (size_t)*(max_char_size_field_names)*(size_t)num_elem))) + HGOTO_DONE(FAIL) + /* + * move data from temorary buffer + */ + tmp_p = tmp; + for(i = 0; i < num_elem; i++) { + if(NULL == (c_field_names[i] = (char *)HDmalloc((size_t)char_len_field_names[i] + 1))) + HGOTO_DONE(FAIL) + HDmemcpy(c_field_names[i], tmp_p, (size_t)char_len_field_names[i]); + c_field_names[i][char_len_field_names[i]] = '\0'; + tmp_p = tmp_p + *max_char_size_field_names; + } /* end for */ + + /* + * call H5TBmake_table function. + */ + if(H5TBmake_table(c_name1, (hid_t)*loc_id, c_name, c_nfields, (hsize_t)*nrecords, + (size_t)*type_size, (const char **)c_field_names, c_field_offset, c_field_types, + (hsize_t)*chunk_size, fill_data, *compress, data) < 0) + HGOTO_DONE(FAIL) + +done: + if(c_name) + HDfree(c_name); + if(c_name1) + HDfree(c_name1); + if(c_field_names) { + for(i = 0; i < num_elem; i++) { + if(c_field_names[i]) + HDfree(c_field_names[i]); + } /* end for */ + HDfree(c_field_names); + } /* end if */ + if(tmp) + HDfree(tmp); + if(c_field_offset) + HDfree(c_field_offset); + if(c_field_types) + HDfree(c_field_types); + + return ret_value; +} /* end h5tbmake_table_c() */ + + +/*------------------------------------------------------------------------- +* Function: h5tbread_table_c +* +* Purpose: Call H5TBread_table using F2003 features +* +* Return: Success: 0, Failure: -1 +* +* Programmer: M. Scot Breitenfeld +* +* Date: Sept. 
14, 2015 +* +* Comments: +* +*------------------------------------------------------------------------- +*/ +int_f +h5tbread_table_c(hid_t_f *loc_id, _fcd name, size_t_f *namelen, hsize_t_f *nfields, + size_t_f *dst_size, size_t_f *dst_offset, size_t_f *dst_sizes, void *dst_buf) +{ + char *c_name = NULL; + size_t *c_dst_offset = NULL; + size_t *c_dst_sizes = NULL; + hsize_t c_nfields = (hsize_t)*nfields; + int_f ret_value = 0; + hsize_t i; + + /* + * convert FORTRAN name to C name + */ + if(NULL == (c_name = (char *)HD5f2cstring(name, (size_t)*namelen))) + HGOTO_DONE(FAIL) + + if(NULL == (c_dst_offset = (size_t *)HDmalloc(sizeof(size_t) * (size_t)c_nfields))) + HGOTO_DONE(FAIL) + if(NULL == (c_dst_sizes = (size_t *)HDmalloc(sizeof(size_t) * (size_t)c_nfields))) + HGOTO_DONE(FAIL) + + for(i = 0; i < c_nfields; i++) { + c_dst_offset[i] = (size_t)dst_offset[i]; + c_dst_sizes[i] = (size_t)dst_sizes[i]; + } /* end for */ + + /* + * call H5TBread_table function. + */ + if(H5TBread_table( (hid_t)*loc_id, c_name, (size_t)*dst_size, c_dst_offset, + c_dst_sizes, dst_buf) < 0) + HGOTO_DONE(FAIL) + +done: + if(c_name) + HDfree(c_name); + + if(c_dst_offset) + HDfree(c_dst_offset); + if(c_dst_sizes) + HDfree(c_dst_sizes); + + return ret_value; +} /* end h5tbmake_table_c() */ + + + + +/*------------------------------------------------------------------------- * Function: h5tbwrite_field_name_c * * Purpose: Call H5TBwrite_fields_name @@ -151,15 +312,8 @@ done: *------------------------------------------------------------------------- */ int_f -h5tbwrite_field_name_c(hid_t_f *loc_id, - size_t_f *namelen, - _fcd name, - size_t_f *namelen1, - _fcd field_name, - hsize_t_f *start, - hsize_t_f *nrecords, - size_t_f *type_size, - void *buf) +h5tbwrite_field_name_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, size_t_f *namelen1, _fcd field_name, + hsize_t_f *start, hsize_t_f *nrecords, size_t_f *type_size, void *buf) { char *c_name = NULL; char *c_name1 = NULL; @@ -207,15 +361,8 @@ done: *------------------------------------------------------------------------- */ int_f -h5tbread_field_name_c(hid_t_f *loc_id, - size_t_f *namelen, - _fcd name, - size_t_f *namelen1, - _fcd field_name, - hsize_t_f *start, - hsize_t_f *nrecords, - size_t_f *type_size, - void *buf) +h5tbread_field_name_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, size_t_f *namelen1, _fcd field_name, + hsize_t_f *start, hsize_t_f *nrecords, size_t_f *type_size, void *buf) { char *c_name = NULL; char *c_name1 = NULL; @@ -262,14 +409,8 @@ done: *------------------------------------------------------------------------- */ int_f -h5tbwrite_field_index_c(hid_t_f *loc_id, - size_t_f *namelen, - _fcd name, - int_f *field_index, - hsize_t_f *start, - hsize_t_f *nrecords, - size_t_f *type_size, - void *buf) +h5tbwrite_field_index_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, int_f *field_index, hsize_t_f *start, + hsize_t_f *nrecords, size_t_f *type_size, void *buf) { char *c_name = NULL; size_t c_type_size = *type_size; @@ -313,14 +454,8 @@ done: *------------------------------------------------------------------------- */ int_f -h5tbread_field_index_c(hid_t_f *loc_id, - size_t_f *namelen, - _fcd name, - int_f *field_index, - hsize_t_f *start, - hsize_t_f *nrecords, - size_t_f *type_size, - void *buf) +h5tbread_field_index_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, int_f *field_index, hsize_t_f *start, + hsize_t_f *nrecords, size_t_f *type_size, void *buf) { char *c_name = NULL; size_t c_type_size = *type_size; @@ -363,14 +498,8 @@ done: 
*------------------------------------------------------------------------- */ int_f -h5tbinsert_field_c(hid_t_f *loc_id, - size_t_f *namelen, - _fcd name, - size_t_f *namelen1, - _fcd field_name, - hid_t_f *field_type, - int_f *position, - void *buf) +h5tbinsert_field_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, size_t_f *namelen1, + _fcd field_name, hid_t_f *field_type, int_f *position, void *buf) { char *c_name = NULL; char *c_name1 = NULL; @@ -416,11 +545,8 @@ done: *------------------------------------------------------------------------- */ int_f -h5tbdelete_field_c(hid_t_f *loc_id, - size_t_f *namelen, - _fcd name, - size_t_f *namelen1, - _fcd field_name) +h5tbdelete_field_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, + size_t_f *namelen1, _fcd field_name) { char *c_name = NULL; char *c_name1 = NULL; @@ -465,11 +591,8 @@ done: *------------------------------------------------------------------------- */ int_f -h5tbget_table_info_c(hid_t_f *loc_id, - size_t_f *namelen, - _fcd name, - hsize_t_f *nfields, - hsize_t_f *nrecords) +h5tbget_table_info_c(hid_t_f *loc_id, size_t_f *namelen, + _fcd name, hsize_t_f *nfields, hsize_t_f *nrecords) { char *c_name = NULL; hsize_t c_nfields; @@ -515,17 +638,12 @@ done: *------------------------------------------------------------------------- */ int_f -h5tbget_field_info_c(hid_t_f *loc_id, - size_t_f *namelen, - _fcd name, - hsize_t_f *nfields, - size_t_f *field_sizes, - size_t_f *field_offsets, - size_t_f *type_size, - size_t_f *namelen2, /* field_names lenghts */ - size_t_f *lenmax, /* character len max */ - _fcd field_names, /* field_names */ - size_t_f *maxlen_out) +h5tbget_field_info_c(hid_t_f *loc_id, size_t_f *namelen, _fcd name, hsize_t_f *nfields, + size_t_f *field_sizes, size_t_f *field_offsets, size_t_f *type_size, + size_t_f *namelen2, /* field_names lenghts */ + size_t_f *lenmax, /* character len max */ + _fcd field_names, /* field_names */ + size_t_f *maxlen_out) { char *c_name = NULL; diff --git a/hl/fortran/src/H5TBff.F90 b/hl/fortran/src/H5TBff.F90 index 448d607..266f74a 100644 --- a/hl/fortran/src/H5TBff.F90 +++ b/hl/fortran/src/H5TBff.F90 @@ -63,6 +63,11 @@ MODULE h5tb_CONST MODULE PROCEDURE h5tbinsert_field_f_string END INTERFACE + INTERFACE h5tbmake_table_f + MODULE PROCEDURE h5tbmake_table_f90 + MODULE PROCEDURE h5tbmake_table_ptr_f + END INTERFACE + INTERFACE INTEGER FUNCTION h5tbwrite_field_name_c(loc_id,namelen,dset_name,namelen1,field_name,& start,nrecords,type_size,buf) & @@ -163,7 +168,7 @@ MODULE h5tb_CONST CONTAINS !------------------------------------------------------------------------- -! Function: h5tbmake_table_f +! Function: h5tbmake_table_f90 ! ! Purpose: Make a table ! @@ -179,7 +184,7 @@ CONTAINS ! 
!------------------------------------------------------------------------- - SUBROUTINE h5tbmake_table_f(table_title,& + SUBROUTINE h5tbmake_table_f90(table_title,& loc_id,& dset_name,& nfields,& @@ -259,23 +264,162 @@ CONTAINS max_char_size_field_names = LEN(field_names(1)) - errcode = h5tbmake_table_c(namelen1,& - table_title,& - loc_id,& - namelen,& - dset_name,& - nfields,& - nrecords,& - type_size,& - field_offset,& - field_types,& - chunk_size,& - compress,& - char_len_field_names, & - max_char_size_field_names, & - field_names) - - END SUBROUTINE h5tbmake_table_f + errcode = h5tbmake_table_c(namelen1, table_title, loc_id, namelen, dset_name, nfields, nrecords,& + type_size, field_offset, field_types, chunk_size, compress, char_len_field_names, & + max_char_size_field_names, field_names) + + END SUBROUTINE h5tbmake_table_f90 + + SUBROUTINE h5tbmake_table_ptr_f(table_title,& + loc_id,& + dset_name,& + nfields,& + nrecords,& + type_size,& + field_names,& + field_offset,& + field_types,& + chunk_size,& + fill_data,& + compress,& + data,& + errcode ) + + USE ISO_C_BINDING + IMPLICIT NONE + CHARACTER(LEN=*), INTENT(in) :: table_title ! name of the dataset + INTEGER(hid_t), INTENT(in) :: loc_id ! file or group identifier + CHARACTER(LEN=*), INTENT(in) :: dset_name ! name of the dataset + INTEGER(hsize_t), INTENT(in) :: nfields ! fields + INTEGER(hsize_t), INTENT(in) :: nrecords ! records + INTEGER(size_t), INTENT(in) :: type_size ! type size + CHARACTER(LEN=*), DIMENSION(1:nfields), INTENT(in) :: field_names ! field names + INTEGER(size_t), DIMENSION(1:nfields), INTENT(in) :: field_offset ! field offset + INTEGER(hid_t), DIMENSION(1:nfields), INTENT(in) :: field_types ! field types + INTEGER(hsize_t), INTENT(in) :: chunk_size ! chunk size + TYPE(C_PTR), INTENT(in) :: fill_data ! Fill values data + INTEGER, INTENT(in) :: compress ! compress + TYPE(C_PTR), INTENT(in) :: data ! Buffer with data to be written to the table + INTEGER(size_t) :: namelen ! name length + INTEGER(size_t) :: namelen1 ! name length + INTEGER :: errcode ! error code + INTEGER(size_t), DIMENSION(1:nfields) :: char_len_field_names ! field name lengths + INTEGER(size_t) :: max_char_size_field_names ! character len of field names + INTEGER(hsize_t) :: i ! general purpose integer + + INTERFACE + INTEGER FUNCTION h5tbmake_table_ptr_c(namelen1,& + table_title,& + loc_id,& + namelen,& + dset_name,& + nfields,& + nrecords,& + type_size,& + field_offset,& + field_types,& + chunk_size,& + fill_data,& + compress,& + char_len_field_names,& + max_char_size_field_names,& + field_names,& + data) & + BIND(C,NAME='h5tbmake_table_ptr_c') + IMPORT :: C_CHAR, C_PTR + IMPORT :: HID_T, SIZE_T, HSIZE_T + IMPLICIT NONE + CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: table_title ! name of the dataset + INTEGER(hid_t), INTENT(in) :: loc_id ! file or group identifier + CHARACTER(KIND=C_CHAR), DIMENSION(*), INTENT(in) :: dset_name ! name of the dataset + INTEGER(hsize_t), INTENT(in) :: nfields ! fields + INTEGER(hsize_t), INTENT(in) :: nrecords ! records + INTEGER(size_t), INTENT(in) :: type_size ! type size + CHARACTER(KIND=C_CHAR), DIMENSION(nfields), INTENT(in) :: field_names ! field names + INTEGER(size_t), DIMENSION(nfields), INTENT(in) :: field_offset ! field offset + INTEGER(hid_t), DIMENSION(nfields), INTENT(in) :: field_types ! field types + INTEGER(hsize_t), INTENT(in) :: chunk_size ! chunk size + TYPE(C_PTR), INTENT(in), VALUE :: fill_data ! Fill values data + INTEGER, INTENT(in) :: compress ! 
compress + INTEGER(size_t) :: namelen ! name length + INTEGER(size_t) :: namelen1 ! name length + INTEGER(size_t), DIMENSION(nfields) :: char_len_field_names ! field name's lengths + INTEGER(size_t) :: max_char_size_field_names ! character len of field names + TYPE(C_PTR), INTENT(in), VALUE :: data + END FUNCTION h5tbmake_table_ptr_c + END INTERFACE + + namelen = LEN(dset_name) + namelen1 = LEN(table_title) + + ! Find the size of each character string in the array + DO i = 1, nfields + char_len_field_names(i) = LEN_TRIM(field_names(i)) + END DO + + max_char_size_field_names = LEN(field_names(1)) + + errcode = h5tbmake_table_ptr_c(namelen1, table_title, loc_id, namelen, dset_name, nfields, nrecords,& + type_size, field_offset, field_types, chunk_size, fill_data, compress, char_len_field_names, & + max_char_size_field_names, field_names, data) + + END SUBROUTINE h5tbmake_table_ptr_f + + SUBROUTINE h5tbread_table_f(loc_id, table_name, nfields, dst_size, dst_offset, & + dst_sizes, dst_buf, errcode) + + USE ISO_C_BINDING + IMPLICIT NONE + INTEGER(hid_t), INTENT(in) :: loc_id ! An array containing the sizes of the fields + CHARACTER(LEN=*), INTENT(in) :: table_name ! The name of the dataset to read + INTEGER(hsize_t), INTENT(in) :: nfields ! number of fields + INTEGER(size_t), INTENT(in) :: dst_size ! The size of the structure type + INTEGER(size_t), DIMENSION(1:nfields), INTENT(in) :: dst_offset ! An array containing the offsets of the fields + INTEGER(size_t), DIMENSION(1:nfields), INTENT(in) :: dst_sizes ! An array containing the sizes of the fields + TYPE(C_PTR), INTENT(OUT) :: dst_buf ! Buffer with data + INTEGER :: errcode ! error code + + INTEGER(size_t) :: namelen ! name length + INTEGER(hsize_t) :: i ! general purpose integer + + INTERFACE + INTEGER FUNCTION h5tbread_table_c(loc_id,& + table_name,& + namelen,& + nfields,& + dst_size,& + dst_offset, & + dst_sizes, & + dst_buf) & + BIND(C,NAME='h5tbread_table_c') + IMPORT :: C_PTR + IMPORT :: HID_T, SIZE_T, HSIZE_T + IMPLICIT NONE + INTEGER(hid_t), INTENT(in) :: loc_id ! file or group identifier + CHARACTER(LEN=1), INTENT(in) :: table_name ! name of the dataset + INTEGER(hsize_t), INTENT(in) :: nfields + INTEGER(size_t), INTENT(in) :: dst_size ! type size + INTEGER(size_t), DIMENSION(1:nfields), INTENT(in) :: dst_offset ! An array containing the sizes of the fields + INTEGER(size_t), DIMENSION(1:nfields), INTENT(in) :: dst_sizes ! An array containing the sizes of the fields + INTEGER(size_t) :: namelen ! name length + TYPE(C_PTR), VALUE :: dst_buf + + END FUNCTION h5tbread_table_c + END INTERFACE + + namelen = LEN(table_name) + + errcode = h5tbread_table_c(loc_id,& + table_name,& + namelen, & + nfields, & + dst_size,& + dst_offset, & + dst_sizes, & + dst_buf) + + + END SUBROUTINE h5tbread_table_f !------------------------------------------------------------------------- ! 
Function: h5tbwrite_field_name_f_int diff --git a/hl/fortran/src/hdf5_hl_fortrandll.def.in b/hl/fortran/src/hdf5_hl_fortrandll.def.in index 9a1231a..b48cae3 100644 --- a/hl/fortran/src/hdf5_hl_fortrandll.def.in +++ b/hl/fortran/src/hdf5_hl_fortrandll.def.in @@ -72,7 +72,9 @@ H5LT_CONST_mp_H5LTGET_ATTRIBUTE_NDIMS_F H5LT_CONST_mp_H5LTGET_ATTRIBUTE_INFO_F H5LT_CONST_mp_H5LTPATH_VALID_F ; H5TB -H5TB_CONST_mp_H5TBMAKE_TABLE_F +H5TB_CONST_mp_H5TBREAD_TABLE_F +H5TB_CONST_mp_H5TBMAKE_TABLE_F90 +H5TB_CONST_mp_H5TBMAKE_TABLE_PTR_F H5TB_CONST_mp_H5TBWRITE_FIELD_NAME_F_INT H5TB_CONST_mp_H5TBWRITE_FIELD_NAME_F_STRING H5TB_CONST_mp_H5TBREAD_FIELD_NAME_F_INT diff --git a/hl/fortran/test/Makefile.am b/hl/fortran/test/Makefile.am index ca49817..32d367c 100644 --- a/hl/fortran/test/Makefile.am +++ b/hl/fortran/test/Makefile.am @@ -45,7 +45,7 @@ tstimage_SOURCES=tstimage.F90 tsttable_SOURCES=tsttable.F90 # Temporary files. -CHECK_CLEANFILES+=dsetf[1-5].h5 f1img.h5 f1tab.h5 tstds.h5 +CHECK_CLEANFILES+=dsetf[1-5].h5 f1img.h5 f[1-2]tab.h5 tstds.h5 # Mark this directory as part of the Fortran API (this affects output # from tests in conclude.am) diff --git a/hl/fortran/test/tstlite.F90 b/hl/fortran/test/tstlite.F90 index 081e61e..3937c3c 100644 --- a/hl/fortran/test/tstlite.F90 +++ b/hl/fortran/test/tstlite.F90 @@ -1300,11 +1300,14 @@ SUBROUTINE test_datasets() INTEGER(HID_T) :: file_id ! File identifier INTEGER :: errcode ! Error flag INTEGER, PARAMETER :: DIM1 = 10 ! Dimension of array + INTEGER, PARAMETER :: LEN0 = 3 + INTEGER, PARAMETER :: LEN1 = 12 CHARACTER(LEN=5), PARAMETER :: dsetname1 = "dset1" ! Dataset name CHARACTER(LEN=5), PARAMETER :: dsetname2 = "dset2" ! Dataset name CHARACTER(LEN=5), PARAMETER :: dsetname3 = "dset3" ! Dataset name CHARACTER(LEN=5), PARAMETER :: dsetname4 = "dset4" ! Dataset name CHARACTER(LEN=5), PARAMETER :: dsetname5 = "dset5" ! Dataset name + CHARACTER(LEN=5), PARAMETER :: dsetname6 = "dset6" ! Dataset name INTEGER(HSIZE_T), DIMENSION(1) :: dims = (/DIM1/) ! Dataset dimensions INTEGER(HSIZE_T), DIMENSION(1) :: dimsr ! Dataset dimensions INTEGER :: rank = 1 ! Dataset rank @@ -1317,7 +1320,7 @@ SUBROUTINE test_datasets() REAL, DIMENSION(DIM1) , TARGET :: bufr3 ! Data buffer DOUBLE PRECISION, DIMENSION(DIM1), TARGET :: buf4 ! Data buffer DOUBLE PRECISION, DIMENSION(DIM1), TARGET :: bufr4 ! Data buffer - INTEGER :: i, n ! general purpose integer + INTEGER :: i, j, n ! general purpose integer INTEGER :: has ! general purpose integer INTEGER :: type_class INTEGER(SIZE_T) :: type_size @@ -1326,6 +1329,17 @@ SUBROUTINE test_datasets() CHARACTER(LEN=8) :: chr_lg TYPE(C_PTR) :: f_ptr + ! vl data + TYPE vl + INTEGER, DIMENSION(:), POINTER :: DATA + END TYPE vl + TYPE(vl), DIMENSION(:), ALLOCATABLE, TARGET :: ptr + TYPE(hvl_t), DIMENSION(1:2), TARGET :: wdata ! Array of vlen structures + TYPE(hvl_t), DIMENSION(1:2), TARGET :: rdata ! Pointer to vlen structures + INTEGER(hsize_t), DIMENSION(1:1) :: dims_vl = (/2/) + INTEGER, DIMENSION(:), POINTER :: ptr_r + INTEGER(HID_T) :: type_id + ! ! Initialize FORTRAN predefined datatypes. ! @@ -1347,6 +1361,28 @@ SUBROUTINE test_datasets() n = n + 1 END DO + ! + ! Initialize variable-length data. wdata(1) is a countdown of + ! length LEN0, wdata(2) is a Fibonacci sequence of length LEN1. + ! + wdata(1)%len = LEN0 + wdata(2)%len = LEN1 + + ALLOCATE( ptr(1:2) ) + ALLOCATE( ptr(1)%data(1:wdata(1)%len) ) + ALLOCATE( ptr(2)%data(1:wdata(2)%len) ) + + DO i=1, wdata(1)%len + ptr(1)%data(i) = wdata(1)%len - i + 1 ! 
3 2 1 + ENDDO + wdata(1)%p = C_LOC(ptr(1)%data(1)) + + ptr(2)%data(1:2) = 1 + DO i = 3, wdata(2)%len + ptr(2)%data(i) = ptr(2)%data(i-1) + ptr(2)%data(i-2) ! (1 1 2 3 5 8 etc.) + ENDDO + wdata(2)%p = C_LOC(ptr(2)%data(1)) + !------------------------------------------------------------------------- ! int !------------------------------------------------------------------------- @@ -1430,7 +1466,6 @@ SUBROUTINE test_datasets() !CALL h5ltread_dataset_f(file_id, dsetname4, H5T_NATIVE_DOUBLE, f_ptr, errcode) CALL h5ltread_dataset_double_f(file_id, dsetname4, bufr4, dims, errcode) - ! ! compare read and write buffers. ! @@ -1473,6 +1508,38 @@ SUBROUTINE test_datasets() CALL passed() + + !------------------------------------------------------------------------- + ! variable-length dataset + !------------------------------------------------------------------------- + CALL test_begin(' Make/Read datasets (vl) ') + ! + ! Create variable-length datatype. + ! + CALL H5Tvlen_create_f(H5T_NATIVE_INTEGER, type_id, errcode) + + f_ptr = C_LOC(wdata(1)) + CALL h5ltmake_dataset_f(file_id, dsetname6, 1, dims_vl, type_id, f_ptr, errcode) + + ! Read the variable-length datatype + f_ptr = C_LOC(rdata(1)) + CALL h5ltread_dataset_f(file_id, dsetname6, type_id, f_ptr, errcode) + + DO i = 1, INT(dims_vl(1)) + CALL c_f_pointer(rdata(i)%p, ptr_r, [rdata(i)%len] ) + DO j = 1, rdata(i)%len + IF(ptr_r(j).NE.ptr(i)%data(j))THEN + PRINT *, 'Writing/Reading variable-length dataset failed' + STOP + ENDIF + ENDDO + ENDDO + + CALL H5Tclose_f(type_id, errcode) + DEALLOCATE(ptr) + + CALL passed() + CALL test_begin(' Test h5ltpath_valid_f ') ! ! test function h5ltpath_valid_f @@ -1528,7 +1595,6 @@ SUBROUTINE test_datasets() CALL passed() - CALL test_begin(' Get dataset dimensions/info ') !------------------------------------------------------------------------- @@ -1573,6 +1639,8 @@ SUBROUTINE test_datasets() STOP ENDIF + CALL passed() + ! ! Close the file. ! @@ -1582,14 +1650,12 @@ SUBROUTINE test_datasets() ! CALL h5close_f(errcode) - CALL passed() ! ! end function. ! END SUBROUTINE test_datasets - !------------------------------------------------------------------------- ! test_attributes !------------------------------------------------------------------------- diff --git a/hl/fortran/test/tsttable.F90 b/hl/fortran/test/tsttable.F90 index 74029a5..5c55a66 100644 --- a/hl/fortran/test/tsttable.F90 +++ b/hl/fortran/test/tsttable.F90 @@ -20,7 +20,24 @@ PROGRAM table_test + USE H5TB ! module of H5TB + USE HDF5 ! module of HDF5 library + + IMPLICIT NONE + INTEGER :: errcode = 0 + + ! + ! Initialize FORTRAN predefined datatypes. + ! + CALL h5open_f(errcode) + CALL test_table1() + CALL test_table2() + + ! + ! Close FORTRAN predefined datatypes. + ! + CALL h5close_f(errcode) END PROGRAM table_test @@ -35,13 +52,13 @@ SUBROUTINE test_table1() USE HDF5 ! module of HDF5 library IMPLICIT NONE - + CHARACTER(len=8), PARAMETER :: filename = "f1tab.h5" ! File name CHARACTER(LEN=5), PARAMETER :: dsetname1 = "dset1" ! Dataset name INTEGER(HID_T) :: file_id ! File identifier INTEGER(HSIZE_T), PARAMETER :: nfields = 4 ! nfields INTEGER(HSIZE_T), PARAMETER :: nrecords = 5 ! nrecords - CHARACTER(LEN=10),DIMENSION(1:nfields) :: field_names ! field names + CHARACTER(LEN=9),DIMENSION(1:nfields) :: field_names ! field names INTEGER(SIZE_T), DIMENSION(1:nfields) :: field_offset ! field offset INTEGER(HID_T), DIMENSION(1:nfields) :: field_types ! field types INTEGER(HSIZE_T), PARAMETER :: chunk_size = 5 ! 
chunk size @@ -74,6 +91,7 @@ SUBROUTINE test_table1() INTEGER :: Cs_sizeof_double = H5_SIZEOF_DOUBLE ! C's sizeof double INTEGER :: SIZEOF_X LOGICAL :: Exclude_double + CHARACTER(LEN=62) :: test_txt ! Find size of DOUBLE PRECISION #ifdef H5_FORTRAN_HAVE_STORAGE_SIZE @@ -100,11 +118,6 @@ SUBROUTINE test_table1() END DO ! - ! Initialize FORTRAN predefined datatypes. - ! - CALL h5open_f(errcode) - - ! ! Create a new file using default properties. ! CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, errcode) @@ -164,7 +177,8 @@ SUBROUTINE test_table1() ! make table !------------------------------------------------------------------------- - CALL test_begin(' Make table ') + test_txt = " Make table" + CALL test_begin(test_txt) CALL h5tbmake_table_f(dsetname1,& file_id,& @@ -186,7 +200,8 @@ SUBROUTINE test_table1() ! write field !------------------------------------------------------------------------- - CALL test_begin(' Read/Write field by name ') + test_txt = "Read/Write field by name" + CALL test_begin(test_txt) CALL h5tbwrite_field_name_f(file_id,dsetname1,field_names(1),start,nrecords,type_sizec,& bufs,errcode) @@ -309,7 +324,8 @@ SUBROUTINE test_table1() ! write field !------------------------------------------------------------------------- - CALL test_begin(' Read/Write field by index ') + test_txt = "Read/Write field by index" + CALL test_begin(test_txt) CALL h5tbwrite_field_index_f(file_id,dsetname1,1,start,nrecords,type_sizec,& bufs,errcode) @@ -413,8 +429,8 @@ SUBROUTINE test_table1() ! Insert field ! we insert a field callsed "field5" with the same type and buffer as field 4 (Real) !------------------------------------------------------------------------- - - CALL test_begin(' Insert field ') + test_txt = "Insert field" + CALL test_begin(test_txt) CALL h5tbinsert_field_f(file_id,dsetname1,"field5",field_types(4),4,bufr,errcode) CALL h5tbread_field_index_f(file_id,dsetname1,5,start,nrecords,type_sizer,& @@ -437,7 +453,8 @@ SUBROUTINE test_table1() ! Delete field !------------------------------------------------------------------------- - CALL test_begin(' Delete field ') + test_txt = "Delete field" + CALL test_begin(test_txt) CALL h5tbdelete_field_f(file_id,dsetname1,"field4abc",errcode) @@ -448,7 +465,8 @@ SUBROUTINE test_table1() ! Gets the number of records and fields !------------------------------------------------------------------------- - CALL test_begin(' Get table info ') + test_txt = "Get table info" + CALL test_begin(test_txt) CALL h5tbget_table_info_f(file_id,dsetname1,nfieldsr,nrecordsr,errcode ) @@ -463,7 +481,8 @@ SUBROUTINE test_table1() ! Get information about fields !------------------------------------------------------------------------- - CALL test_begin(' Get fields info ') + test_txt = "Get fields info" + CALL test_begin(test_txt) CALL h5tbget_field_info_f(file_id, dsetname1, nfields, field_namesr, field_sizesr,& field_offsetr, type_sizeout, errcode, maxlen ) @@ -502,16 +521,196 @@ SUBROUTINE test_table1() ! CALL h5fclose_f(file_id, errcode) - ! - ! Close FORTRAN predefined datatypes. - ! - CALL h5close_f(errcode) ! ! end function. ! END SUBROUTINE test_table1 +!------------------------------------------------------------------------- +! test_table2 +! Tests F2003 versions of H5TBread_table_f and H5TBmake_table_f +!------------------------------------------------------------------------- + +SUBROUTINE test_table2() + + USE H5TB ! module of H5TB + USE HDF5 ! 
module of HDF5 library + + IMPLICIT NONE + + INTEGER, PARAMETER :: int_kind_8 = SELECTED_INT_KIND(9) !should map to INTEGER*4 on most modern processors + INTEGER, PARAMETER :: int_kind_16 = SELECTED_INT_KIND(9) ! (18) !should map to INTEGER*8 on most modern processors + INTEGER, PARAMETER :: sp = SELECTED_REAL_KIND(5) ! This should map to REAL*4 on most modern processors + INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(10) ! This should map to REAL*8 on most modern processors + + TYPE particle_t + CHARACTER(LEN=11) :: name + INTEGER(KIND=int_kind_8) :: lati + INTEGER(KIND=int_kind_16) :: long + REAL(KIND=sp) :: pressure + REAL(KIND=dp) :: temperature + END TYPE particle_t + + INTEGER(HSIZE_T), PARAMETER :: nfields = 5 ! nfields + INTEGER(HSIZE_T), PARAMETER :: nrecords = 8 ! nrecords + + CHARACTER(len=8), PARAMETER :: filename = "f2tab.h5" ! File name + CHARACTER(LEN=5), PARAMETER :: table_name = "tabel" ! table name + CHARACTER(LEN=10), PARAMETER :: table_name_fill = "tabel_fill" ! table name + + ! Define field information + CHARACTER(LEN=11), DIMENSION(1:NFIELDS), PARAMETER :: field_names = (/& + "Name ", & + "Latitude ", & + "Longitude ", & + "Pressure ", & + "Temperature" & + /) + + INTEGER(hid_t), DIMENSION(1:nfields) :: field_type + INTEGER(hid_t) :: string_type + INTEGER(hid_t) :: file_id + INTEGER(hsize_t), PARAMETER :: chunk_size = 10 + TYPE(particle_t), DIMENSION(1:nrecords), TARGET :: fill_data + INTEGER :: compress + INTEGER :: status + INTEGER :: i + INTEGER(SIZE_T) :: dst_size + TYPE(particle_t), DIMENSION(1:nrecords), TARGET :: dst_buf + INTEGER(SIZE_T), DIMENSION(1:nfields) :: dst_offset + INTEGER(SIZE_T), DIMENSION(1:nfields) :: dst_sizes + TYPE(particle_t), DIMENSION(1:nrecords), TARGET :: p_data + TYPE(particle_t), DIMENSION(1:nrecords), TARGET :: r_data + + TYPE(C_PTR) :: f_ptr1, f_ptr2, f_ptr3 + + INTEGER :: errcode + CHARACTER(LEN=62) :: test_txt + + test_txt = "Testing H5TBread_table_f and H5TBmake_table_f (F2003)" + CALL test_begin(test_txt) + + ! 
Define an array of Particles + p_data(1:nrecords) = (/ & + particle_t("zero ",0_int_kind_8,0_int_kind_16,0.0_sp,0.0_dp), & + particle_t("one ",10_int_kind_8,10_int_kind_16,10.0_sp,10.0_dp), & + particle_t("two ",20_int_kind_8,20_int_kind_16,20.0_sp,20.0_dp), & + particle_t("three ",30_int_kind_8,30_int_kind_16,30.0_sp,30.0_dp),& + particle_t("four ",40_int_kind_8,40_int_kind_16,40.0_sp,40.0_dp), & + particle_t("five ",50_int_kind_8,50_int_kind_16,50.0_sp,50.0_dp), & + particle_t("six ",60_int_kind_8,60_int_kind_16,60.0_sp,60.0_dp), & + particle_t("seven ",70_int_kind_8,70_int_kind_16,70.0_sp,70.0_dp) & + /) + + fill_data(1:nrecords) = particle_t("no data",-1_int_kind_8, -2_int_kind_16, -99.0_sp, -100.0_dp) + + compress = 0 + + dst_size = H5OFFSETOF(C_LOC(dst_buf(1)), C_LOC(dst_buf(2))) + +#ifdef H5_FORTRAN_HAVE_STORAGE_SIZE + dst_sizes(1:nfields) = (/ & + storage_size(dst_buf(1)%name)/storage_size(c_char_'a'), & + storage_size(dst_buf(1)%lati)/storage_size(c_char_'a'), & + storage_size(dst_buf(1)%long)/storage_size(c_char_'a'), & + storage_size(dst_buf(1)%pressure)/storage_size(c_char_'a'), & + storage_size(dst_buf(1)%temperature)/storage_size(c_char_'a') & + /) +#else + dst_sizes(1:nfields) = (/ & + sizeof(dst_buf(1)%name), & + sizeof(dst_buf(1)%lati), & + sizeof(dst_buf(1)%long), & + sizeof(dst_buf(1)%pressure), & + sizeof(dst_buf(1)%temperature) & + /) +#endif + + dst_offset(1:nfields) = (/ & + H5OFFSETOF(C_LOC(dst_buf(1)), C_LOC(dst_buf(1)%name(1:1))), & + H5OFFSETOF(C_LOC(dst_buf(1)), C_LOC(dst_buf(1)%lati)), & + H5OFFSETOF(C_LOC(dst_buf(1)), C_LOC(dst_buf(1)%long)), & + H5OFFSETOF(C_LOC(dst_buf(1)), C_LOC(dst_buf(1)%pressure)), & + H5OFFSETOF(C_LOC(dst_buf(1)), C_LOC(dst_buf(1)%temperature)) & + /) + + ! Initialize field_type + CALL H5Tcopy_f(H5T_FORTRAN_S1, string_type, errcode) + CALL H5Tset_size_f(string_type, INT(11,size_t), errcode) + + field_type(1:5) = (/ & + string_type,& + h5kind_to_type(KIND(dst_buf(1)%lati), H5_INTEGER_KIND),& + h5kind_to_type(KIND(dst_buf(1)%long), H5_INTEGER_KIND),& + h5kind_to_type(KIND(dst_buf(1)%pressure), H5_REAL_KIND),& + h5kind_to_type(KIND(dst_buf(1)%temperature), H5_REAL_KIND) & + /) + + ! + ! Create a new file using default properties. + ! + CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, errcode) + + ! Check setting the fill values + + f_ptr1 = C_NULL_PTR + f_ptr2 = C_LOC(fill_data(1)%name(1:1)) + + CALL h5tbmake_table_f("Table Title Fill", file_id, table_name_fill, nfields, nrecords, & + dst_size, field_names, dst_offset, field_type, & + chunk_size, f_ptr2, compress, f_ptr1, errcode ) + + f_ptr3 = C_LOC(r_data(1)%name(1:1)) + CALL h5tbread_table_f(file_id, table_name_fill, nfields, dst_size, dst_offset, dst_sizes, f_ptr3, errcode) + + DO i = 1, nfields + IF(r_data(i)%name.NE.fill_data(i)%name.OR. & + r_data(i)%lati.NE.fill_data(i)%lati.OR. & + r_data(i)%long.NE.fill_data(i)%long.OR. & + r_data(i)%pressure.NE.fill_data(i)%pressure.OR. & + r_data(i)%temperature.NE.fill_data(i)%temperature)THEN + PRINT*,'H5TBmake/read_table_f --filled-- FAILED' + STOP + ENDIF + ENDDO + + ! 
Check setting the table values + + f_ptr1 = C_LOC(p_data(1)%name(1:1)) + f_ptr2 = C_NULL_PTR + + CALL h5tbmake_table_f("Table Title",file_id, table_name, nfields, nrecords, & + dst_size, field_names, dst_offset, field_type, & + chunk_size, f_ptr2, compress, f_ptr1, errcode ) + + f_ptr3 = C_LOC(r_data(1)%name(1:1)) + CALL h5tbread_table_f(file_id, table_name, nfields, dst_size, dst_offset, dst_sizes, f_ptr3, errcode) + + DO i = 1, nfields + IF(r_data(i)%name.NE.p_data(i)%name.OR. & + r_data(i)%lati.NE.p_data(i)%lati.OR. & + r_data(i)%long.NE.p_data(i)%long.OR. & + r_data(i)%pressure.NE.p_data(i)%pressure.OR. & + r_data(i)%temperature.NE.p_data(i)%temperature)THEN + PRINT*,'H5TBmake/read_table_f FAILED' + STOP + ENDIF + ENDDO + + CALL passed() + + !------------------------------------------------------------------------- + ! end + !------------------------------------------------------------------------- + + ! + ! Close the file. + ! + CALL h5fclose_f(file_id, errcode) + +END SUBROUTINE test_table2 + !------------------------------------------------------------------------- ! test_begin @@ -519,8 +718,7 @@ END SUBROUTINE test_table1 SUBROUTINE test_begin(string) CHARACTER(LEN=*), INTENT(IN) :: string - WRITE(*, fmt = '(14a)', advance = 'no') string - WRITE(*, fmt = '(40x,a)', advance = 'no') ' ' + WRITE(*, fmt = '(A)', ADVANCE = 'no') string END SUBROUTINE test_begin !------------------------------------------------------------------------- @@ -528,7 +726,7 @@ END SUBROUTINE test_begin !------------------------------------------------------------------------- SUBROUTINE passed() - WRITE(*, fmt = '(6a)') 'PASSED' + WRITE(*, fmt = '(T12,A6)') 'PASSED' END SUBROUTINE passed diff --git a/hl/test/test_lite.c b/hl/test/test_lite.c index f3258d6..6cadd95 100644 --- a/hl/test/test_lite.c +++ b/hl/test/test_lite.c @@ -2066,6 +2066,15 @@ static int test_valid_path(void) /************************************** * CHECK ABSOLUTE PATHS **************************************/ + + if( (path_valid = H5LTpath_valid(file_id, "/", TRUE)) != TRUE) { + goto out; + } + + if( (path_valid = H5LTpath_valid(file_id, "/", FALSE)) != TRUE) { + goto out; + } + if( (path_valid = H5LTpath_valid(file_id, "/G1", TRUE)) != TRUE) { goto out; } @@ -2112,6 +2121,20 @@ static int test_valid_path(void) * CHECK RELATIVE PATHS ***************************************/ + if( (group = H5Gopen2(file_id, "/", H5P_DEFAULT)) < 0) + goto out; + + if( (path_valid = H5LTpath_valid(group, "/", TRUE)) != TRUE) { + goto out; + } + + if( (path_valid = H5LTpath_valid(group, "/", FALSE)) != TRUE) { + goto out; + } + + if(H5Gclose(group)<0) + goto out; + if( (group = H5Gopen2(file_id, "/G1", H5P_DEFAULT)) < 0) goto out; diff --git a/java/src/hdf/hdf5lib/CMakeLists.txt b/java/src/hdf/hdf5lib/CMakeLists.txt index c2daff4..47aa05b 100644 --- a/java/src/hdf/hdf5lib/CMakeLists.txt +++ b/java/src/hdf/hdf5lib/CMakeLists.txt @@ -11,7 +11,7 @@ INCLUDE_DIRECTORIES ( ) SET_GLOBAL_VARIABLE (HDF5_JAVA_SOURCE_PACKAGES - "${HDFJAVA_SOURCE_PACKAGES};hdf.hdf5lib.callbacks;hdf.hdf5lib.exceptions;hdf.hdf5lib.structs;hdf.hdf5lib" + "${HDF5_JAVA_SOURCE_PACKAGES};hdf.hdf5lib.callbacks;hdf.hdf5lib.exceptions;hdf.hdf5lib.structs;hdf.hdf5lib" ) set (HDF5_JAVA_HDF_HDF5_CALLBACKS_SRCS diff --git a/java/test/junit.sh.in b/java/test/junit.sh.in index 698dc5d..f37703a 100644 --- a/java/test/junit.sh.in +++ b/java/test/junit.sh.in @@ -34,6 +34,9 @@ AWK='awk' nerrors=0 verbose=yes +# setup my machine information. 
+myos=`uname -s` + # where the libs exist HDFLIB_HOME="$top_srcdir/java/lib" BLDLIBDIR="$top_builddir/java/lib" @@ -231,13 +234,18 @@ sed -e 's/thread [0-9]*/thread (IDs)/' -e 's/: .*\.c /: (file name) /' \ -e 's/[1-9]*\.[0-9]*\.[0-9]*[^)]*/version (number)/' \ $actual_ext > $actual -if $CMP $expect $actual; then - echo " PASSED" +# SunOS does not support this. Skip it. +if [ $myos = SunOS ]; then + echo " SKIPPED" else - echo "*FAILED*" - echo " Expected result differs from actual result" - nerrors="`expr $nerrors + 1`" - test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /' + if $CMP $expect $actual; then + echo " PASSED" + else + echo "*FAILED*" + echo " Expected result differs from actual result" + nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /' + fi fi diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 1e0061f..ebb7f89 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -103,12 +103,15 @@ IDE_GENERATED_PROPERTIES ("H5CS" "${H5CS_HDRS}" "${H5CS_SRCS}" ) set (H5D_SRCS ${HDF5_SRC_DIR}/H5D.c ${HDF5_SRC_DIR}/H5Dbtree.c + ${HDF5_SRC_DIR}/H5Dbtree2.c ${HDF5_SRC_DIR}/H5Dchunk.c ${HDF5_SRC_DIR}/H5Dcompact.c ${HDF5_SRC_DIR}/H5Dcontig.c ${HDF5_SRC_DIR}/H5Ddbg.c ${HDF5_SRC_DIR}/H5Ddeprec.c + ${HDF5_SRC_DIR}/H5Dearray.c ${HDF5_SRC_DIR}/H5Defl.c + ${HDF5_SRC_DIR}/H5Dfarray.c ${HDF5_SRC_DIR}/H5Dfill.c ${HDF5_SRC_DIR}/H5Dint.c ${HDF5_SRC_DIR}/H5Dio.c diff --git a/src/H5Aint.c b/src/H5Aint.c index 1736dc0..ffb8667 100644 --- a/src/H5Aint.c +++ b/src/H5Aint.c @@ -207,7 +207,7 @@ H5A_create(const H5G_loc_t *loc, const char *name, const H5T_t *type, HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "invalid datatype location") /* Set the latest format for datatype, if requested */ - if(H5F_USE_LATEST_FORMAT(loc->oloc->file)) + if(H5F_USE_LATEST_FLAGS(loc->oloc->file, H5F_LATEST_DATATYPE)) if(H5T_set_latest_version(attr->shared->dt) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of datatype") @@ -215,7 +215,7 @@ H5A_create(const H5G_loc_t *loc, const char *name, const H5T_t *type, attr->shared->ds = H5S_copy(space, FALSE, TRUE); /* Set the latest format for dataspace, if requested */ - if(H5F_USE_LATEST_FORMAT(loc->oloc->file)) + if(H5F_USE_LATEST_FLAGS(loc->oloc->file, H5F_LATEST_DATASPACE)) if(H5S_set_latest_version(attr->shared->ds) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of dataspace") @@ -1853,7 +1853,7 @@ herr_t H5A_set_version(const H5F_t *f, H5A_t *attr) { hbool_t type_shared, space_shared; /* Flags to indicate that shared messages are used for this attribute */ - hbool_t use_latest_format; /* Flag indicating the newest file format should be used */ + hbool_t use_latest_format; /* Flag indicating the latest attribute version support is enabled */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -1862,8 +1862,8 @@ H5A_set_version(const H5F_t *f, H5A_t *attr) HDassert(f); HDassert(attr); - /* Get the file's 'use the latest version of the format' flag */ - use_latest_format = H5F_USE_LATEST_FORMAT(f); + /* Get the file's 'use the latest attribute version support' flag */ + use_latest_format = H5F_USE_LATEST_FLAGS(f, H5F_LATEST_ATTRIBUTE); /* Check whether datatype and dataspace are shared */ if(H5O_msg_is_shared(H5O_DTYPE_ID, attr->shared->dt) > 0) @@ -2354,8 +2354,7 @@ H5A_dense_post_copy_file_all(const H5O_loc_t *src_oloc, const H5O_ainfo_t *ainfo attr_op.op_type = H5A_ATTR_OP_LIB; attr_op.u.lib_op = H5A__dense_post_copy_file_cb; - - 
if(H5A_dense_iterate(src_oloc->file, dxpl_id, (hid_t)0, ainfo_src, H5_INDEX_NAME, + if(H5A_dense_iterate(src_oloc->file, dxpl_id, (hid_t)0, ainfo_src, H5_INDEX_NAME, H5_ITER_NATIVE, (hsize_t)0, NULL, &attr_op, &udata) < 0) HGOTO_ERROR(H5E_ATTR, H5E_CANTINIT, FAIL, "error building attribute table") @@ -87,6 +87,8 @@ extern const H5B2_class_t H5G_BT2_CORDER[1]; extern const H5B2_class_t H5SM_INDEX[1]; extern const H5B2_class_t H5A_BT2_NAME[1]; extern const H5B2_class_t H5A_BT2_CORDER[1]; +extern const H5B2_class_t H5D_BT2[1]; +extern const H5B2_class_t H5D_BT2_FILT[1]; extern const H5B2_class_t H5B2_TEST2[1]; const H5B2_class_t *const H5B2_client_class_g[] = { @@ -100,7 +102,9 @@ const H5B2_class_t *const H5B2_client_class_g[] = { H5SM_INDEX, /* 7 - H5B2_SOHM_INDEX_ID */ H5A_BT2_NAME, /* 8 - H5B2_ATTR_DENSE_NAME_ID */ H5A_BT2_CORDER, /* 9 - H5B2_ATTR_DENSE_CORDER_ID */ - H5B2_TEST2, /* 10 - H5B2_TEST_ID */ + H5D_BT2, /* 10 - H5B2_CDSET_ID */ + H5D_BT2_FILT, /* 11 - H5B2_CDSET_FILT_ID */ + H5B2_TEST2 /* 12 - H5B2_TEST_ID */ }; @@ -1445,3 +1449,33 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5B2_delete() */ + +/*------------------------------------------------------------------------- + * Function: H5B2_patch_file + * + * Purpose: Patch the top-level file pointer contained in bt2 + * to point to idx_info->f if they are different. + * This is possible because the file pointer in bt2 can be + * closed out if bt2 remains open. + * + * Return: SUCCEED + * + *------------------------------------------------------------------------- + */ +herr_t +H5B2_patch_file(H5B2_t *bt2, H5F_t *f) +{ + FUNC_ENTER_NOAPI_NOINIT_NOERR + + /* + * Check arguments. + */ + HDassert(bt2); + HDassert(f); + + if(bt2->f != f || bt2->hdr->f != f) + bt2->f = bt2->hdr->f = f; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5B2_patch_file() */ + diff --git a/src/H5B2private.h b/src/H5B2private.h index b509869..3caf41f 100644 --- a/src/H5B2private.h +++ b/src/H5B2private.h @@ -54,6 +54,8 @@ typedef enum H5B2_subid_t { H5B2_SOHM_INDEX_ID, /* B-tree is an index for shared object header messages */ H5B2_ATTR_DENSE_NAME_ID, /* B-tree is for indexing 'name' field for "dense" attribute storage on objects */ H5B2_ATTR_DENSE_CORDER_ID, /* B-tree is for indexing 'creation order' field for "dense" attribute storage on objects */ + H5B2_CDSET_ID, /* B-tree is for non-filtered chunked dataset storage w/ >1 unlim dims */ + H5B2_CDSET_FILT_ID, /* B-tree is for filtered chunked dataset storage w/ >1 unlim dims */ H5B2_TEST2_ID, /* Another B-tree is for testing (do not use for actual data) */ H5B2_NUM_BTREE_ID /* Number of B-tree IDs (must be last) */ } H5B2_subid_t; @@ -151,6 +153,7 @@ H5_DLL herr_t H5B2_size(H5B2_t *bt2, hid_t dxpl_id, H5_DLL herr_t H5B2_close(H5B2_t *bt2, hid_t dxpl_id); H5_DLL herr_t H5B2_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata, H5B2_remove_t op, void *op_data); +H5_DLL herr_t H5B2_patch_file(H5B2_t *fa, H5F_t *f); /* Statistics routines */ H5_DLL herr_t H5B2_stat_info(H5B2_t *bt2, H5B2_stat_t *info); diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c new file mode 100644 index 0000000..9de609f --- /dev/null +++ b/src/H5Dbtree2.c @@ -0,0 +1,1491 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. 
The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * + * Purpose: v2 B-tree indexing for chunked datasets with > 1 unlimited dimensions. + * Each dataset chunk in the b-tree is identified by its dimensional offset. + * + */ + +/****************/ +/* Module Setup */ +/****************/ + +#include "H5Dmodule.h" /* This source code file is part of the H5D module */ + + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5Dpkg.h" /* Datasets */ +#include "H5FLprivate.h" /* Free Lists */ +#include "H5MFprivate.h" /* File space management */ +#include "H5VMprivate.h" /* Vector and array functions */ + + +/****************/ +/* Local Macros */ +/****************/ + + +/******************/ +/* Local Typedefs */ +/******************/ +/* User data for creating callback context */ +typedef struct H5D_bt2_ctx_ud_t { + const H5F_t *f; /* Pointer to file info */ + uint32_t chunk_size; /* Size of chunk (bytes; for filtered object) */ + unsigned ndims; /* Number of dimensions */ + uint32_t *dim; /* Size of chunk in elements */ +} H5D_bt2_ctx_ud_t; + +/* The callback context */ +typedef struct H5D_bt2_ctx_t { + uint32_t chunk_size; /* Size of chunk (bytes; constant for unfiltered object) */ + size_t sizeof_addr; /* Size of file addresses in the file (bytes) */ + size_t chunk_size_len; /* Size of chunk sizes in the file (bytes) */ + unsigned ndims; /* Number of dimensions in chunk */ + uint32_t *dim; /* Size of chunk in elements */ +} H5D_bt2_ctx_t; + +/* User data for the chunk's removal callback routine */ +typedef struct H5D_bt2_remove_ud_t { + H5F_t *f; /* File pointer for operation */ + hid_t dxpl_id; /* DXPL ID for operation */ +} H5D_bt2_remove_ud_t; + +/* Callback info for iteration over chunks in v2 B-tree */ +typedef struct H5D_bt2_it_ud_t { + H5D_chunk_cb_func_t cb; /* Callback routine for the chunk */ + void *udata; /* User data for the chunk's callback routine */ +} H5D_bt2_it_ud_t; + +/* User data for compare callback */ +typedef struct H5D_bt2_ud_t { + H5D_chunk_rec_t rec; /* The record to search for */ + unsigned ndims; /* Number of dimensions for the chunked dataset */ +} H5D_bt2_ud_t; + + +/********************/ +/* Local Prototypes */ +/********************/ + +/* Shared v2 B-tree methods for indexing filtered and non-filtered chunked datasets */ +static void *H5D__bt2_crt_context(void *udata); +static herr_t H5D__bt2_dst_context(void *ctx); +static herr_t H5D__bt2_store(void *native, const void *udata); +static herr_t H5D__bt2_compare(const void *rec1, const void *rec2, int *result); + +/* v2 B-tree class for indexing non-filtered chunked datasets */ +static herr_t H5D__bt2_unfilt_encode(uint8_t *raw, const void *native, void *ctx); +static herr_t H5D__bt2_unfilt_decode(const uint8_t *raw, void *native, void *ctx); +static herr_t H5D__bt2_unfilt_debug(FILE *stream, int indent, int fwidth, + const void *record, const void *u_ctx); + 
+/* v2 B-tree class for indexing filtered chunked datasets */ +static herr_t H5D__bt2_filt_encode(uint8_t *raw, const void *native, void *ctx); +static herr_t H5D__bt2_filt_decode(const uint8_t *raw, void *native, void *ctx); +static herr_t H5D__bt2_filt_debug(FILE *stream, int indent, int fwidth, + const void *record, const void *u_ctx); + +/* Helper routine */ +static herr_t H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info); + +/* Callback for H5B2_iterate() which is called in H5D__bt2_idx_iterate() */ +static int H5D__bt2_idx_iterate_cb(const void *_record, void *_udata); + +/* Callback for H5B2_find() which is called in H5D__bt2_idx_get_addr() */ +static herr_t H5D__bt2_found_cb(const void *nrecord, void *op_data); + +/* + * Callback for H5B2_remove() and H5B2_delete() which is called + * in H5D__bt2_idx_remove() and H5D__bt2_idx_delete(). + */ +static herr_t H5D__bt2_remove_cb(const void *nrecord, void *_udata); + +/* Callback for H5B2_modify() which is called in H5D__bt2_idx_insert() */ +static herr_t H5D__bt2_mod_cb(void *_record, void *_op_data, hbool_t *changed); + +/* Chunked layout indexing callbacks for v2 B-tree indexing */ +static herr_t H5D__bt2_idx_init(const H5D_chk_idx_info_t *idx_info, + const H5S_t *space, haddr_t dset_ohdr_addr); +static herr_t H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info); +static hbool_t H5D__bt2_idx_is_space_alloc(const H5O_storage_chunk_t *storage); +static herr_t H5D__bt2_idx_insert(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_ud_t *udata, const H5D_t *dset); +static herr_t H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_ud_t *udata); +static int H5D__bt2_idx_iterate(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); +static herr_t H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_common_ud_t *udata); +static herr_t H5D__bt2_idx_delete(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__bt2_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, + const H5D_chk_idx_info_t *idx_info_dst); +static herr_t H5D__bt2_idx_copy_shutdown(H5O_storage_chunk_t *storage_src, + H5O_storage_chunk_t *storage_dst, hid_t dxpl_id); +static herr_t H5D__bt2_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *size); +static herr_t H5D__bt2_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr); +static herr_t H5D__bt2_idx_dump(const H5O_storage_chunk_t *storage, + FILE *stream); +static herr_t H5D__bt2_idx_dest(const H5D_chk_idx_info_t *idx_info); + + +/*********************/ +/* Package Variables */ +/*********************/ + +/* Chunked dataset I/O ops for v2 B-tree indexing */ +const H5D_chunk_ops_t H5D_COPS_BT2[1] = {{ + H5D__bt2_idx_init, /* init */ + H5D__bt2_idx_create, /* create */ + H5D__bt2_idx_is_space_alloc, /* is_space_alloc */ + H5D__bt2_idx_insert, /* insert */ + H5D__bt2_idx_get_addr, /* get_addr */ + NULL, /* resize */ + H5D__bt2_idx_iterate, /* iterate */ + H5D__bt2_idx_remove, /* remove */ + H5D__bt2_idx_delete, /* delete */ + H5D__bt2_idx_copy_setup, /* copy_setup */ + H5D__bt2_idx_copy_shutdown, /* copy_shutdown */ + H5D__bt2_idx_size, /* size */ + H5D__bt2_idx_reset, /* reset */ + H5D__bt2_idx_dump, /* dump */ + H5D__bt2_idx_dest /* destroy */ +}}; + + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/* v2 B-tree class for indexing non-filtered chunked datasets */ +const H5B2_class_t H5D_BT2[1] = {{ /* B-tree class information */ + H5B2_CDSET_ID, /* Type of B-tree */ + "H5B2_CDSET_ID", /* 
Name of B-tree class */ + sizeof(H5D_chunk_rec_t), /* Size of native record */ + H5D__bt2_crt_context, /* Create client callback context */ + H5D__bt2_dst_context, /* Destroy client callback context */ + H5D__bt2_store, /* Record storage callback */ + H5D__bt2_compare, /* Record comparison callback */ + H5D__bt2_unfilt_encode, /* Record encoding callback */ + H5D__bt2_unfilt_decode, /* Record decoding callback */ + H5D__bt2_unfilt_debug /* Record debugging callback */ +}}; + +/* v2 B-tree class for indexing filtered chunked datasets */ +const H5B2_class_t H5D_BT2_FILT[1] = {{ /* B-tree class information */ + H5B2_CDSET_FILT_ID, /* Type of B-tree */ + "H5B2_CDSET_FILT_ID", /* Name of B-tree class */ + sizeof(H5D_chunk_rec_t), /* Size of native record */ + H5D__bt2_crt_context, /* Create client callback context */ + H5D__bt2_dst_context, /* Destroy client callback context */ + H5D__bt2_store, /* Record storage callback */ + H5D__bt2_compare, /* Record comparison callback */ + H5D__bt2_filt_encode, /* Record encoding callback */ + H5D__bt2_filt_decode, /* Record decoding callback */ + H5D__bt2_filt_debug /* Record debugging callback */ +}}; + + +/*******************/ +/* Local Variables */ +/*******************/ + +/* Declare a free list to manage the H5D_bt2_ctx_t struct */ +H5FL_DEFINE_STATIC(H5D_bt2_ctx_t); +/* Declare a free list to manage the page elements */ +H5FL_BLK_DEFINE(chunk_dim); + + + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_crt_context + * + * Purpose: Create client callback context + * + * Return: Success: non-NULL + * Failure: NULL + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static void * +H5D__bt2_crt_context(void *_udata) +{ + H5D_bt2_ctx_ud_t *udata = (H5D_bt2_ctx_ud_t *)_udata; /* User data for building callback context */ + H5D_bt2_ctx_t *ctx; /* Callback context structure */ + uint32_t *my_dim = NULL; /* Pointer to copy of chunk dimension size */ + void *ret_value = NULL; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity check */ + HDassert(udata); + HDassert(udata->f); + HDassert(udata->ndims > 0 && udata->ndims < H5O_LAYOUT_NDIMS); + + /* Allocate callback context */ + if(NULL == (ctx = H5FL_MALLOC(H5D_bt2_ctx_t))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate callback context") + + /* Determine the size of addresses and set the chunk size and # of dimensions for the dataset */ + ctx->sizeof_addr = H5F_SIZEOF_ADDR(udata->f); + ctx->chunk_size = udata->chunk_size; + ctx->ndims = udata->ndims; + + /* Set up the "local" information for this dataset's chunk dimension sizes */ + if(NULL == (my_dim = (uint32_t *)H5FL_BLK_MALLOC(chunk_dim, H5O_LAYOUT_NDIMS * sizeof(uint32_t)))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate chunk dims") + HDmemcpy(my_dim, udata->dim, H5O_LAYOUT_NDIMS * sizeof(uint32_t)); + ctx->dim = my_dim; + + /* + * Compute the size required for encoding the size of a chunk, + * allowing for an extra byte, in case the filter makes the chunk larger. 
+ */ + ctx->chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)udata->chunk_size) + 8) / 8); + if(ctx->chunk_size_len > 8) + ctx->chunk_size_len = 8; + + /* Set return value */ + ret_value = ctx; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__bt2_crt_context() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_dst_context + * + * Purpose: Destroy client callback context + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_dst_context(void *_ctx) +{ + H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity check */ + HDassert(ctx); + + /* Free array for chunk dimension sizes */ + if(ctx->dim) + (void)H5FL_BLK_FREE(chunk_dim, ctx->dim); + /* Release callback context */ + ctx = H5FL_FREE(H5D_bt2_ctx_t, ctx); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__bt2_dst_context() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_store + * + * Purpose: Store native information into record for v2 B-tree + * (non-filtered) + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_store(void *record, const void *_udata) +{ + const H5D_bt2_ud_t *udata = (const H5D_bt2_ud_t *)_udata; /* User data */ + + FUNC_ENTER_STATIC_NOERR + + *(H5D_chunk_rec_t *)record = udata->rec; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__bt2_store() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_compare + * + * Purpose: Compare two native information records, according to some key + * (non-filtered) + * + * Return: <0 if rec1 < rec2 + * =0 if rec1 == rec2 + * >0 if rec1 > rec2 + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_compare(const void *_udata, const void *_rec2, int *result) +{ + const H5D_bt2_ud_t *udata = (const H5D_bt2_ud_t *)_udata; /* User data */ + const H5D_chunk_rec_t *rec1 = &(udata->rec); /* The search record */ + const H5D_chunk_rec_t *rec2 = (const H5D_chunk_rec_t *)_rec2; /* The native record */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(rec1); + HDassert(rec2); + + /* Compare the offsets but ignore the other fields */ + *result = H5VM_vector_cmp_u(udata->ndims, rec1->scaled, rec2->scaled); + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__bt2_compare() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_unfilt_encode + * + * Purpose: Encode native information into raw form for storing on disk + * (non-filtered) + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_unfilt_encode(uint8_t *raw, const void *_record, void *_ctx) +{ + H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */ + const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */ + unsigned u; /* Local index varible */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity check */ + HDassert(ctx); 
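    /* (Annotation, not part of the original change.)  For an unfiltered
     * chunk the raw v2 B-tree record is simply:
     *
     *     chunk file address       (ctx->sizeof_addr bytes)
     *     scaled chunk offsets     (ctx->ndims values, 8 bytes each)
     *
     * The chunk size and filter mask are not stored, because every
     * unfiltered chunk is exactly ctx->chunk_size bytes with a zero filter
     * mask; H5D__bt2_unfilt_decode() below reconstructs those two fields.
     */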
+ + /* Encode the record's fields */ + H5F_addr_encode_len(ctx->sizeof_addr, &raw, record->chunk_addr); + /* (Don't encode the chunk size & filter mask for non-filtered B-tree records) */ + for(u = 0; u < ctx->ndims; u++) + UINT64ENCODE(raw, record->scaled[u]); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__bt2_unfilt_encode() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_unfilt_decode + * + * Purpose: Decode raw disk form of record into native form + * (non-filtered) + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_unfilt_decode(const uint8_t *raw, void *_record, void *_ctx) +{ + H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */ + H5D_chunk_rec_t *record = (H5D_chunk_rec_t *)_record; /* The native record */ + unsigned u; /* Local index variable */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity check */ + HDassert(ctx); + + /* Decode the record's fields */ + H5F_addr_decode_len(ctx->sizeof_addr, &raw, &record->chunk_addr); + record->nbytes = ctx->chunk_size; + record->filter_mask = 0; + for(u = 0; u < ctx->ndims; u++) + UINT64DECODE(raw, record->scaled[u]); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__bt2_unfilt_decode() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_unfilt_debug + * + * Purpose: Debug native form of record (non-filtered) + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_unfilt_debug(FILE *stream, int indent, int fwidth, + const void *_record, const void *_ctx) +{ + const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */ + const H5D_bt2_ctx_t *ctx = (const H5D_bt2_ctx_t *)_ctx; /* Callback context */ + unsigned u; /* Local index variable */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(record); + HDassert(ctx->chunk_size == record->nbytes); + HDassert(0 == record->filter_mask); + + HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, "Chunk address:", record->chunk_addr); + + HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Logical offset:"); + for(u = 0; u < ctx->ndims; u++) + HDfprintf(stream, "%s%Hd", u?", ":"", record->scaled[u] * ctx->dim[u]); + HDfputs("}\n", stream); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__bt2_unfilt_debug() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_filt_encode + * + * Purpose: Encode native information into raw form for storing on disk + * (filtered) + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_filt_encode(uint8_t *raw, const void *_record, void *_ctx) +{ + H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */ + const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */ + unsigned u; /* Local index variable */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity check */ + HDassert(ctx); + HDassert(record); + HDassert(H5F_addr_defined(record->chunk_addr)); + HDassert(0 != record->nbytes); + + /* Encode the record's fields */ + H5F_addr_encode_len(ctx->sizeof_addr, 
&raw, record->chunk_addr); + UINT64ENCODE_VAR(raw, record->nbytes, ctx->chunk_size_len); + UINT32ENCODE(raw, record->filter_mask); + for(u = 0; u < ctx->ndims; u++) + UINT64ENCODE(raw, record->scaled[u]); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__bt2_filt_encode() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_filt_decode + * + * Purpose: Decode raw disk form of record into native form + * (filtered) + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_filt_decode(const uint8_t *raw, void *_record, void *_ctx) +{ + H5D_bt2_ctx_t *ctx = (H5D_bt2_ctx_t *)_ctx; /* Callback context structure */ + H5D_chunk_rec_t *record = (H5D_chunk_rec_t *)_record; /* The native record */ + unsigned u; /* Local index variable */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity check */ + HDassert(ctx); + HDassert(record); + + /* Decode the record's fields */ + H5F_addr_decode_len(ctx->sizeof_addr, &raw, &record->chunk_addr); + UINT64DECODE_VAR(raw, record->nbytes, ctx->chunk_size_len); + UINT32DECODE(raw, record->filter_mask); + for(u = 0; u < ctx->ndims; u++) + UINT64DECODE(raw, record->scaled[u]); + + /* Sanity checks */ + HDassert(H5F_addr_defined(record->chunk_addr)); + HDassert(0 != record->nbytes); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__bt2_filt_decode() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_filt_debug + * + * Purpose: Debug native form of record (filtered) + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_filt_debug(FILE *stream, int indent, int fwidth, + const void *_record, const void *_ctx) +{ + const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */ + const H5D_bt2_ctx_t *ctx = (const H5D_bt2_ctx_t *)_ctx; /* Callback context */ + unsigned u; /* Local index variable */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(record); + HDassert(H5F_addr_defined(record->chunk_addr)); + HDassert(0 != record->nbytes); + + HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, "Chunk address:", record->chunk_addr); + HDfprintf(stream, "%*s%-*s %u bytes\n", indent, "", fwidth, "Chunk size:", (unsigned)record->nbytes); + HDfprintf(stream, "%*s%-*s 0x%08x\n", indent, "", fwidth, "Filter mask:", record->filter_mask); + + HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Logical offset:"); + for(u = 0; u < ctx->ndims; u++) + HDfprintf(stream, "%s%Hd", u?", ":"", record->scaled[u] * ctx->dim[u]); + HDfputs("}\n", stream); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__bt2_filt_debug() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_init + * + * Purpose: Initialize the indexing information for a dataset. 
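 *		(Annotation: for the v2 B-tree index this amounts to recording
 *		the dataset's object header address in the index storage union;
 *		the B-tree itself is created or opened lazily by the other
 *		index callbacks.)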
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * Wednesday, May 23, 2012 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_init(const H5D_chk_idx_info_t H5_ATTR_UNUSED *idx_info, + const H5S_t H5_ATTR_UNUSED *space, haddr_t dset_ohdr_addr) +{ + FUNC_ENTER_STATIC_NOERR + + /* Check args */ + HDassert(H5F_addr_defined(dset_ohdr_addr)); + + idx_info->storage->u.btree2.dset_ohdr_addr = dset_ohdr_addr; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__bt2_idx_init() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_open() + * + * Purpose: Opens an existing v2 B-tree. + * + * Note: This information is passively initialized from each index + * operation callback because those abstract chunk index operations + * are designed to work with the v2 B-tree chunk indices also, + * which don't require an 'open' for the data structure. + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_open(const H5D_chk_idx_info_t *idx_info) +{ + H5D_bt2_ctx_ud_t u_ctx; /* user data for creating context */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(H5D_CHUNK_IDX_BT2 == idx_info->layout->idx_type); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(NULL == idx_info->storage->u.btree2.bt2); + + /* Set up the user data */ + u_ctx.f = idx_info->f; + u_ctx.ndims = idx_info->layout->ndims - 1; + u_ctx.chunk_size = idx_info->layout->size; + u_ctx.dim = idx_info->layout->dim; + + /* Open v2 B-tree for the chunk index */ + if(NULL == (idx_info->storage->u.btree2.bt2 = H5B2_open(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &u_ctx))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open v2 B-tree for tracking chunked dataset") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__bt2_idx_open() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_create + * + * Purpose: Create the v2 B-tree for tracking dataset chunks + * + * Return: SUCCEED/FAIL + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_create(const H5D_chk_idx_info_t *idx_info) +{ + H5B2_create_t bt2_cparam; /* v2 B-tree creation parameters */ + H5D_bt2_ctx_ud_t u_ctx; /* data for context call */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(!H5F_addr_defined(idx_info->storage->idx_addr)); + + bt2_cparam.rrec_size = H5F_SIZEOF_ADDR(idx_info->f) /* Address of chunk */ + + (idx_info->layout->ndims - 1) * 8; /* # of dimensions x 64-bit chunk offsets */ + + /* General parameters */ + if(idx_info->pline->nused > 0) { + unsigned chunk_size_len; /* Size of encoded chunk size */ + + /* + * Compute the size required for encoding the size of a chunk, + * allowing for an extra byte, in case the filter makes the chunk larger. 
+ */ + chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)idx_info->layout->size) + 8) / 8); + if(chunk_size_len > 8) + chunk_size_len = 8; + + bt2_cparam.rrec_size += chunk_size_len + 4; /* Size of encoded chunk size & filter mask */ + bt2_cparam.cls = H5D_BT2_FILT; + } /* end if */ + else + bt2_cparam.cls = H5D_BT2; + + bt2_cparam.node_size = idx_info->layout->u.btree2.cparam.node_size; + bt2_cparam.split_percent = idx_info->layout->u.btree2.cparam.split_percent; + bt2_cparam.merge_percent = idx_info->layout->u.btree2.cparam.merge_percent; + + u_ctx.f = idx_info->f; + u_ctx.ndims = idx_info->layout->ndims - 1; + u_ctx.chunk_size = idx_info->layout->size; + u_ctx.dim = idx_info->layout->dim; + + /* Create the v2 B-tree for the chunked dataset */ + if(NULL == (idx_info->storage->u.btree2.bt2 = H5B2_create(idx_info->f, idx_info->dxpl_id, &bt2_cparam, &u_ctx))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create v2 B-tree for tracking chunked dataset") + + /* Retrieve the v2 B-tree's address in the file */ + if(H5B2_get_addr(idx_info->storage->u.btree2.bt2, &(idx_info->storage->idx_addr)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get v2 B-tree address for tracking chunked dataset") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__bt2_idx_create() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_is_space_alloc + * + * Purpose: Query if space is allocated for index method + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static hbool_t +H5D__bt2_idx_is_space_alloc(const H5O_storage_chunk_t *storage) +{ + FUNC_ENTER_STATIC_NOERR + + /* Check args */ + HDassert(storage); + + FUNC_LEAVE_NOAPI((hbool_t)H5F_addr_defined(storage->idx_addr)) +} /* end H5D__bt2_idx_is_space_alloc() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_mod_cb + * + * Purpose: Modify record for dataset chunk when it is found in a v2 B-tree. + * This is the callback for H5B2_modify() which is called in + * H5D__bt2_idx_insert(). + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_mod_cb(void *_record, void *_op_data, hbool_t *changed) +{ + H5D_bt2_ud_t *op_data = (H5D_bt2_ud_t *)_op_data; /* User data for v2 B-tree calls */ + H5D_chunk_rec_t *record = (H5D_chunk_rec_t *)_record; /* Chunk record */ + + FUNC_ENTER_STATIC_NOERR + +/* Sanity check */ +#ifndef NDEBUG +{ + unsigned u; /* Local index variable */ + + for(u = 0; u < op_data->ndims; u++) + HDassert(record->scaled[u] == op_data->rec.scaled[u]); +} +#endif /* NDEBUG */ + + /* Modify record */ + *record = op_data->rec; + + /* Note that the record changed */ + *changed = TRUE; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__bt2_mod_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_insert + * + * Purpose: Insert chunk address into the indexing structure. 
+ * A non-filtered chunk: + * Should not exist + * Allocate the chunk and pass chunk address back up + * A filtered chunk: + * If it was not found, create the chunk and pass chunk address back up + * If it was found but its size changed, reallocate the chunk and pass chunk address back up + * If it was found but its size was the same, pass chunk address back up + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, + const H5D_t H5_ATTR_UNUSED *dset) +{ + H5B2_t *bt2; /* v2 B-tree handle for indexing chunks */ + H5D_bt2_ud_t bt2_udata; /* User data for v2 B-tree calls */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(udata); + HDassert(H5F_addr_defined(udata->chunk_block.offset)); + + /* Check if the v2 B-tree is open yet */ + if(NULL == idx_info->storage->u.btree2.bt2) { + /* Open existing v2 B-tree */ + if(H5D__bt2_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree") + } /* end if */ + else /* Patch the top level file pointer contained in bt2 if needed */ + if(H5B2_patch_file(idx_info->storage->u.btree2.bt2, idx_info->f) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch v2 B-tree file pointer") + + /* Set convenience pointer to v2 B-tree structure */ + bt2 = idx_info->storage->u.btree2.bt2; + + /* Set up callback info */ + bt2_udata.ndims = idx_info->layout->ndims - 1; + bt2_udata.rec.chunk_addr = udata->chunk_block.offset; + if(idx_info->pline->nused > 0) { /* filtered chunk */ + H5_CHECKED_ASSIGN(bt2_udata.rec.nbytes, uint32_t, udata->chunk_block.length, hsize_t); + bt2_udata.rec.filter_mask = udata->filter_mask; + } /* end if */ + else { /* non-filtered chunk */ + bt2_udata.rec.nbytes = idx_info->layout->size; + bt2_udata.rec.filter_mask = 0; + } /* end else */ + for(u = 0; u < (idx_info->layout->ndims - 1); u++) + bt2_udata.rec.scaled[u] = udata->common.scaled[u]; + + /* Update record for v2 B-tree (could be insert or modify) */ + if(H5B2_update(bt2, idx_info->dxpl_id, &bt2_udata, H5D__bt2_mod_cb, &bt2_udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTUPDATE, FAIL, "unable to update record in v2 B-tree") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__bt2_idx_insert() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_found_cb + * + * Purpose: Retrieve record for dataset chunk when it is found in a v2 B-tree. + * This is the callback for H5B2_find() which is called in + * H5D__bt2_idx_get_addr() and H5D__bt2_idx_insert(). 
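 *		(Annotation: in the code as added here, H5D__bt2_idx_insert()
 *		goes through H5B2_update() with H5D__bt2_mod_cb(); only
 *		H5D__bt2_idx_get_addr() calls H5B2_find() with this callback.)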
+ * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_found_cb(const void *nrecord, void *op_data) +{ + FUNC_ENTER_STATIC_NOERR + + *(H5D_chunk_rec_t *)op_data = *(const H5D_chunk_rec_t *)nrecord; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* H5D__bt2_found_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_get_addr + * + * Purpose: Get the file address of a chunk if file space has been + * assigned. Save the retrieved information in the udata + * supplied. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) +{ + H5B2_t *bt2; /* v2 B-tree handle for indexing chunks */ + H5D_bt2_ud_t bt2_udata; /* User data for v2 B-tree calls */ + H5D_chunk_rec_t found_rec; /* Record found from searching for object */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->layout->ndims > 0); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(udata); + + /* Check if the v2 B-tree is open yet */ + if(NULL == idx_info->storage->u.btree2.bt2) { + /* Open existing v2 B-tree */ + if(H5D__bt2_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree") + } /* end if */ + else /* Patch the top level file pointer contained in bt2 if needed */ + if(H5B2_patch_file(idx_info->storage->u.btree2.bt2, idx_info->f) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch v2 B-tree file pointer") + + /* Set convenience pointer to v2 B-tree structure */ + bt2 = idx_info->storage->u.btree2.bt2; + + /* Clear the found record */ + found_rec.chunk_addr = HADDR_UNDEF; + found_rec.nbytes = 0; + found_rec.filter_mask = 0; + + /* Prepare user data for compare callback */ + bt2_udata.rec.chunk_addr = HADDR_UNDEF; + bt2_udata.ndims = idx_info->layout->ndims - 1; + + /* Set the chunk offset to be searched for */ + for(u = 0; u < (idx_info->layout->ndims - 1); u++) + bt2_udata.rec.scaled[u] = udata->common.scaled[u]; + + /* Go get chunk information from v2 B-tree */ + if(H5B2_find(bt2, idx_info->dxpl_id, &bt2_udata, H5D__bt2_found_cb, &found_rec) < 0) + HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "can't find object in v2 B-tree") + + /* Set common info for the chunk */ + udata->chunk_block.offset = found_rec.chunk_addr; + + /* Check for setting other info */ + if(H5F_addr_defined(udata->chunk_block.offset)) { + /* Sanity check */ + HDassert(0 != found_rec.nbytes); + + /* Set other info for the chunk */ + if(idx_info->pline->nused > 0) { /* filtered chunk */ + udata->chunk_block.length = found_rec.nbytes; + udata->filter_mask = found_rec.filter_mask; + } /* end if */ + else { /* non-filtered chunk */ + udata->chunk_block.length = idx_info->layout->size; + udata->filter_mask = 0; + } /* end else */ + } /* end if */ + else { + udata->chunk_block.length = 0; + udata->filter_mask = 0; + } /* end else */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__bt2_idx_get_addr() */ + + 
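The two size computations above, the encoded-chunk-size length in H5D__bt2_crt_context() and the raw record size in H5D__bt2_idx_create(), can be checked with a standalone sketch. The program below is not part of the change: log2_gen() is a simplified stand-in for H5VM_log2_gen(), and the chunk size, address size, and rank are made-up example values.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for H5VM_log2_gen(): index of the highest set bit */
static unsigned
log2_gen(uint64_t n)
{
    unsigned r = 0;

    while(n >>= 1)
        r++;
    return r;
}

int
main(void)
{
    uint64_t chunk_size  = 1024 * 1024;     /* hypothetical 1 MiB chunk */
    unsigned sizeof_addr = 8;               /* hypothetical 8-byte file addresses */
    unsigned ndims       = 3;               /* hypothetical dataset rank (layout->ndims - 1) */
    unsigned chunk_size_len, unfilt_rrec_size, filt_rrec_size;

    /* Same formula as H5D__bt2_crt_context() / H5D__bt2_idx_create():
     * bytes needed to hold the chunk size, plus one spare byte in case a
     * filter makes the chunk larger, capped at 8 bytes. */
    chunk_size_len = 1 + ((log2_gen(chunk_size) + 8) / 8);
    if(chunk_size_len > 8)
        chunk_size_len = 8;

    /* Raw record sizes as set up for bt2_cparam.rrec_size */
    unfilt_rrec_size = sizeof_addr + ndims * 8;               /* address + 64-bit scaled offsets */
    filt_rrec_size   = unfilt_rrec_size + chunk_size_len + 4; /* + encoded chunk size + filter mask */

    printf("chunk_size_len   = %u\n", chunk_size_len);         /* prints 4  */
    printf("unfiltered rrec  = %u bytes\n", unfilt_rrec_size);  /* prints 32 */
    printf("filtered rrec    = %u bytes\n", filt_rrec_size);    /* prints 40 */

    return 0;
}

For a 1 MiB chunk this gives a 4-byte encoded chunk size (3 bytes for the value plus the one spare byte for filter expansion), so an unfiltered record for a rank-3 dataset occupies 8 + 3*8 = 32 bytes and a filtered one 32 + 4 + 4 = 40 bytes.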
+/*-------------------------------------------------------------------------
+ * Function:	H5D__bt2_idx_iterate_cb
+ *
+ * Purpose:	Translate the B-tree specific chunk record into a generic
+ *		form and make the callback to the generic chunk callback
+ *		routine.
+ *		This is the callback for H5B2_iterate() which is called in
+ *		H5D__bt2_idx_iterate().
+ *
+ * Return:	Success:	Non-negative
+ *		Failure:	Negative
+ *
+ * Programmer:	Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__bt2_idx_iterate_cb(const void *_record, void *_udata)
+{
+    H5D_bt2_it_ud_t *udata = (H5D_bt2_it_ud_t *)_udata; /* User data */
+    const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* Native record */
+    int ret_value = -1;	/* Return value */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Make "generic chunk" callback */
+    if((ret_value = (udata->cb)(record, udata->udata)) < 0)
+        HERROR(H5E_DATASET, H5E_CALLBACK, "failure in generic chunk iterator callback");
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__bt2_idx_iterate_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:	H5D__bt2_idx_iterate
+ *
+ * Purpose:	Iterate over the chunks in an index, making a callback
+ *		for each one.
+ *
+ * Return:	Non-negative on success/Negative on failure
+ *
+ * Programmer:	Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__bt2_idx_iterate(const H5D_chk_idx_info_t *idx_info,
+    H5D_chunk_cb_func_t chunk_cb, void *chunk_udata)
+{
+    H5B2_t *bt2;		/* v2 B-tree handle for indexing chunks */
+    H5D_bt2_it_ud_t udata;	/* User data for B-tree iterator callback */
+    int ret_value = FAIL;	/* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+    HDassert(H5F_addr_defined(idx_info->storage->idx_addr));
+    HDassert(chunk_cb);
+    HDassert(chunk_udata);
+
+    /* Check if the v2 B-tree is open yet */
+    if(NULL == idx_info->storage->u.btree2.bt2) {
+        /* Open existing v2 B-tree */
+        if(H5D__bt2_idx_open(idx_info) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree")
+    } /* end if */
+    else /* Patch the top level file pointer contained in bt2 if needed */
+        if(H5B2_patch_file(idx_info->storage->u.btree2.bt2, idx_info->f) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't patch v2 B-tree file pointer")
+
+    /* Set convenience pointer to v2 B-tree structure */
+    bt2 = idx_info->storage->u.btree2.bt2;
+
+    /* Prepare user data for iterate callback */
+    udata.cb = chunk_cb;
+    udata.udata = chunk_udata;
+
+    /* Iterate over the records in the v2 B-tree */
+    if((ret_value = H5B2_iterate(bt2, idx_info->dxpl_id, H5D__bt2_idx_iterate_cb, &udata)) < 0)
+        HERROR(H5E_DATASET, H5E_BADITER, "unable to iterate over chunk v2 B-tree");
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__bt2_idx_iterate() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:	H5D__bt2_remove_cb()
+ *
+ * Purpose:	Free space for 'dataset chunk' object as v2 B-tree
+ *		is being deleted or v2 B-tree node is removed.
+ *		This is the callback for H5B2_remove() and H5B2_delete()
+ *		which are called in H5D__bt2_idx_remove() and H5D__bt2_idx_delete().
+ * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_remove_cb(const void *_record, void *_udata) +{ + const H5D_chunk_rec_t *record = (const H5D_chunk_rec_t *)_record; /* The native record */ + H5D_bt2_remove_ud_t *udata = (H5D_bt2_remove_ud_t *)_udata; /* User data for removal callback */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(udata); + HDassert(udata->f); + + /* Free the space in the file for the object being removed */ + H5_CHECK_OVERFLOW(record->nbytes, uint32_t, hsize_t); + if(H5MF_xfree(udata->f, H5FD_MEM_DRAW, udata->dxpl_id, record->chunk_addr, (hsize_t)record->nbytes) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__bt2_remove_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_remove + * + * Purpose: Remove chunk from index. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata) +{ + H5B2_t *bt2; /* v2 B-tree handle for indexing chunks */ + H5D_bt2_remove_ud_t remove_udata; /* User data for removal callback */ + H5D_bt2_ud_t bt2_udata; /* User data for v2 B-tree find call */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(udata); + + /* Check if the v2 B-tree is open yet */ + if(NULL == idx_info->storage->u.btree2.bt2) + /* Open existing v2 B-tree */ + if(H5D__bt2_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree") + + /* Set convenience pointer to v2 B-tree structure */ + bt2 = idx_info->storage->u.btree2.bt2; + + /* Initialize user data for removal callback */ + remove_udata.f = idx_info->f; + remove_udata.dxpl_id = idx_info->dxpl_id; + + /* Prepare user data for compare callback */ + bt2_udata.ndims = idx_info->layout->ndims - 1; + + /* Initialize the record to search for */ + for(u = 0; u < (idx_info->layout->ndims - 1); u++) + bt2_udata.rec.scaled[u] = udata->scaled[u]; + + /* Remove the record for the "dataset chunk" object from the v2 B-tree */ + /* (space in the file for the object is freed in the 'remove' callback) */ + if(H5B2_remove(bt2, idx_info->dxpl_id, &bt2_udata, H5D__bt2_remove_cb, &remove_udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "can't remove object from B-tree") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__bt2_idx_remove() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_delete + * + * Purpose: Delete index and raw data storage for entire dataset + * (i.e. all chunks) + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + * Modifications: + * Vailin Choi; March 2011 + * Initialize size of an unfiltered chunk. 
+ *		This is a fix for the assertion failure in:
+ *		[src/H5FSsection.c:968: H5FS_sect_link_size: Assertion `bin < sinfo->nbins' failed.]
+ *		which is uncovered by test_unlink_chunked_dataset() in test/unlink.c
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_delete(const H5D_chk_idx_info_t *idx_info)
+{
+    H5D_bt2_remove_ud_t remove_udata;	/* User data for removal callback */
+    H5B2_remove_t remove_op;		/* The removal callback */
+    H5D_bt2_ctx_ud_t u_ctx;		/* data for context call */
+    herr_t ret_value = SUCCEED;		/* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Sanity checks */
+    HDassert(idx_info);
+    HDassert(idx_info->f);
+    HDassert(idx_info->pline);
+    HDassert(idx_info->layout);
+    HDassert(idx_info->storage);
+
+    /* Check if the index data structure has been allocated */
+    if(H5F_addr_defined(idx_info->storage->idx_addr)) {
+        /* Set up user data for creating context */
+        u_ctx.f = idx_info->f;
+        u_ctx.ndims = idx_info->layout->ndims - 1;
+        u_ctx.chunk_size = idx_info->layout->size;
+        u_ctx.dim = idx_info->layout->dim;
+
+        /* Initialize user data for removal callback */
+        remove_udata.f = idx_info->f;
+        remove_udata.dxpl_id = idx_info->dxpl_id;
+
+        /* Set remove operation. */
+        remove_op = H5D__bt2_remove_cb;
+
+        /* Delete the v2 B-tree */
+        /* (space in the file for each object is freed in the 'remove' callback) */
+        if(H5B2_delete(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &u_ctx, remove_op, &remove_udata) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "can't delete v2 B-tree")
+
+        idx_info->storage->idx_addr = HADDR_UNDEF;
+    } /* end if */
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__bt2_idx_delete() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:	H5D__bt2_idx_copy_setup
+ *
+ * Purpose:	Set up any necessary information for copying chunks
+ *
+ * Return:	Non-negative on success/Negative on failure
+ *
+ * Programmer:	Vailin Choi; June 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__bt2_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src,
+    const H5D_chk_idx_info_t *idx_info_dst)
+{
+    herr_t ret_value = SUCCEED;	/* Return value */
+
+    FUNC_ENTER_STATIC
+
+    /* Source file */
+    HDassert(idx_info_src);
+    HDassert(idx_info_src->f);
+    HDassert(idx_info_src->pline);
+    HDassert(idx_info_src->layout);
+    HDassert(idx_info_src->storage);
+
+    /* Destination file */
+    HDassert(idx_info_dst);
+    HDassert(idx_info_dst->f);
+    HDassert(idx_info_dst->pline);
+    HDassert(idx_info_dst->layout);
+    HDassert(idx_info_dst->storage);
+    HDassert(!H5F_addr_defined(idx_info_dst->storage->idx_addr));
+
+    /* Check if the source v2 B-tree is open yet */
+    if(NULL == idx_info_src->storage->u.btree2.bt2)
+        if(H5D__bt2_idx_open(idx_info_src) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree")
+
+    /* Set copied metadata tag */
+    H5_BEGIN_TAG(idx_info_dst->dxpl_id, H5AC__COPIED_TAG, FAIL);
+
+    /* Create v2 B-tree that describes the chunked dataset in the destination file */
+    if(H5D__bt2_idx_create(idx_info_dst) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunked storage")
+    HDassert(H5F_addr_defined(idx_info_dst->storage->idx_addr));
+
+    /* Reset metadata tag */
+    H5_END_TAG(FAIL);
+
+done:
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__bt2_idx_copy_setup() */
+
+
+/*-------------------------------------------------------------------------
+ * Function:
H5D__bt2_idx_copy_shutdown + * + * Purpose: Shutdown any information from copying chunks + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_copy_shutdown(H5O_storage_chunk_t *storage_src, + H5O_storage_chunk_t *storage_dst, hid_t H5_ATTR_UNUSED dxpl_id) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(storage_src); + HDassert(storage_src->u.btree2.bt2); + HDassert(storage_dst); + HDassert(storage_dst->u.btree2.bt2); + + /* Close v2 B-tree for source file */ + if(H5B2_close(storage_src->u.btree2.bt2, dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close v2 B-tree") + storage_src->u.btree2.bt2 = NULL; + + /* Close v2 B-tree for destination file */ + if(H5B2_close(storage_dst->u.btree2.bt2, dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close v2 B-tree") + storage_dst->u.btree2.bt2 = NULL; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__bt2_idx_copy_shutdown() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_size + * + * Purpose: Retrieve the amount of index storage for chunked dataset + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) +{ + H5B2_t *bt2_cdset = NULL; /* Pointer to v2 B-tree structure */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(index_size); + + /* Open v2 B-tree */ + if(H5D__bt2_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open v2 B-tree") + + /* Set convenience pointer to v2 B-tree structure */ + bt2_cdset = idx_info->storage->u.btree2.bt2; + + /* Get v2 B-tree size for indexing chunked dataset */ + if(H5B2_size(bt2_cdset, idx_info->dxpl_id, index_size) < 0) + HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "can't retrieve v2 B-tree storage info for chunked dataset") + +done: + /* Close v2 B-tree index */ + if(bt2_cdset && H5B2_close(bt2_cdset, idx_info->dxpl_id) < 0) + HDONE_ERROR(H5E_SYM, H5E_CLOSEERROR, FAIL, "can't close v2 B-tree for tracking chunked dataset") + idx_info->storage->u.btree2.bt2 = NULL; + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__bt2_idx_size() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_reset + * + * Purpose: Reset indexing information. 
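 *		(Annotation: this clears the cached v2 B-tree pointer and,
 *		when reset_addr is set, marks the index address undefined.)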
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr) +{ + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(storage); + + /* Reset index info */ + if(reset_addr) + storage->idx_addr = HADDR_UNDEF; + storage->u.btree2.bt2 = NULL; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__bt2_idx_reset() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_dump + * + * Purpose: Dump indexing information to a stream. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream) +{ + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(storage); + HDassert(stream); + + HDfprintf(stream, " Address: %a\n", storage->idx_addr); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__bt2_idx_dump() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__bt2_idx_dest + * + * Purpose: Release indexing information in memory. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi; June 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__bt2_idx_dest(const H5D_chk_idx_info_t *idx_info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->storage); + + /* Check if the v2-btree is open */ + if(idx_info->storage->u.btree2.bt2) { + /* Close v2 B-tree */ + if(H5B2_close(idx_info->storage->u.btree2.bt2, idx_info->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "can't close v2 B-tree") + idx_info->storage->u.btree2.bt2 = NULL; + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__bt2_idx_dest() */ + diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index da3959c..7c4eb15 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -75,7 +75,10 @@ /* Sanity check on chunk index types: commonly used by a lot of routines in this file */ #define H5D_CHUNK_STORAGE_INDEX_CHK(storage) \ - HDassert((H5D_CHUNK_IDX_BTREE == storage->idx_type && H5D_COPS_BTREE == storage->ops)); + HDassert((H5D_CHUNK_IDX_EARRAY == storage->idx_type && H5D_COPS_EARRAY == storage->ops) || \ + (H5D_CHUNK_IDX_FARRAY == storage->idx_type && H5D_COPS_FARRAY == storage->ops) || \ + (H5D_CHUNK_IDX_BT2 == storage->idx_type && H5D_COPS_BT2 == storage->ops) || \ + (H5D_CHUNK_IDX_BTREE == storage->idx_type && H5D_COPS_BTREE == storage->ops)); /* * Feature: If this constant is defined then every cache preemption and load @@ -103,6 +106,11 @@ /*#define H5D_CHUNK_DEBUG */ +/* Flags for the "edge_chunk_state" field below */ +#define H5D_RDCC_DISABLE_FILTERS 0x01u /* Disable filters on this chunk */ +#define H5D_RDCC_NEWLY_DISABLED_FILTERS 0x02u /* Filters have been disabled since + * the last flush */ + /******************/ /* Local Typedefs */ @@ -113,6 +121,7 @@ typedef struct H5D_rdcc_ent_t { hbool_t locked; /*entry is locked in cache */ hbool_t dirty; /*needs to be written to disk? 
*/ hbool_t deleted; /*chunk about to be deleted */ + unsigned edge_chunk_state; /*states related to edge chunks (see above) */ hsize_t scaled[H5O_LAYOUT_NDIMS]; /*scaled chunk 'name' (coordinates) */ uint32_t rd_count; /*bytes remaining to be read */ uint32_t wr_count; /*bytes remaining to be written */ @@ -174,6 +183,8 @@ typedef struct H5D_chunk_it_ud3_t { /* needed for compressed variable-length data */ const H5O_pline_t *pline; /* Filter pipeline */ + unsigned dset_ndims; /* Number of dimensions in dataset */ + const hsize_t *dset_dims; /* Dataset dimensions */ /* needed for copy object pointed by refs */ H5O_copy_t *cpy_info; /* Copy options */ @@ -255,17 +266,18 @@ static herr_t H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t reset); static herr_t H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *dxpl_cache, H5D_rdcc_ent_t *ent, hbool_t flush); +static hbool_t H5D__chunk_is_partial_edge_chunk(unsigned dset_ndims, + const uint32_t *chunk_dims, const hsize_t *chunk_scaled, const hsize_t *dset_dims); static void *H5D__chunk_lock(const H5D_io_info_t *io_info, - H5D_chunk_ud_t *udata, hbool_t relax); + H5D_chunk_ud_t *udata, hbool_t relax, hbool_t prev_unfilt_chunk); static herr_t H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata, hbool_t dirty, void *chunk, uint32_t naccessed); static herr_t H5D__chunk_cache_prune(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *dxpl_cache, size_t size); -static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata); +static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk); static herr_t H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, - const H5F_block_t *old_chunk, H5F_block_t *new_chunk, hbool_t *need_insert, - hsize_t scaled[]); + const H5F_block_t *old_chunk, H5F_block_t *new_chunk, hbool_t *need_insert); #ifdef H5_HAVE_PARALLEL static herr_t H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id, H5D_chunk_coll_info_t *chunk_info, size_t chunk_size, const void *fill_buf); @@ -411,7 +423,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, /* Create the chunk it if it doesn't exist, or reallocate the chunk * if its size changed. */ - if(H5D__chunk_file_alloc(&idx_info, &old_chunk, &udata.chunk_block, &need_insert, scaled) < 0) + if(H5D__chunk_file_alloc(&idx_info, &old_chunk, &udata.chunk_block, &need_insert) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk") /* Make sure the address of the chunk is returned. */ @@ -539,6 +551,69 @@ done: /*------------------------------------------------------------------------- + * Function: H5D__chunk_set_sizes + * + * Purpose: Sets chunk and type sizes. + * + * Return: SUCCEED/FAIL + * + * Programmer: Dana Robinson + * December 2015 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D__chunk_set_sizes(H5D_t *dset) +{ + uint64_t chunk_size; /* Size of chunk in bytes */ + unsigned max_enc_bytes_per_dim; /* Max. 
number of bytes required to encode this dimension */ + unsigned u; /* Iterator */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(dset); + + /* Increment # of chunk dimensions, to account for datatype size as last element */ + dset->shared->layout.u.chunk.ndims++; + + /* Set the last dimension of the chunk size to the size of the datatype */ + dset->shared->layout.u.chunk.dim[dset->shared->layout.u.chunk.ndims - 1] = (uint32_t)H5T_GET_SIZE(dset->shared->type); + + /* Compute number of bytes to use for encoding chunk dimensions */ + max_enc_bytes_per_dim = 0; + for(u = 0; u < (unsigned)dset->shared->layout.u.chunk.ndims; u++) { + unsigned enc_bytes_per_dim; /* Number of bytes required to encode this dimension */ + + /* Get encoded size of dim, in bytes */ + enc_bytes_per_dim = (H5VM_log2_gen(dset->shared->layout.u.chunk.dim[u]) + 8) / 8; + + /* Check if this is the largest value so far */ + if(enc_bytes_per_dim > max_enc_bytes_per_dim) + max_enc_bytes_per_dim = enc_bytes_per_dim; + } /* end for */ + HDassert(max_enc_bytes_per_dim > 0 && max_enc_bytes_per_dim <= 8); + dset->shared->layout.u.chunk.enc_bytes_per_dim = max_enc_bytes_per_dim; + + /* Compute and store the total size of a chunk */ + /* (Use 64-bit value to ensure that we can detect >4GB chunks) */ + for(u = 1, chunk_size = (uint64_t)dset->shared->layout.u.chunk.dim[0]; u < dset->shared->layout.u.chunk.ndims; u++) + chunk_size *= (uint64_t)dset->shared->layout.u.chunk.dim[u]; + + /* Check for chunk larger than can be represented in 32-bits */ + /* (Chunk size is encoded in 32-bit value in v1 B-tree records) */ + if(chunk_size > (uint64_t)0xffffffff) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be < 4GB") + + H5_CHECKED_ASSIGN(dset->shared->layout.u.chunk.size, uint32_t, chunk_size, uint64_t); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__chunk_set_sizes */ + + +/*------------------------------------------------------------------------- * Function: H5D__chunk_construct * * Purpose: Constructs new chunked layout information for dataset @@ -553,8 +628,6 @@ done: static herr_t H5D__chunk_construct(H5F_t H5_ATTR_UNUSED *f, H5D_t *dset) { - const H5T_t *type = dset->shared->type; /* Convenience pointer to dataset's datatype */ - uint64_t chunk_size; /* Size of chunk in bytes */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -567,22 +640,18 @@ H5D__chunk_construct(H5F_t H5_ATTR_UNUSED *f, H5D_t *dset) /* Check for invalid chunk dimension rank */ if(0 == dset->shared->layout.u.chunk.ndims) HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "no chunk information set?") - - /* Set up layout information */ if(dset->shared->layout.u.chunk.ndims != dset->shared->ndims) HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "dimensionality of chunks doesn't match the dataspace") - /* Increment # of chunk dimensions, to account for datatype size as last element */ - dset->shared->layout.u.chunk.ndims++; + /* Set chunk sizes */ + if(H5D__chunk_set_sizes(dset) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "unable to set chunk sizes") HDassert((unsigned)(dset->shared->layout.u.chunk.ndims) <= NELMTS(dset->shared->layout.u.chunk.dim)); /* Chunked storage is not compatible with external storage (currently) */ if(dset->shared->dcpl_cache.efl.nused > 0) HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "external storage not supported with chunked layout") - /* Set the last dimension of the chunk size to the size of the datatype */ - 
dset->shared->layout.u.chunk.dim[dset->shared->layout.u.chunk.ndims - 1] = (uint32_t)H5T_GET_SIZE(type); - /* Sanity check dimensions */ for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++) { /* Don't allow zero-sized chunk dimensions */ @@ -598,19 +667,6 @@ H5D__chunk_construct(H5F_t H5_ATTR_UNUSED *f, H5D_t *dset) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be <= maximum dimension size for fixed-sized dimensions") } /* end for */ - /* Compute the total size of a chunk */ - /* (Use 64-bit value to ensure that we can detect >4GB chunks) */ - for(u = 1, chunk_size = (uint64_t)dset->shared->layout.u.chunk.dim[0]; u < dset->shared->layout.u.chunk.ndims; u++) - chunk_size *= (uint64_t)dset->shared->layout.u.chunk.dim[u]; - - /* Check for chunk larger than can be represented in 32-bits */ - /* (Chunk size is encoded in 32-bit value in v1 B-tree records) */ - if(chunk_size > (uint64_t)0xffffffff) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be < 4GB") - - /* Retain computed chunk size */ - H5_CHECKED_ASSIGN(dset->shared->layout.u.chunk.size, uint32_t, chunk_size, uint64_t); - /* Reset address and pointer of the array struct for the chunked storage index */ if(H5D_chunk_idx_reset(&dset->shared->layout.storage.u.chunk, TRUE) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to reset chunked storage index") @@ -1788,6 +1844,7 @@ H5D__chunk_cacheable(const H5D_io_info_t *io_info, H5D_dset_info_t *dset_info, haddr_t caddr, hbool_t write_op) { const H5D_t *dataset = NULL; /* Local pointer to dataset */ + hbool_t has_filters = FALSE; /* Whether there are filters on the chunk or not */ htri_t ret_value = FAIL; /* Return value */ FUNC_ENTER_PACKAGE @@ -1797,8 +1854,23 @@ H5D__chunk_cacheable(const H5D_io_info_t *io_info, H5D_dset_info_t *dset_info, dataset = dset_info->dset; HDassert(dataset); - /* Must bring the whole chunk in if there are any filters */ - if(dataset->shared->dcpl_cache.pline.nused > 0) + /* Must bring the whole chunk in if there are any filters on the chunk. + * Make sure to check if filters are on the dataset but disabled for the + * chunk because it is a partial edge chunk. 
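 * (Annotation, not part of the original change: for example, with dataset
 * dimensions {10, 10} and chunk dimensions {4, 4}, the chunks in the last
 * row and column of chunks extend past the dataset boundary; when
 * H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS is set, those partial
 * edge chunks are kept unfiltered.)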
*/ + if(dataset->shared->dcpl_cache.pline.nused > 0) { + if(dataset->shared->layout.u.chunk.flags + & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) { + has_filters = !H5D__chunk_is_partial_edge_chunk( + dataset->shared->ndims, + dataset->shared->layout.u.chunk.dim, + dset_info->store->chunk.scaled, + dataset->shared->curr_dims); + } /* end if */ + else + has_filters = TRUE; + } /* end if */ + + if(has_filters) ret_value = TRUE; else { #ifdef H5_HAVE_PARALLEL @@ -1978,7 +2050,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, dset_info->store->chunk.scaled = chunk_info->scaled; /* Lock the chunk into the cache */ - if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE))) + if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE))) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") /* Set up the storage buffer information for this chunk */ @@ -2122,7 +2194,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, dset_info->store->chunk.scaled = chunk_info->scaled; /* Lock the chunk into the cache */ - if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk))) + if(NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE))) HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") /* Set up the storage buffer information for this chunk */ @@ -2145,7 +2217,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, udata.chunk_block.length = dset_info->dset->shared->layout.u.chunk.size; /* Allocate the chunk */ - if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, chunk_info->scaled) < 0) + if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level") /* Make sure the address of the chunk is returned. */ @@ -2651,6 +2723,7 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled, udata->chunk_block.offset = HADDR_UNDEF; udata->chunk_block.length = 0; udata->filter_mask = 0; + udata->new_unfilt_chunk = FALSE; /* Check for chunk in cache */ if(dset->shared->cache.chunk.nslots > 0) { @@ -2779,7 +2852,8 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t udata.chunk_idx = ent->chunk_idx; /* Should the chunk be filtered before writing it to disk? 
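 * (Annotation: with this change the answer is "no" whenever the cache
 * entry carries the H5D_RDCC_DISABLE_FILTERS flag, i.e. for an unfiltered
 * partial edge chunk.)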
*/ - if(dset->shared->dcpl_cache.pline.nused) { + if(dset->shared->dcpl_cache.pline.nused + && !(ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS)) { size_t alloc = udata.chunk_block.length; /* Bytes allocated for BUF */ size_t nbytes; /* Chunk size (in bytes) */ @@ -2818,10 +2892,27 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t /* Indicate that the chunk must be allocated */ must_alloc = TRUE; } /* end if */ - else if(!H5F_addr_defined(udata.chunk_block.offset)) + else if(!H5F_addr_defined(udata.chunk_block.offset)) { /* Indicate that the chunk must be allocated */ must_alloc = TRUE; + /* This flag could be set for this chunk, just remove and ignore it + */ + ent->edge_chunk_state &= ~H5D_RDCC_NEWLY_DISABLED_FILTERS; + } /* end else */ + else if(ent->edge_chunk_state & H5D_RDCC_NEWLY_DISABLED_FILTERS) { + /* Chunk on disk is still filtered, must insert to allocate correct + * size */ + must_alloc = TRUE; + + /* Set the disable filters field back to the standard disable + * filters setting, as it no longer needs to be inserted with every + * flush */ + ent->edge_chunk_state &= ~H5D_RDCC_NEWLY_DISABLED_FILTERS; + } /* end else */ + + HDassert(!(ent->edge_chunk_state & H5D_RDCC_NEWLY_DISABLED_FILTERS)); + /* Check if the chunk needs to be allocated (it also could exist already * and the chunk alloc operation could resize it) */ @@ -2836,7 +2927,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t /* Create the chunk it if it doesn't exist, or reallocate the chunk * if its size changed. */ - if(H5D__chunk_file_alloc(&idx_info, &(ent->chunk_block), &udata.chunk_block, &need_insert, ent->scaled) < 0) + if(H5D__chunk_file_alloc(&idx_info, &(ent->chunk_block), &udata.chunk_block, &need_insert) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level") /* Update the chunk entry's info, in case it was allocated or relocated */ @@ -2871,7 +2962,9 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t if(buf == ent->chunk) buf = NULL; if(ent->chunk != NULL) - ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline)); + ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, + ((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL + : &(dset->shared->dcpl_cache.pline))); } /* end if */ done: @@ -2887,7 +2980,9 @@ done: */ if(ret_value < 0 && point_of_no_return) if(ent->chunk) - ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline)); + ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, + ((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL + : &(dset->shared->dcpl_cache.pline))); FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL) } /* end H5D__chunk_flush_entry() */ @@ -2929,7 +3024,9 @@ H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t else { /* Don't flush, just free chunk */ if(ent->chunk != NULL) - ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline)); + ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, + ((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? 
NULL + : &(dset->shared->dcpl_cache.pline))); } /* end else */ /* Unlink from list */ @@ -3108,10 +3205,11 @@ done: */ static void * H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, - hbool_t relax) + hbool_t relax, hbool_t prev_unfilt_chunk) { const H5D_t *dset = io_info->dsets_info[0].dset; /* Local pointer to the dataset info */ const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info - always equal to the pline passed to H5D__chunk_mem_alloc */ + const H5O_pline_t *old_pline = pline; /* Old pipeline, i.e. pipeline used to read the chunk */ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */ const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */ H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */ @@ -3119,6 +3217,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache*/ H5D_rdcc_ent_t *ent; /*cache entry */ size_t chunk_size; /*size of a chunk */ + hbool_t disable_filters = FALSE; /* Whether to disable filters (when adding to cache) */ void *chunk = NULL; /*the file chunk */ void *ret_value = NULL; /* Return value */ @@ -3131,6 +3230,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, HDassert(dset); HDassert(TRUE == H5P_isa_class(io_info->md_dxpl_id, H5P_DATASET_XFER)); HDassert(TRUE == H5P_isa_class(io_info->raw_dxpl_id, H5P_DATASET_XFER)); + HDassert(!(udata->new_unfilt_chunk && prev_unfilt_chunk)); HDassert(!rdcc->tmp_head); /* Get the chunk's size */ @@ -3161,6 +3261,66 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, */ rdcc->stats.nhits++; + /* Make adjustments if the edge chunk status changed recently */ + if(pline->nused) { + /* If the chunk recently became an unfiltered partial edge chunk + * while in cache, we must make some changes to the entry */ + if(udata->new_unfilt_chunk) { + /* If this flag is set then partial chunk filters must be + * disabled, and the chunk must not have previously been a + * partial chunk (with disabled filters) */ + HDassert(layout->u.chunk.flags + & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS); + HDassert(!(ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS)); + HDassert(old_pline->nused); + + /* Disable filters. Set pline to NULL instead of just the + * default pipeline to make a quick failure more likely if the + * code is changed in an inappropriate/incomplete way. 
*/ + pline = NULL; + + /* Reallocate the chunk so H5D__chunk_mem_xfree doesn't get confused + */ + if(NULL == (chunk = H5D__chunk_mem_alloc(chunk_size, pline))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk") + HDmemcpy(chunk, ent->chunk, chunk_size); + ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, old_pline); + ent->chunk = (uint8_t *)chunk; + chunk = NULL; + + /* Mark the chunk as having filters disabled as well as "newly + * disabled" so it is inserted on flush */ + ent->edge_chunk_state |= H5D_RDCC_DISABLE_FILTERS; + ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS; + } /* end if */ + else if(prev_unfilt_chunk) { + /* If this flag is set then partial chunk filters must be + * disabled, and the chunk must have previously been a partial + * chunk (with disabled filters) */ + HDassert(layout->u.chunk.flags + & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS); + HDassert((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS)); + HDassert(pline->nused); + + /* Mark the old pipeline as having been disabled */ + old_pline = NULL; + + /* Reallocate the chunk so H5D__chunk_mem_xfree doesn't get confused + */ + if(NULL == (chunk = H5D__chunk_mem_alloc(chunk_size, pline))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk") + HDmemcpy(chunk, ent->chunk, chunk_size); + + ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk, old_pline); + ent->chunk = (uint8_t *)chunk; + chunk = NULL; + + /* Mark the chunk as having filters enabled */ + ent->edge_chunk_state &= ~(H5D_RDCC_DISABLE_FILTERS + | H5D_RDCC_NEWLY_DISABLED_FILTERS); + } /* end else */ + } /* end if */ + /* * If the chunk is not at the beginning of the cache; move it backward * by one slot. This is how we implement the LRU preemption @@ -3189,6 +3349,39 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, chunk_addr = udata->chunk_block.offset; chunk_alloc = udata->chunk_block.length; + /* Check if we should disable filters on this chunk */ + if(pline->nused) { + if(udata->new_unfilt_chunk) { + HDassert(layout->u.chunk.flags + & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS); + + /* Disable the filters for writing */ + disable_filters = TRUE; + pline = NULL; + } /* end if */ + else if(prev_unfilt_chunk) { + HDassert(layout->u.chunk.flags + & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS); + + /* Mark the filters as having been previously disabled (for the + * chunk as currently on disk) - disable the filters for reading + */ + old_pline = NULL; + } /* end if */ + else if(layout->u.chunk.flags + & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) { + /* Check if this is an edge chunk */ + if(H5D__chunk_is_partial_edge_chunk(dset->shared->ndims, + layout->u.chunk.dim, io_info->dsets_info[0].store->chunk.scaled, + dset->shared->curr_dims)) { + /* Disable the filters for both writing and reading */ + disable_filters = TRUE; + old_pline = NULL; + pline = NULL; + } /* end if */ + } /* end if */ + } /* end if */ + if(relax) { /* * Not in the cache, but we're about to overwrite the whole thing @@ -3218,16 +3411,32 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, /* Chunk size on disk isn't [likely] the same size as the final chunk * size in memory, so allocate memory big enough. */ - if(NULL == (chunk = H5D__chunk_mem_alloc(my_chunk_alloc, pline))) + if(NULL == (chunk = H5D__chunk_mem_alloc(my_chunk_alloc, (udata->new_unfilt_chunk ? 
old_pline : pline)))) HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk") if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, my_chunk_alloc, io_info->raw_dxpl_id, chunk) < 0) HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk") - if(pline->nused) - if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), io_info->dxpl_cache->err_detect, - io_info->dxpl_cache->filter_cb, &my_chunk_alloc, &buf_alloc, &chunk) < 0) + if(old_pline && old_pline->nused) { + if(H5Z_pipeline(old_pline, H5Z_FLAG_REVERSE, + &(udata->filter_mask), + io_info->dxpl_cache->err_detect, + io_info->dxpl_cache->filter_cb, + &my_chunk_alloc, &buf_alloc, &chunk) < 0) HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed") + /* Reallocate chunk if necessary */ + if(udata->new_unfilt_chunk) { + void *tmp_chunk = chunk; + + if(NULL == (chunk = H5D__chunk_mem_alloc(my_chunk_alloc, pline))) { + (void)H5D__chunk_mem_xfree(tmp_chunk, old_pline); + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk") + } /* end if */ + HDmemcpy(chunk, tmp_chunk, chunk_size); + (void)H5D__chunk_mem_xfree(tmp_chunk, old_pline); + } /* end if */ + } /* end if */ + /* Increment # of cache misses */ rdcc->stats.nmisses++; } /* end if */ @@ -3296,6 +3505,10 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, if(NULL == (ent = H5FL_CALLOC(H5D_rdcc_ent_t))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate raw data chunk entry") + ent->edge_chunk_state = disable_filters ? H5D_RDCC_DISABLE_FILTERS : 0; + if(udata->new_unfilt_chunk) + ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS; + /* Initialize the new entry */ ent->chunk_block.offset = chunk_addr; ent->chunk_block.length = chunk_alloc; @@ -3391,6 +3604,7 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, { const H5O_layout_t *layout = &(io_info->dsets_info[0].dset->shared->layout); /* Dataset layout */ const H5D_rdcc_t *rdcc = &(io_info->dsets_info[0].dset->shared->cache.chunk); + const H5D_t *dset = io_info->dsets_info[0].dset; /* Local pointer to the dataset info */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -3404,12 +3618,30 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, * It's not in the cache, probably because it's too big. If it's * dirty then flush it to disk. In any case, free the chunk. 
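+ * To reuse the normal flush path for a chunk that was never cached,
+ * a temporary ("fake") cache entry is filled in below and passed to
+ * the regular chunk flush routine, with its edge_chunk_state set to
+ * match how the chunk buffer was allocated.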
*/ + hbool_t is_unfiltered_edge_chunk = FALSE; /* Whether the chunk is an unfiltered edge chunk */ + + /* Check if we should disable filters on this chunk */ + if(udata->new_unfilt_chunk) { + HDassert(layout->u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS); + + is_unfiltered_edge_chunk = TRUE; + } /* end if */ + else if(layout->u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) { + /* Check if the chunk is an edge chunk, and disable filters if so */ + is_unfiltered_edge_chunk = H5D__chunk_is_partial_edge_chunk( + dset->shared->ndims, layout->u.chunk.dim, + io_info->dsets_info[0].store->chunk.scaled, dset->shared->curr_dims); + } /* end if */ + if(dirty) { H5D_rdcc_ent_t fake_ent; /* "fake" chunk cache entry */ HDmemset(&fake_ent, 0, sizeof(fake_ent)); fake_ent.dirty = TRUE; - HDmemcpy(fake_ent.scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims); + if(is_unfiltered_edge_chunk) + fake_ent.edge_chunk_state = H5D_RDCC_DISABLE_FILTERS; + if(udata->new_unfilt_chunk) + fake_ent.edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS; HDmemcpy(fake_ent.scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims); HDassert(layout->u.chunk.size > 0); fake_ent.chunk_idx = udata->chunk_idx; @@ -3422,7 +3654,8 @@ H5D__chunk_unlock(const H5D_io_info_t *io_info, } /* end if */ else { if(chunk) - chunk = H5D__chunk_mem_xfree(chunk, &(io_info->dsets_info[0].dset->shared->dcpl_cache.pline)); + chunk = H5D__chunk_mem_xfree(chunk, (is_unfiltered_edge_chunk ? NULL + : &(io_info->dsets_info[0].dset->shared->dcpl_cache.pline))); } /* end else */ } /* end if */ else { @@ -3568,9 +3801,12 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ unsigned filter_mask = 0; /* Filter mask for chunks that have them */ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */ const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */ + const H5O_pline_t def_pline = H5O_CRT_PIPELINE_DEF; /* Default pipeline */ const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */ H5D_fill_value_t fill_status; /* The fill value status */ hbool_t should_fill = FALSE; /* Whether fill values should be written */ + void *unfilt_fill_buf = NULL; /* Unfiltered fill value buffer */ + void **fill_buf = NULL; /* Pointer to the fill buffer to use for a chunk */ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ #ifdef H5_HAVE_PARALLEL @@ -3587,6 +3823,10 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ unsigned op_dim; /* Current operating dimension */ H5D_fill_buf_info_t fb_info; /* Dataset's fill buffer info */ hbool_t fb_info_init = FALSE; /* Whether the fill value buffer has been initialized */ + hbool_t has_unfilt_edge_chunks = FALSE; /* Whether there are partial edge chunks with disabled filters */ + hbool_t unfilt_edge_chunk_dim[H5O_LAYOUT_NDIMS]; /* Whether there are unfiltered edge chunks at the edge of each dimension */ + hsize_t edge_chunk_scaled[H5O_LAYOUT_NDIMS]; /* Offset of the unfiltered edge chunks at the edge of each dimension */ + unsigned nunfilt_edge_chunk_dims = 0; /* Number of dimensions on an edge */ const H5O_storage_chunk_t *sc = &(layout->storage.u.chunk); herr_t ret_value = SUCCEED; /* Return value */ @@ -3631,6 +3871,29 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ 
if(H5D__get_dxpl_cache(raw_dxpl_id, &dxpl_cache) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + /* Calculate the minimum and maximum chunk offsets in each dimension, and + * determine if there are any unfiltered partial edge chunks. Note that we + * assume here that all elements of space_dim are > 0. This is checked at + * the top of this function. */ + for(op_dim=0; op_dim<space_ndims; op_dim++) { + min_unalloc[op_dim] = (old_dim[op_dim] + chunk_dim[op_dim] - 1) / chunk_dim[op_dim]; + max_unalloc[op_dim] = (space_dim[op_dim] - 1) / chunk_dim[op_dim]; + + /* Calculate if there are unfiltered edge chunks at the edge of this + * dimension. Note the edge_chunk_scaled is uninitialized for + * dimensions where unfilt_edge_chunk_dim is FALSE. Also */ + if((layout->u.chunk.flags + & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) + && pline->nused > 0 + && space_dim[op_dim] % chunk_dim[op_dim] != 0) { + has_unfilt_edge_chunks = TRUE; + unfilt_edge_chunk_dim[op_dim] = TRUE; + edge_chunk_scaled[op_dim] = max_unalloc[op_dim]; + } /* end if */ + else + unfilt_edge_chunk_dim[op_dim] = FALSE; + } /* end for */ + /* Get original chunk size */ H5_CHECKED_ASSIGN(orig_chunk_size, size_t, layout->u.chunk.size, uint32_t); @@ -3662,6 +3925,11 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info") fb_info_init = TRUE; + /* Initialize the fill_buf pointer to the buffer in fb_info. If edge + * chunk filters are disabled, we will switch the buffer as appropriate + * for each chunk. */ + fill_buf = &fb_info.fill_buf; + /* Check if there are filters which need to be applied to the chunk */ /* (only do this in advance when the chunk info can be re-used (i.e. * it doesn't contain any non-default VL datatype fill values) @@ -3669,6 +3937,14 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ if(!fb_info.has_vlen_fill_type && pline->nused > 0) { size_t buf_size = orig_chunk_size; + /* If the dataset has disabled partial chunk filters, create a copy + * of the unfiltered fill_buf to use for partial chunks */ + if(has_unfilt_edge_chunks) { + if(NULL == (unfilt_fill_buf = H5D__chunk_mem_alloc(orig_chunk_size, &def_pline))) + HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for raw data chunk") + HDmemcpy(unfilt_fill_buf, fb_info.fill_buf, orig_chunk_size); + } /* end if */ + /* Push the chunk through the filters */ if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &orig_chunk_size, &buf_size, &fb_info.fill_buf) < 0) HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed") @@ -3687,14 +3963,6 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ idx_info.layout = &dset->shared->layout.u.chunk; idx_info.storage = &dset->shared->layout.storage.u.chunk; - /* Calculate the minimum and maximum chunk offsets in each dimension. Note - * that we assume here that all elements of space_dim are > 0. This is - * checked at the top of this function. 
*/ - for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) { - min_unalloc[op_dim] = (old_dim[op_dim] + chunk_dim[op_dim] - 1) / chunk_dim[op_dim]; - max_unalloc[op_dim] = (space_dim[op_dim] - 1) / chunk_dim[op_dim]; - } /* end for */ - /* Loop over all chunks */ /* The algorithm is: * For each dimension: @@ -3719,6 +3987,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ chunk_size = orig_chunk_size; for(op_dim = 0; op_dim < space_ndims; op_dim++) { H5D_chunk_ud_t udata; /* User data for querying chunk info */ + unsigned u; /* Local index variable */ int i; /* Local index variable */ /* Check if allocation along this dimension is really necessary */ @@ -3729,6 +3998,29 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ HDmemset(scaled, 0, (space_ndims * sizeof(scaled[0]))); scaled[op_dim] = min_unalloc[op_dim]; + if(has_unfilt_edge_chunks) { + /* Initialize nunfilt_edge_chunk_dims */ + nunfilt_edge_chunk_dims = 0; + for(u = 0; u < space_ndims; u++) + if(unfilt_edge_chunk_dim[u] && scaled[u] + == edge_chunk_scaled[u]) + nunfilt_edge_chunk_dims++; + + /* Initialize chunk_size and fill_buf */ + if(should_fill && !fb_info.has_vlen_fill_type) { + HDassert(fb_info_init); + HDassert(unfilt_fill_buf); + if(nunfilt_edge_chunk_dims) { + fill_buf = &unfilt_fill_buf; + chunk_size = layout->u.chunk.size; + } /* end if */ + else { + fill_buf = &fb_info.fill_buf; + chunk_size = orig_chunk_size; + } /* end else */ + } /* end if */ + } /* end if */ + carry = FALSE; } /* end else */ @@ -3744,12 +4036,12 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ /* Make sure the chunk is really in the dataset and outside the * original dimensions */ { - unsigned u; /* Local index variable */ + unsigned v; /* Local index variable */ hbool_t outside_orig = FALSE; - for(u = 0; u < space_ndims; u++) { - HDassert((scaled[u] * chunk_dim[u]) < space_dim[u]); - if((scaled[u] * chunk_dim[u]) >= old_dim[u]) + for(v = 0; v < space_ndims; v++) { + HDassert((scaled[v] * chunk_dim[v]) < space_dim[v]); + if((scaled[v] * chunk_dim[v]) >= old_dim[v]) outside_orig = TRUE; } /* end for */ HDassert(outside_orig); @@ -3760,6 +4052,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ if(fb_info_init && fb_info.has_vlen_fill_type) { /* Sanity check */ HDassert(should_fill); + HDassert(!unfilt_fill_buf); #ifdef H5_HAVE_PARALLEL HDassert(!using_mpi); /* Can't write VL datatypes in parallel currently */ #endif @@ -3778,7 +4071,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer") /* Check if there are filters which need to be applied to the chunk */ - if(pline->nused > 0) { + if((pline->nused > 0) && !nunfilt_edge_chunk_dims) { size_t nbytes = orig_chunk_size; /* Push the chunk through the filters */ @@ -3796,6 +4089,8 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ } /* end if */ else chunk_size = layout->u.chunk.size; + + HDassert(*fill_buf == fb_info.fill_buf); } /* end if */ /* Initialize the chunk information */ @@ -3807,7 +4102,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ udata.filter_mask = filter_mask; /* Allocate the chunk (with all processes) */ - if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, scaled) < 0) + if(H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, 
&need_insert) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level") HDassert(H5F_addr_defined(udata.chunk_block.offset)); @@ -3823,6 +4118,9 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ /* collect all chunk addresses to be written to write collectively at the end */ /* allocate/resize address array if no more space left */ + /* Note that if we add support for parallel filters we must + * also store an array of chunk sizes and pass it to the + * apporpriate collective write function */ if(0 == chunk_info.num_io % 1024) if(NULL == (chunk_info.addr = (haddr_t *)H5MM_realloc(chunk_info.addr, (chunk_info.num_io + 1024) * sizeof(haddr_t)))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "memory allocation failed for chunk addresses") @@ -3836,7 +4134,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ } /* end if */ else { #endif /* H5_HAVE_PARALLEL */ - if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, chunk_size, raw_dxpl_id, fb_info.fill_buf) < 0) + if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, chunk_size, raw_dxpl_id, *fill_buf) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file") #ifdef H5_HAVE_PARALLEL } /* end else */ @@ -3857,8 +4155,31 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ scaled[i] = min_unalloc[i]; else scaled[i] = 0; - } /* end if */ + + /* Check if we just left the edge in this dimension */ + if(unfilt_edge_chunk_dim[i] + && edge_chunk_scaled[i] == max_unalloc[i] + && scaled[i] < edge_chunk_scaled[i]) { + nunfilt_edge_chunk_dims--; + if(should_fill && nunfilt_edge_chunk_dims == 0 && !fb_info.has_vlen_fill_type) { + HDassert(!H5D__chunk_is_partial_edge_chunk(space_ndims, chunk_dim, scaled, space_dim)); + fill_buf = &fb_info.fill_buf; + chunk_size = orig_chunk_size; + } /* end if */ + } /* end if */ + } /* end if */ else { + /* Check if we just entered the edge in this dimension */ + if(unfilt_edge_chunk_dim[i] && scaled[i] == edge_chunk_scaled[i]) { + HDassert(edge_chunk_scaled[i] == max_unalloc[i]); + nunfilt_edge_chunk_dims++; + if(should_fill && nunfilt_edge_chunk_dims == 1 && !fb_info.has_vlen_fill_type) { + HDassert(H5D__chunk_is_partial_edge_chunk(space_ndims, chunk_dim, scaled, space_dim)); + fill_buf = &unfilt_fill_buf; + chunk_size = layout->u.chunk.size; + } /* end if */ + } /* end if */ + carry = FALSE; break; } /* end else */ @@ -3889,6 +4210,9 @@ done: if(fb_info_init && H5D__fill_term(&fb_info) < 0) HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info") + /* Free the unfiltered fill value buffer */ + unfilt_fill_buf = H5D__chunk_mem_xfree(unfilt_fill_buf, &def_pline); + #ifdef H5_HAVE_PARALLEL if(using_mpi && chunk_info.addr) H5MM_free(chunk_info.addr); @@ -3897,6 +4221,195 @@ done: FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL) } /* end H5D__chunk_allocate() */ + +/*------------------------------------------------------------------------- + * Function: H5D__chunk_update_old_edge_chunks + * + * Purpose: Update all chunks which were previously partial edge + * chunks and are now complete. Determines exactly which + * chunks need to be updated and locks each into cache using + * the 'prev_unfilt_chunk' flag, then unlocks it, causing + * filters to be applied as necessary. 
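+ * For example (illustrative): with 10x10 chunks and a dataset grown
+ * from 15x15 to 20x20, the chunks at scaled offsets (0,1), (1,0) and
+ * (1,1) were partial edge chunks (stored unfiltered) before the
+ * extend and are complete afterwards, so each one is locked with
+ * prev_unfilt_chunk set and pushed back through the filter pipeline
+ * on unlock. This only applies to datasets whose layout carries the
+ * H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS flag (in the
+ * public API, H5Pset_chunk_opts() with
+ * H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS).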
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * April 14, 2010 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D__chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id, hsize_t old_dim[]) +{ + hsize_t old_edge_chunk_sc[H5O_LAYOUT_NDIMS]; /* Offset of first previously incomplete chunk in each dimension */ + hsize_t max_edge_chunk_sc[H5O_LAYOUT_NDIMS]; /* largest offset of chunks that might need to be modified in each dimension */ + hbool_t new_full_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of chunks in this dimension needs to be modified */ + const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */ + const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */ + hsize_t chunk_sc[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */ + const uint32_t *chunk_dim = layout->u.chunk.dim; /* Convenience pointer to chunk dimensions */ + unsigned space_ndims; /* Dataset's space rank */ + const hsize_t *space_dim; /* Dataset's dataspace dimensions */ + unsigned op_dim; /* Current operationg dimension */ + H5D_io_info_t chk_io_info; /* Chunked I/O info object */ + H5D_chunk_ud_t chk_udata; /* User data for locking chunk */ + H5D_storage_t chk_store; /* Chunk storage information */ + H5D_dset_info_t chk_dset_info; /* Chunked I/O dset info object */ + H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */ + H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */ + void *chunk; /* The file chunk */ + hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */ + const H5O_storage_chunk_t *sc = &(layout->storage.u.chunk); + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Check args */ + HDassert(dset && H5D_CHUNKED == layout->type); + HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + H5D_CHUNK_STORAGE_INDEX_CHK(sc); + HDassert(TRUE == H5P_isa_class(dxpl_id, H5P_DATASET_XFER)); + HDassert(pline->nused > 0); + HDassert(layout->u.chunk.flags + & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS); + + /* Retrieve the dataset dimensions */ + space_dim = dset->shared->curr_dims; + space_ndims = dset->shared->ndims; + + /* The last dimension in chunk_offset is always 0 */ + chunk_sc[space_ndims] = (hsize_t)0; + + /* Check if any current dimensions are smaller than the chunk size, or if + * any old dimensions are 0. If so we do not have to do anything. */ + for(op_dim=0; op_dim<space_ndims; op_dim++) + if((space_dim[op_dim] < chunk_dim[op_dim]) || old_dim[op_dim] == 0) { + /* Reset any cached chunk info for this dataset */ + H5D__chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last); + HGOTO_DONE(SUCCEED) + } /* end if */ + + /* + * Initialize structures needed to lock chunks into cache + */ + /* Fill the DXPL cache values for later use */ + if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache") + + /* Set up chunked I/O info object, for operations on chunks (in callback). + * Note that we only need to set chunk_offset once, as the array's address + * will never change. 
*/ + chk_store.chunk.scaled = chunk_sc; + + chk_io_info.dxpl_cache = dxpl_cache; + chk_io_info.md_dxpl_id = dxpl_id; + chk_io_info.op_type = H5D_IO_OP_READ; + chk_io_info.raw_dxpl_id = H5AC_rawdata_dxpl_id; + + chk_dset_info.dset = dset; + chk_dset_info.store = &chk_store; + chk_dset_info.u.rbuf = NULL; + chk_io_info.dsets_info = &chk_dset_info; + + /* + * Determine the edges of the dataset which need to be modified + */ + for(op_dim=0; op_dim<space_ndims; op_dim++) { + /* Start off with this dimension marked as not needing to be modified */ + new_full_dim[op_dim] = FALSE; + + /* Calulate offset of first previously incomplete chunk in this + * dimension */ + old_edge_chunk_sc[op_dim] = (old_dim[op_dim] / chunk_dim[op_dim]); + + /* Calculate the largest offset of chunks that might need to be + * modified in this dimension */ + max_edge_chunk_sc[op_dim] = MIN((old_dim[op_dim] - 1) / chunk_dim[op_dim], + MAX((space_dim[op_dim] / chunk_dim[op_dim]), 1) - 1); + + /* Check for old_dim aligned with chunk boundary in this dimension, if + * so we do not need to modify chunks along the edge in this dimension + */ + if(old_dim[op_dim] % chunk_dim[op_dim] == 0) + continue; + + /* Check if the dataspace expanded enough to cause the old edge chunks + * in this dimension to become full */ + if((space_dim[op_dim]/chunk_dim[op_dim]) >= (old_edge_chunk_sc[op_dim] + 1)) + new_full_dim[op_dim] = TRUE; + } /* end for */ + + /* Main loop: fix old edge chunks */ + for(op_dim=0; op_dim<space_ndims; op_dim++) { + /* Check if allocation along this dimension is really necessary */ + if(!new_full_dim[op_dim]) + continue; + else { + HDassert(max_edge_chunk_sc[op_dim] == old_edge_chunk_sc[op_dim]); + + /* Reset the chunk offset indices */ + HDmemset(chunk_sc, 0, (space_ndims * sizeof(chunk_sc[0]))); + chunk_sc[op_dim] = old_edge_chunk_sc[op_dim]; + + carry = FALSE; + } /* end if */ + + while(!carry) { + int i; /* Local index variable */ + + /* Make sure the chunk is really a former edge chunk */ + HDassert(H5D__chunk_is_partial_edge_chunk(space_ndims, chunk_dim, chunk_sc, old_dim) + && !H5D__chunk_is_partial_edge_chunk(space_ndims, chunk_dim, chunk_sc, space_dim)); + + /* Lookup the chunk */ + if(H5D__chunk_lookup(dset, dxpl_id, chunk_sc, &chk_udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") + + /* If this chunk does not exist in cache or on disk, no need to do + * anything */ + if(H5F_addr_defined(chk_udata.chunk_block.offset) + || (UINT_MAX != chk_udata.idx_hint)) { + /* Lock the chunk into cache. H5D__chunk_lock will take care of + * updating the chunk to no longer be an edge chunk. */ + if(NULL == (chunk = (void *)H5D__chunk_lock(&chk_io_info, &chk_udata, FALSE, TRUE))) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk") + + /* Unlock the chunk */ + if(H5D__chunk_unlock(&chk_io_info, &chk_udata, TRUE, chunk, (uint32_t)0) < 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk") + } /* end if */ + + /* Increment indices */ + carry = TRUE; + for(i = ((int)space_ndims - 1); i >= 0; --i) { + if((unsigned)i != op_dim) { + ++chunk_sc[i]; + if(chunk_sc[i] > (hsize_t) max_edge_chunk_sc[i]) + chunk_sc[i] = 0; + else { + carry = FALSE; + break; + } /* end else */ + } /* end if */ + } /* end for */ + } /* end while(!carry) */ + + /* Adjust max_edge_chunk_sc so we don't modify the same chunk twice. + * Also check if this dimension started from 0 (and hence modified all + * of the old edge chunks. 
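+ * If the old edge plane in this dimension sits at scaled offset 0,
+ * the pass just completed has already visited every chunk that could
+ * need updating, so the dimension loop can stop early (the break
+ * below).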
*/ + if(old_edge_chunk_sc[op_dim] == 0) + break; + else + --max_edge_chunk_sc[op_dim]; + } /* end for(op_dim=0...) */ + + /* Reset any cached chunk info for this dataset */ + H5D__chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__chunk_update_old_edge_chunks() */ + #ifdef H5_HAVE_PARALLEL /*------------------------------------------------------------------------- @@ -4067,7 +4580,7 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata) +H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk) { const H5D_io_info_t *io_info = udata->io_info; /* Local pointer to I/O info */ const H5D_t *dset = io_info->dsets_info[0].dset; /* Local pointer to the dataset info */ @@ -4094,6 +4607,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata) /* Get the info for the chunk in the file */ if(H5D__chunk_lookup(dset, io_info->md_dxpl_id, scaled, &chk_udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") + chk_udata.new_unfilt_chunk = new_unfilt_chunk; /* If this chunk does not exist in cache or on disk, no need to do anything */ if(!H5F_addr_defined(chk_udata.chunk_block.offset) && UINT_MAX == chk_udata.idx_hint) @@ -4125,7 +4639,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata) HGOTO_ERROR(H5E_DATASET, H5E_CANTSELECT, FAIL, "unable to select hyperslab") /* Lock the chunk into the cache, to get a pointer to the chunk buffer */ - if(NULL == (chunk = (void *)H5D__chunk_lock(io_info, &chk_udata, FALSE))) + if(NULL == (chunk = (void *)H5D__chunk_lock(io_info, &chk_udata, FALSE, FALSE))) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to lock raw data chunk") @@ -4279,6 +4793,8 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) hsize_t max_mod_chunk_sc[H5O_LAYOUT_NDIMS]; /* Scaled offset of last chunk to modify in each dimension */ hssize_t max_fill_chunk_sc[H5O_LAYOUT_NDIMS]; /* Scaled offset of last chunk that might be filled in each dimension */ hbool_t fill_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension needs to be filled */ + hsize_t min_partial_chunk_sc[H5O_LAYOUT_NDIMS]; /* Offset of first partial (or empty) chunk in each dimension */ + hbool_t new_unfilt_dim[H5O_LAYOUT_NDIMS]; /* Whether the plane of edge chunks in this dimension are newly unfiltered */ H5D_chk_idx_info_t idx_info; /* Chunked index info */ H5D_io_info_t chk_io_info; /* Chunked I/O info object */ H5D_dset_info_t chk_dset_info; /* Chunked I/O dset info object */ @@ -4299,6 +4815,8 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) hsize_t scaled[H5O_LAYOUT_NDIMS]; /* Scaled offset of current chunk */ hsize_t hyper_start[H5O_LAYOUT_NDIMS]; /* Starting location of hyperslab */ uint32_t elmts_per_chunk; /* Elements in chunk */ + hbool_t disable_edge_filters = FALSE; /* Whether to disable filters on partial edge chunks */ + hbool_t new_unfilt_chunk = FALSE; /* Whether the chunk is newly unfiltered */ unsigned u; /* Local index variable */ const H5O_storage_chunk_t *sc = &(layout->storage.u.chunk); herr_t ret_value = SUCCEED; /* Return value */ @@ -4390,6 +4908,11 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) idx_udata.layout = &layout->u.chunk; idx_udata.storage = &layout->storage.u.chunk; + /* Determine if partial edge chunk filters are disabled */ + disable_edge_filters = (layout->u.chunk.flags + & 
H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) + && (idx_info.pline->nused > 0); + /* * Determine the chunks which need to be filled or removed */ @@ -4416,13 +4939,31 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) min_mod_chunk_sc[op_dim] = space_dim[op_dim] / chunk_dim[op_dim]; /* Determine if we need to fill chunks in this dimension */ - if((hssize_t)min_mod_chunk_sc[op_dim] == max_fill_chunk_sc[op_dim]) + if((hssize_t)min_mod_chunk_sc[op_dim] == max_fill_chunk_sc[op_dim]) { fill_dim[op_dim] = TRUE; - else + + /* If necessary, check if chunks in this dimension that need to + * be filled are new partial edge chunks */ + if(disable_edge_filters && old_dim[op_dim] >= (min_mod_chunk_sc[op_dim] + 1)) + new_unfilt_dim[op_dim] = TRUE; + else + new_unfilt_dim[op_dim] = FALSE; + } /* end if */ + else { fill_dim[op_dim] = FALSE; + new_unfilt_dim[op_dim] = FALSE; + } /* end else */ } /* end if */ - else + else { fill_dim[op_dim] = FALSE; + new_unfilt_dim[op_dim] = FALSE; + } /* end else */ + + /* If necessary, calculate the smallest offset of non-previously full + * chunks in this dimension, so we know these chunks were previously + * unfiltered */ + if(disable_edge_filters) + min_partial_chunk_sc[op_dim] = old_dim[op_dim] / chunk_dim[op_dim]; } /* end for */ /* Main loop: fill or remove chunks */ @@ -4462,8 +5003,27 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim) HDassert(fill_dim[op_dim]); HDassert(scaled[op_dim] == min_mod_chunk_sc[op_dim]); + /* Make sure this is an edge chunk */ + HDassert(H5D__chunk_is_partial_edge_chunk(space_ndims, layout->u.chunk.dim, scaled, space_dim)); + + /* Determine if the chunk just became an unfiltered chunk */ + if(new_unfilt_dim[op_dim]) { + new_unfilt_chunk = TRUE; + for(u = 0; u < space_ndims; u++) + if(scaled[u] == min_partial_chunk_sc[u]) { + new_unfilt_chunk = FALSE; + break; + } /* end if */ + } /* end if */ + + /* Make sure that, if we think this is a new unfiltered chunk, + * it was previously not an edge chunk */ + HDassert(!new_unfilt_dim[op_dim] || (!new_unfilt_chunk != + !H5D__chunk_is_partial_edge_chunk(space_ndims, layout->u.chunk.dim, scaled, old_dim))); + HDassert(!new_unfilt_chunk || new_unfilt_dim[op_dim]); + /* Fill the unused parts of the chunk */ - if(H5D__chunk_prune_fill(&udata) < 0) + if(H5D__chunk_prune_fill(&udata, new_unfilt_chunk) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write fill value") } /* end if */ else { @@ -4900,8 +5460,15 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) /* Check for filtered chunks */ if((is_vlen || fix_ref) && pline && pline->nused) { - must_filter = TRUE; - cb_struct.func = NULL; /* no callback function when failed */ + /* Check if we should disable filters on this chunk */ + if(udata->common.layout->flags + & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) { + /* Check if the chunk is an edge chunk, and disable filters if so */ + if(!H5D__chunk_is_partial_edge_chunk(udata->dset_ndims, udata->common.layout->dim, chunk_rec->scaled, udata->dset_dims)) + must_filter = TRUE; + } /* end if */ + else + must_filter = TRUE; } /* end if */ /* Resize the buf if it is too small to hold the data */ @@ -4934,6 +5501,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) if(must_filter) { unsigned filter_mask = chunk_rec->filter_mask; + cb_struct.func = NULL; /* no callback function when failed */ if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &filter_mask, H5Z_NO_EDC, cb_struct, &nbytes, 
&buf_size, &buf) < 0) HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "data pipeline read failed") } /* end if */ @@ -5012,7 +5580,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) udata_dst.common.layout->down_chunks, udata_dst.common.scaled); /* Allocate chunk in the file */ - if(H5D__chunk_file_alloc(udata->idx_info_dst, NULL, &udata_dst.chunk_block, &need_insert, udata_dst.common.scaled) < 0) + if(H5D__chunk_file_alloc(udata->idx_info_dst, NULL, &udata_dst.chunk_block, &need_insert) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level") /* Write chunk data to destination file */ @@ -5649,6 +6217,45 @@ done: /*------------------------------------------------------------------------- + * Function: H5D__chunk_is_partial_edge_chunk + * + * Purpose: Checks to see if the chunk is a partial edge chunk. + * Either dset or (dset_dims and dset_ndims) must be + * provided. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * 19 Nov 2009 + * + *------------------------------------------------------------------------- + */ +static hbool_t +H5D__chunk_is_partial_edge_chunk(unsigned dset_ndims, const uint32_t *chunk_dims, + const hsize_t scaled[], const hsize_t *dset_dims) +{ + unsigned u; /* Local index variable */ + hbool_t ret_value = FALSE; /* Return value */ + + FUNC_ENTER_STATIC_NOERR + + /* Check args */ + HDassert(scaled); + HDassert(dset_ndims > 0); + HDassert(dset_dims); + HDassert(chunk_dims); + + /* check if this is a partial edge chunk */ + for(u = 0; u < dset_ndims; u++) + if(((scaled[u] + 1) * chunk_dims[u]) > dset_dims[u]) + HGOTO_DONE(TRUE); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__chunk_is_partial_edge_chunk() */ + + +/*------------------------------------------------------------------------- * Function: H5D__chunk_file_alloc() * * Purpose: Chunk allocation: @@ -5664,7 +6271,7 @@ done: */ static herr_t H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old_chunk, - H5F_block_t *new_chunk, hbool_t *need_insert, hsize_t scaled[]) + H5F_block_t *new_chunk, hbool_t *need_insert) { hbool_t alloc_chunk = FALSE; /* Whether to allocate chunk */ herr_t ret_value = SUCCEED; /* Return value */ @@ -5681,6 +6288,8 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old HDassert(new_chunk); HDassert(need_insert); + *need_insert = FALSE; + /* Check for filters on chunks */ if(idx_info->pline->nused > 0) { /* Sanity/error checking block */ @@ -5735,21 +6344,24 @@ H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old /* Actually allocate space for the chunk in the file */ if(alloc_chunk) { - switch(idx_info->storage->idx_type) { - case H5D_CHUNK_IDX_BTREE: + switch(idx_info->storage->idx_type) { + case H5D_CHUNK_IDX_EARRAY: + case H5D_CHUNK_IDX_FARRAY: + case H5D_CHUNK_IDX_BT2: + case H5D_CHUNK_IDX_BTREE: HDassert(new_chunk->length > 0); - H5_CHECK_OVERFLOW(new_chunk->length, /*From: */uint32_t, /*To: */hsize_t); - new_chunk->offset = H5MF_alloc(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, (hsize_t)new_chunk->length); - if(!H5F_addr_defined(new_chunk->offset)) - HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "file allocation failed") - *need_insert = TRUE; - break; - - case H5D_CHUNK_IDX_NTYPES: - default: - HDassert(0 && "This should never be executed!"); - break; - } /* end switch */ + H5_CHECK_OVERFLOW(new_chunk->length, /*From: */uint32_t, /*To: */hsize_t); + new_chunk->offset = 
H5MF_alloc(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, (hsize_t)new_chunk->length); + if(!H5F_addr_defined(new_chunk->offset)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "file allocation failed") + *need_insert = TRUE; + break; + + case H5D_CHUNK_IDX_NTYPES: + default: + HDassert(0 && "This should never be executed!"); + break; + } /* end switch */ } /* end if */ HDassert(H5F_addr_defined(new_chunk->offset)); diff --git a/src/H5Dearray.c b/src/H5Dearray.c new file mode 100644 index 0000000..f24c69a --- /dev/null +++ b/src/H5Dearray.c @@ -0,0 +1,1740 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* Programmer: Quincey Koziol <koziol@hdfgroup.org> + * Tuesday, January 27, 2009 + * + * Purpose: Extensible array indexed (chunked) I/O functions. The chunks + * are given a single-dimensional index which is used as the + * offset in an extensible array that maps a chunk coordinate to + * a disk address. 
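+ * For example (ignoring any dimension reordering the library may
+ * apply for the unlimited dimension), a dataset with 4 chunks along
+ * its fastest-varying dimension maps the chunk at scaled offset
+ * (2, 3) to linear index 2 * 4 + 3 = 11; element 11 of the array
+ * then holds that chunk's file address (or an address/size/filter-
+ * mask record when filters are present).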
+ * + */ + +/****************/ +/* Module Setup */ +/****************/ + +#include "H5Dmodule.h" /* This source code file is part of the H5D module */ + + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5Dpkg.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5EAprivate.h" /* Extensible arrays */ +#include "H5FLprivate.h" /* Free Lists */ +#include "H5MFprivate.h" /* File space management */ +#include "H5VMprivate.h" /* Vector functions */ + + +/****************/ +/* Local Macros */ +/****************/ + +/* Value to fill unset array elements with */ +#define H5D_EARRAY_FILL HADDR_UNDEF +#define H5D_EARRAY_FILT_FILL {HADDR_UNDEF, 0, 0} + + +/******************/ +/* Local Typedefs */ +/******************/ + +/* Extensible array create/open user data */ +typedef struct H5D_earray_ctx_ud_t { + const H5F_t *f; /* Pointer to file info */ + uint32_t chunk_size; /* Size of chunk (bytes) */ +} H5D_earray_ctx_ud_t; + +/* Extensible array callback context */ +typedef struct H5D_earray_ctx_t { + size_t file_addr_len; /* Size of addresses in the file (bytes) */ + size_t chunk_size_len; /* Size of chunk sizes in the file (bytes) */ +} H5D_earray_ctx_t; + +/* User data for chunk callbacks */ +typedef struct H5D_earray_ud_t { + H5F_t *f; /* File pointer for operation */ + hid_t dxpl_id; /* DXPL ID for operation */ +} H5D_earray_ud_t; + +/* Extensible Array callback info for iteration over chunks */ +typedef struct H5D_earray_it_ud_t { + H5D_chunk_common_ud_t common; /* Common info for Fixed Array user data (must be first) */ + H5D_chunk_rec_t chunk_rec; /* Generic chunk record for callback */ + hbool_t filtered; /* Whether the chunks are filtered */ + H5D_chunk_cb_func_t cb; /* Chunk callback routine */ + void *udata; /* User data for chunk callback routine */ +} H5D_earray_it_ud_t; + +/* Native extensible array element for chunks w/filters */ +typedef struct H5D_earray_filt_elmt_t { + haddr_t addr; /* Address of chunk */ + uint32_t nbytes; /* Size of chunk (in file) */ + uint32_t filter_mask; /* Excluded filters for chunk */ +} H5D_earray_filt_elmt_t; + + +/********************/ +/* Local Prototypes */ +/********************/ +/* Extensible array iterator callbacks */ +static int H5D__earray_idx_iterate_cb(hsize_t idx, const void *_elmt, void *_udata); +static int H5D__earray_idx_delete_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata); + +/* Extensible array class callbacks for chunks w/o filters */ +static void *H5D__earray_crt_context(void *udata); +static herr_t H5D__earray_dst_context(void *ctx); +static herr_t H5D__earray_fill(void *nat_blk, size_t nelmts); +static herr_t H5D__earray_encode(void *raw, const void *elmt, size_t nelmts, + void *ctx); +static herr_t H5D__earray_decode(const void *raw, void *elmt, size_t nelmts, + void *ctx); +static herr_t H5D__earray_debug(FILE *stream, int indent, int fwidth, + hsize_t idx, const void *elmt); +static void *H5D__earray_crt_dbg_context(H5F_t *f, hid_t dxpl_id, haddr_t obj_addr); +static herr_t H5D__earray_dst_dbg_context(void *dbg_ctx); + +/* Extensible array class callbacks for chunks w/filters */ +/* (some shared with callbacks for chunks w/o filters) */ +static herr_t H5D__earray_filt_fill(void *nat_blk, size_t nelmts); +static herr_t H5D__earray_filt_encode(void *raw, const void *elmt, size_t nelmts, + void *ctx); +static herr_t H5D__earray_filt_decode(const void *raw, void *elmt, size_t nelmts, + void *ctx); +static herr_t H5D__earray_filt_debug(FILE *stream, int 
indent, int fwidth, + hsize_t idx, const void *elmt); + +/* Chunked layout indexing callbacks */ +static herr_t H5D__earray_idx_init(const H5D_chk_idx_info_t *idx_info, + const H5S_t *space, haddr_t dset_ohdr_addr); +static herr_t H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info); +static hbool_t H5D__earray_idx_is_space_alloc(const H5O_storage_chunk_t *storage); +static herr_t H5D__earray_idx_insert(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_ud_t *udata, const H5D_t *dset); +static herr_t H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_ud_t *udata); +static herr_t H5D__earray_idx_resize(H5O_layout_chunk_t *layout); +static int H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); +static herr_t H5D__earray_idx_remove(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_common_ud_t *udata); +static herr_t H5D__earray_idx_delete(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__earray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, + const H5D_chk_idx_info_t *idx_info_dst); +static herr_t H5D__earray_idx_copy_shutdown(H5O_storage_chunk_t *storage_src, + H5O_storage_chunk_t *storage_dst, hid_t dxpl_id); +static herr_t H5D__earray_idx_size(const H5D_chk_idx_info_t *idx_info, + hsize_t *size); +static herr_t H5D__earray_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr); +static herr_t H5D__earray_idx_dump(const H5O_storage_chunk_t *storage, + FILE *stream); +static herr_t H5D__earray_idx_dest(const H5D_chk_idx_info_t *idx_info); + +/* Generic extensible array routines */ +static herr_t H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info); + + +/*********************/ +/* Package Variables */ +/*********************/ + +/* Extensible array indexed chunk I/O ops */ +const H5D_chunk_ops_t H5D_COPS_EARRAY[1] = {{ + H5D__earray_idx_init, /* init */ + H5D__earray_idx_create, /* create */ + H5D__earray_idx_is_space_alloc, /* is_space_alloc */ + H5D__earray_idx_insert, /* insert */ + H5D__earray_idx_get_addr, /* get_addr */ + H5D__earray_idx_resize, /* resize */ + H5D__earray_idx_iterate, /* iterate */ + H5D__earray_idx_remove, /* remove */ + H5D__earray_idx_delete, /* delete */ + H5D__earray_idx_copy_setup, /* copy_setup */ + H5D__earray_idx_copy_shutdown, /* copy_shutdown */ + H5D__earray_idx_size, /* size */ + H5D__earray_idx_reset, /* reset */ + H5D__earray_idx_dump, /* dump */ + H5D__earray_idx_dest /* destroy */ +}}; + + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + +/* Extensible array class callbacks for dataset chunks w/o filters */ +const H5EA_class_t H5EA_CLS_CHUNK[1]={{ + H5EA_CLS_CHUNK_ID, /* Type of extensible array */ + "Chunk w/o filters", /* Name of extensible array class */ + sizeof(haddr_t), /* Size of native element */ + H5D__earray_crt_context, /* Create context */ + H5D__earray_dst_context, /* Destroy context */ + H5D__earray_fill, /* Fill block of missing elements callback */ + H5D__earray_encode, /* Element encoding callback */ + H5D__earray_decode, /* Element decoding callback */ + H5D__earray_debug, /* Element debugging callback */ + H5D__earray_crt_dbg_context, /* Create debugging context */ + H5D__earray_dst_dbg_context /* Destroy debugging context */ +}}; + +/* Extensible array class callbacks for dataset chunks w/filters */ +const H5EA_class_t H5EA_CLS_FILT_CHUNK[1]={{ + H5EA_CLS_FILT_CHUNK_ID, /* Type of extensible array */ + "Chunk w/filters", /* Name of extensible array class */ + 
sizeof(H5D_earray_filt_elmt_t), /* Size of native element */ + H5D__earray_crt_context, /* Create context */ + H5D__earray_dst_context, /* Destroy context */ + H5D__earray_filt_fill, /* Fill block of missing elements callback */ + H5D__earray_filt_encode, /* Element encoding callback */ + H5D__earray_filt_decode, /* Element decoding callback */ + H5D__earray_filt_debug, /* Element debugging callback */ + H5D__earray_crt_dbg_context, /* Create debugging context */ + H5D__earray_dst_dbg_context /* Destroy debugging context */ +}}; + + +/*******************/ +/* Local Variables */ +/*******************/ + +/* Declare a free list to manage the H5D_earray_ctx_t struct */ +/* Declare a free list to manage the H5D_earray_ctx_ud_t struct */ +H5FL_DEFINE_STATIC(H5D_earray_ctx_t); +H5FL_DEFINE_STATIC(H5D_earray_ctx_ud_t); + + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_crt_context + * + * Purpose: Create context for callbacks + * + * Return: Success: non-NULL + * Failure: NULL + * + * Programmer: Quincey Koziol + * Thursday, January 29, 2009 + * + *------------------------------------------------------------------------- + */ +static void * +H5D__earray_crt_context(void *_udata) +{ + H5D_earray_ctx_t *ctx; /* Extensible array callback context */ + H5D_earray_ctx_ud_t *udata = (H5D_earray_ctx_ud_t *)_udata; /* User data for extensible array context */ + void *ret_value = NULL; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(udata); + HDassert(udata->f); + HDassert(udata->chunk_size > 0); + + /* Allocate new context structure */ + if(NULL == (ctx = H5FL_MALLOC(H5D_earray_ctx_t))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate extensible array client callback context") + + /* Initialize the context */ + ctx->file_addr_len = H5F_SIZEOF_ADDR(udata->f); + + /* Compute the size required for encoding the size of a chunk, allowing + * for an extra byte, in case the filter makes the chunk larger. 
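+ * For example, a 1 MiB (0x100000-byte) chunk gives
+ * H5VM_log2_gen() == 20, so chunk_size_len = 1 + (20 + 8) / 8 = 4
+ * bytes, leaving room for a filtered chunk that ends up larger than
+ * the unfiltered size; the value is capped at 8 bytes below.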
+ */ + ctx->chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)udata->chunk_size) + 8) / 8); + if(ctx->chunk_size_len > 8) + ctx->chunk_size_len = 8; + + /* Set return value */ + ret_value = ctx; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_crt_context() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_dst_context + * + * Purpose: Destroy context for callbacks + * + * Return: Success: non-NULL + * Failure: NULL + * + * Programmer: Quincey Koziol + * Thursday, January 29, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_dst_context(void *_ctx) +{ + H5D_earray_ctx_t *ctx = (H5D_earray_ctx_t *)_ctx; /* Extensible array callback context */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(ctx); + + /* Release context structure */ + ctx = H5FL_FREE(H5D_earray_ctx_t, ctx); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_dst_context() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_fill + * + * Purpose: Fill "missing elements" in block of elements + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Tuesday, January 27, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_fill(void *nat_blk, size_t nelmts) +{ + haddr_t fill_val = H5D_EARRAY_FILL; /* Value to fill elements with */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(nat_blk); + HDassert(nelmts); + + H5VM_array_fill(nat_blk, &fill_val, H5EA_CLS_CHUNK->nat_elmt_size, nelmts); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_fill() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_encode + * + * Purpose: Encode an element from "native" to "raw" form + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Tuesday, January 27, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_encode(void *raw, const void *_elmt, size_t nelmts, void *_ctx) +{ + H5D_earray_ctx_t *ctx = (H5D_earray_ctx_t *)_ctx; /* Extensible array callback context */ + const haddr_t *elmt = (const haddr_t *)_elmt; /* Convenience pointer to native elements */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(raw); + HDassert(elmt); + HDassert(nelmts); + HDassert(ctx); + + /* Encode native elements into raw elements */ + while(nelmts) { + /* Encode element */ + /* (advances 'raw' pointer) */ + H5F_addr_encode_len(ctx->file_addr_len, (uint8_t **)&raw, *elmt); + + /* Advance native element pointer */ + elmt++; + + /* Decrement # of elements to encode */ + nelmts--; + } /* end while */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_encode() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_decode + * + * Purpose: Decode an element from "raw" to "native" form + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Thursday, January 29, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_decode(const void *_raw, void *_elmt, size_t nelmts, void *_ctx) +{ + H5D_earray_ctx_t *ctx = (H5D_earray_ctx_t *)_ctx; /* Extensible array callback context */ + haddr_t *elmt = (haddr_t *)_elmt; /* 
Convenience pointer to native elements */ + const uint8_t *raw = (const uint8_t *)_raw; /* Convenience pointer to raw elements */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(raw); + HDassert(elmt); + HDassert(nelmts); + + /* Decode raw elements into native elements */ + while(nelmts) { + /* Decode element */ + /* (advances 'raw' pointer) */ + H5F_addr_decode_len(ctx->file_addr_len, &raw, elmt); + + /* Advance native element pointer */ + elmt++; + + /* Decrement # of elements to decode */ + nelmts--; + } /* end while */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_decode() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_debug + * + * Purpose: Display an element for debugging + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Thursday, January 29, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_debug(FILE *stream, int indent, int fwidth, hsize_t idx, + const void *elmt) +{ + char temp_str[128]; /* Temporary string, for formatting */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(stream); + HDassert(elmt); + + /* Print element */ + sprintf(temp_str, "Element #%llu:", (unsigned long long)idx); + HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, temp_str, + *(const haddr_t *)elmt); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_debug() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_filt_fill + * + * Purpose: Fill "missing elements" in block of elements + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Saturday, January 31, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_filt_fill(void *nat_blk, size_t nelmts) +{ + H5D_earray_filt_elmt_t fill_val = H5D_EARRAY_FILT_FILL; /* Value to fill elements with */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(nat_blk); + HDassert(nelmts); + HDassert(sizeof(fill_val) == H5EA_CLS_FILT_CHUNK->nat_elmt_size); + + H5VM_array_fill(nat_blk, &fill_val, H5EA_CLS_FILT_CHUNK->nat_elmt_size, nelmts); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_filt_fill() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_filt_encode + * + * Purpose: Encode an element from "native" to "raw" form + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Saturday, January 31, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_filt_encode(void *_raw, const void *_elmt, size_t nelmts, void *_ctx) +{ + H5D_earray_ctx_t *ctx = (H5D_earray_ctx_t *)_ctx; /* Extensible array callback context */ + uint8_t *raw = (uint8_t *)_raw; /* Convenience pointer to raw elements */ + const H5D_earray_filt_elmt_t *elmt = (const H5D_earray_filt_elmt_t *)_elmt; /* Convenience pointer to native elements */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(raw); + HDassert(elmt); + HDassert(nelmts); + HDassert(ctx); + + /* Encode native elements into raw elements */ + while(nelmts) { + /* Encode element */ + /* (advances 'raw' pointer) */ + H5F_addr_encode_len(ctx->file_addr_len, &raw, elmt->addr); + UINT64ENCODE_VAR(raw, elmt->nbytes, ctx->chunk_size_len); + UINT32ENCODE(raw, elmt->filter_mask); + + 
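+ /* Resulting raw element layout (from the three calls above): file
+ * address (ctx->file_addr_len bytes), on-disk chunk size
+ * (ctx->chunk_size_len bytes), then a 4-byte filter mask. */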
/* Advance native element pointer */ + elmt++; + + /* Decrement # of elements to encode */ + nelmts--; + } /* end while */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_filt_encode() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_filt_decode + * + * Purpose: Decode an element from "raw" to "native" form + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Saturday, January 31, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_filt_decode(const void *_raw, void *_elmt, size_t nelmts, void *_ctx) +{ + H5D_earray_ctx_t *ctx = (H5D_earray_ctx_t *)_ctx; /* Extensible array callback context */ + H5D_earray_filt_elmt_t *elmt = (H5D_earray_filt_elmt_t *)_elmt; /* Convenience pointer to native elements */ + const uint8_t *raw = (const uint8_t *)_raw; /* Convenience pointer to raw elements */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(raw); + HDassert(elmt); + HDassert(nelmts); + + /* Decode raw elements into native elements */ + while(nelmts) { + /* Decode element */ + /* (advances 'raw' pointer) */ + H5F_addr_decode_len(ctx->file_addr_len, &raw, &elmt->addr); + UINT64DECODE_VAR(raw, elmt->nbytes, ctx->chunk_size_len); + UINT32DECODE(raw, elmt->filter_mask); + + /* Advance native element pointer */ + elmt++; + + /* Decrement # of elements to decode */ + nelmts--; + } /* end while */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_filt_decode() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_filt_debug + * + * Purpose: Display an element for debugging + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Saturday, January 31, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_filt_debug(FILE *stream, int indent, int fwidth, hsize_t idx, + const void *_elmt) +{ + const H5D_earray_filt_elmt_t *elmt = (const H5D_earray_filt_elmt_t *)_elmt; /* Convenience pointer to native elements */ + char temp_str[128]; /* Temporary string, for formatting */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(stream); + HDassert(elmt); + + /* Print element */ + sprintf(temp_str, "Element #%llu:", (unsigned long long)idx); + HDfprintf(stream, "%*s%-*s {%a, %u, %0x}\n", indent, "", fwidth, temp_str, + elmt->addr, elmt->nbytes, elmt->filter_mask); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_filt_debug() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_crt_dbg_context + * + * Purpose: Create context for debugging callback + * (get the layout message in the specified object header) + * + * Return: Success: non-NULL + * Failure: NULL + * + * Programmer: Vailin Choi; July 2010 + * + *------------------------------------------------------------------------- + */ +static void * +H5D__earray_crt_dbg_context(H5F_t *f, hid_t dxpl_id, haddr_t obj_addr) +{ + H5D_earray_ctx_ud_t *dbg_ctx = NULL; /* Context for fixed array callback */ + H5O_loc_t obj_loc; /* Pointer to an object's location */ + hbool_t obj_opened = FALSE; /* Flag to indicate that the object header was opened */ + H5O_layout_t layout; /* Layout message */ + void *ret_value = NULL; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(f); + HDassert(H5F_addr_defined(obj_addr)); + + /* 
Allocate context for debugging callback */ + if(NULL == (dbg_ctx = H5FL_MALLOC(H5D_earray_ctx_ud_t))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate extensible array client callback context") + + /* Set up the object header location info */ + H5O_loc_reset(&obj_loc); + obj_loc.file = f; + obj_loc.addr = obj_addr; + + /* Open the object header where the layout message resides */ + if(H5O_open(&obj_loc) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, NULL, "can't open object header") + obj_opened = TRUE; + + /* Read the layout message */ + if(NULL == H5O_msg_read(&obj_loc, H5O_LAYOUT_ID, &layout, dxpl_id)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get layout info") + + /* close the object header */ + if(H5O_close(&obj_loc) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header") + + /* Create user data */ + dbg_ctx->f = f; + dbg_ctx->chunk_size = layout.u.chunk.size; + + /* Set return value */ + ret_value = dbg_ctx; + +done: + /* Cleanup on error */ + if(ret_value == NULL) { + /* Release context structure */ + if(dbg_ctx) + dbg_ctx = H5FL_FREE(H5D_earray_ctx_ud_t, dbg_ctx); + + /* Close object header */ + if(obj_opened) { + if(H5O_close(&obj_loc) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header") + } /* end if */ + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_crt_dbg_context() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_dst_dbg_context + * + * Purpose: Destroy context for debugging callback + * (free the layout message from the specified object header) + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi; July 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_dst_dbg_context(void *_dbg_ctx) +{ + H5D_earray_ctx_ud_t *dbg_ctx = (H5D_earray_ctx_ud_t *)_dbg_ctx; /* Context for extensible array callback */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(dbg_ctx); + + /* Release context structure */ + dbg_ctx = H5FL_FREE(H5D_earray_ctx_ud_t, dbg_ctx); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_dst_dbg_context() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_open + * + * Purpose: Opens an existing extensible array. + * + * Note: This information is passively initialized from each index + * operation callback because those abstract chunk index operations + * are designed to work with the v1 B-tree chunk indices also, + * which don't require an 'open' for the data structure. 
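 * (The index operations below therefore check whether storage->u.earray.ea is still NULL and, if so, call this routine before touching the array.)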
+ * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Thursday, January 29, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_open(const H5D_chk_idx_info_t *idx_info) +{ + H5D_earray_ctx_ud_t udata; /* User data for extensible array open call */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(H5D_CHUNK_IDX_EARRAY == idx_info->layout->idx_type); + HDassert(idx_info->storage); + HDassert(H5D_CHUNK_IDX_EARRAY == idx_info->storage->idx_type); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(NULL == idx_info->storage->u.earray.ea); + + /* Set up the user data */ + udata.f = idx_info->f; + udata.chunk_size = idx_info->layout->size; + + /* Open the extensible array for the chunk index */ + if(NULL == (idx_info->storage->u.earray.ea = H5EA_open(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &udata))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open extensible array") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_open() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_init + * + * Purpose: Initialize the indexing information for a dataset. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Wednesday, May 27, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t *space, + haddr_t dset_ohdr_addr) +{ + hsize_t max_dims[H5O_LAYOUT_NDIMS]; /* Max. 
size of dataset dimensions */ + int unlim_dim; /* Rank of the dataset's unlimited dimension */ + int sndims; /* Rank of dataspace */ + unsigned ndims; /* Rank of dataspace */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(space); + HDassert(H5F_addr_defined(dset_ohdr_addr)); + + /* Get the dim info for dataset */ + if((sndims = H5S_get_simple_extent_dims(space, NULL, max_dims)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace dimensions") + H5_CHECKED_ASSIGN(ndims, unsigned, sndims, int); + + /* Find the rank of the unlimited dimension */ + unlim_dim = (-1); + for(u = 0; u < ndims; u++) { + /* Check for unlimited dimension */ + if(H5S_UNLIMITED == max_dims[u]) { + /* Check if we've already found an unlimited dimension */ + if(unlim_dim >= 0) + HGOTO_ERROR(H5E_DATASET, H5E_ALREADYINIT, FAIL, "already found unlimited dimension") + + /* Set the unlimited dimension */ + unlim_dim = (int)u; + } /* end if */ + } /* end for */ + + /* Check if we didn't find an unlimited dimension */ + if(unlim_dim < 0) + HGOTO_ERROR(H5E_DATASET, H5E_UNINITIALIZED, FAIL, "didn't find unlimited dimension") + + /* Set the unlimited dimension for the layout's future use */ + idx_info->layout->u.earray.unlim_dim = (unsigned)unlim_dim; + + /* Store the dataset's object header address for later */ + idx_info->storage->u.earray.dset_ohdr_addr = dset_ohdr_addr; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_init() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_create + * + * Purpose: Creates a new indexed-storage extensible array and initializes + * the layout struct with information about the storage. The + * struct should be immediately written to the object header. + * + * This function must be called before passing LAYOUT to any of + * the other indexed storage functions! + * + * Return: Non-negative on success (with the LAYOUT argument initialized + * and ready to write to an object header). Negative on failure. + * + * Programmer: Quincey Koziol + * Tuesday, January 27, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_create(const H5D_chk_idx_info_t *idx_info) +{ + H5EA_create_t cparam; /* Extensible array creation parameters */ + H5D_earray_ctx_ud_t udata; /* User data for extensible array create call */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(!H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(NULL == idx_info->storage->u.earray.ea); + + /* General parameters */ + if(idx_info->pline->nused > 0) { + unsigned chunk_size_len; /* Size of encoded chunk size */ + + /* Compute the size required for encoding the size of a chunk, allowing + * for an extra byte, in case the filter makes the chunk larger. 
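 * For example, a 1 MiB (2^20 byte) chunk gives H5VM_log2_gen() == 20, so chunk_size_len becomes 1 + ((20 + 8) / 8) = 4 bytes, one byte more than the 3 bytes needed for the unfiltered chunk size.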
+ */ + chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)idx_info->layout->size) + 8) / 8); + if(chunk_size_len > 8) + chunk_size_len = 8; + + cparam.cls = H5EA_CLS_FILT_CHUNK; + cparam.raw_elmt_size = (uint8_t)(H5F_SIZEOF_ADDR(idx_info->f) + chunk_size_len + 4); + } /* end if */ + else { + cparam.cls = H5EA_CLS_CHUNK; + cparam.raw_elmt_size = (uint8_t)H5F_SIZEOF_ADDR(idx_info->f); + } /* end else */ + cparam.max_nelmts_bits = idx_info->layout->u.earray.cparam.max_nelmts_bits; + HDassert(cparam.max_nelmts_bits > 0); + cparam.idx_blk_elmts = idx_info->layout->u.earray.cparam.idx_blk_elmts; + HDassert(cparam.idx_blk_elmts > 0); + cparam.sup_blk_min_data_ptrs = idx_info->layout->u.earray.cparam.sup_blk_min_data_ptrs; + HDassert(cparam.sup_blk_min_data_ptrs > 0); + cparam.data_blk_min_elmts = idx_info->layout->u.earray.cparam.data_blk_min_elmts; + HDassert(cparam.data_blk_min_elmts > 0); + cparam.max_dblk_page_nelmts_bits = idx_info->layout->u.earray.cparam.max_dblk_page_nelmts_bits; + HDassert(cparam.max_dblk_page_nelmts_bits > 0); + + /* Set up the user data */ + udata.f = idx_info->f; + udata.chunk_size = idx_info->layout->size; + + /* Create the extensible array for the chunk index */ + if(NULL == (idx_info->storage->u.earray.ea = H5EA_create(idx_info->f, idx_info->dxpl_id, &cparam, &udata))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create extensible array") + + /* Get the address of the extensible array in file */ + if(H5EA_get_addr(idx_info->storage->u.earray.ea, &(idx_info->storage->idx_addr)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query extensible array address") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_create() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_is_space_alloc + * + * Purpose: Query if space is allocated for index method + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Thursday, January 29, 2009 + * + *------------------------------------------------------------------------- + */ +static hbool_t +H5D__earray_idx_is_space_alloc(const H5O_storage_chunk_t *storage) +{ + FUNC_ENTER_STATIC_NOERR + + /* Check args */ + HDassert(storage); + + FUNC_LEAVE_NOAPI((hbool_t)H5F_addr_defined(storage->idx_addr)) +} /* end H5D__earray_idx_is_space_alloc() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_insert + * + * Purpose: Insert chunk address into the indexing structure. 
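 * For filtered chunks the element stored at udata->chunk_idx carries the chunk's file address, on-disk size and filter mask; for unfiltered chunks only the file address is stored.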
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi; May 2014 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, + const H5D_t H5_ATTR_UNUSED *dset) +{ + H5EA_t *ea; /* Pointer to extensible array structure */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(udata); + + /* Check if the extensible array is open yet */ + if(NULL == idx_info->storage->u.earray.ea) { + /* Open the extensible array in file */ + if(H5D__earray_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array") + } else /* Patch the top level file pointer contained in ea if needed */ + H5EA_patch_file(idx_info->storage->u.earray.ea, idx_info->f); + + /* Set convenience pointer to extensible array structure */ + ea = idx_info->storage->u.earray.ea; + + if(!H5F_addr_defined(udata->chunk_block.offset)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "The chunk should have allocated already") + if(udata->chunk_idx != (udata->chunk_idx & 0xffffffff)) /* negative value */ + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "chunk index must be less than 2^32") + + /* Check for filters on chunks */ + if(idx_info->pline->nused > 0) { + H5D_earray_filt_elmt_t elmt; /* Extensible array element */ + + elmt.addr = udata->chunk_block.offset; + H5_CHECKED_ASSIGN(elmt.nbytes, uint32_t, udata->chunk_block.length, hsize_t); + elmt.filter_mask = udata->filter_mask; + + /* Set the info for the chunk */ + if(H5EA_set(ea, idx_info->dxpl_id, udata->chunk_idx, &elmt) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set chunk info") + } /* end if */ + else { + /* Set the address for the chunk */ + if(H5EA_set(ea, idx_info->dxpl_id, udata->chunk_idx, &udata->chunk_block.offset) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set chunk address") + } /* end else */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__earray_idx_insert() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_get_addr + * + * Purpose: Get the file address of a chunk if file space has been + * assigned. Save the retrieved information in the udata + * supplied. 
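 * When the unlimited dimension is not the slowest-changing one, the chunk's scaled coordinates are first swizzled so that the unlimited dimension becomes dimension 0 before the linear index into the array is computed.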
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Thursday, January 29, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) +{ + H5EA_t *ea; /* Pointer to extensible array structure */ + hsize_t idx; /* Array index of chunk */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(udata); + + /* Check if the extensible array is open yet */ + if(NULL == idx_info->storage->u.earray.ea) { + /* Open the extensible array in file */ + if(H5D__earray_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array") + } else /* Patch the top level file pointer contained in ea if needed */ + H5EA_patch_file(idx_info->storage->u.earray.ea, idx_info->f); + + /* Set convenience pointer to extensible array structure */ + ea = idx_info->storage->u.earray.ea; + + /* Check for unlimited dim. not being the slowest-changing dim. */ + if(idx_info->layout->u.earray.unlim_dim > 0) { + hsize_t swizzled_coords[H5O_LAYOUT_NDIMS]; /* swizzled chunk coordinates */ + unsigned ndims = (idx_info->layout->ndims - 1); /* Number of dimensions */ + unsigned u; + + /* Compute coordinate offset from scaled offset */ + for(u = 0; u < ndims; u++) + swizzled_coords[u] = udata->common.scaled[u] * idx_info->layout->dim[u]; + + H5VM_swizzle_coords(hsize_t, swizzled_coords, idx_info->layout->u.earray.unlim_dim); + + /* Calculate the index of this chunk */ + idx = H5VM_chunk_index(ndims, swizzled_coords, idx_info->layout->u.earray.swizzled_dim, idx_info->layout->u.earray.swizzled_down_chunks); + } /* end if */ + else { + /* Calculate the index of this chunk */ + idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->down_chunks, udata->common.scaled); + } /* end else */ + + udata->chunk_idx = idx; + + /* Check for filters on chunks */ + if(idx_info->pline->nused > 0) { + H5D_earray_filt_elmt_t elmt; /* Extensible array element */ + + /* Get the information for the chunk */ + if(H5EA_get(ea, idx_info->dxpl_id, idx, &elmt) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info") + + /* Set the info for the chunk */ + udata->chunk_block.offset = elmt.addr; + udata->chunk_block.length = elmt.nbytes; + udata->filter_mask = elmt.filter_mask; + } /* end if */ + else { + /* Get the address for the chunk */ + if(H5EA_get(ea, idx_info->dxpl_id, idx, &udata->chunk_block.offset) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address") + + /* Update the other (constant) information for the chunk */ + udata->chunk_block.length = idx_info->layout->size; + udata->filter_mask = 0; + } /* end else */ + + if(!H5F_addr_defined(udata->chunk_block.offset)) + udata->chunk_block.length = 0; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__earray_idx_get_addr() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_resize + * + * Purpose: Calculate/setup the swizzled down chunk array, used for chunk + * index calculations. 
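 * (The swizzled chunk dimensions and the cumulative swizzled 'down' chunk counts are recomputed here so that the chunk index calculations can use them directly after the dataset is resized.)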
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Thursday, July 23, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_resize(H5O_layout_chunk_t *layout) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(layout); + + /* "Swizzle" constant dimensions for this dataset */ + if(layout->u.earray.unlim_dim > 0) { + hsize_t swizzled_chunks[H5O_LAYOUT_NDIMS]; /* Swizzled form of # of chunks in each dimension */ + + /* Get the swizzled chunk dimensions */ + HDmemcpy(layout->u.earray.swizzled_dim, layout->dim, (layout->ndims - 1) * sizeof(layout->dim[0])); + H5VM_swizzle_coords(uint32_t, layout->u.earray.swizzled_dim, layout->u.earray.unlim_dim); + + /* Get the swizzled number of chunks in each dimension */ + HDmemcpy(swizzled_chunks, layout->chunks, (layout->ndims - 1) * sizeof(swizzled_chunks[0])); + H5VM_swizzle_coords(hsize_t, swizzled_chunks, layout->u.earray.unlim_dim); + + /* Get the swizzled "down" sizes for each dimension */ + if(H5VM_array_down((layout->ndims - 1), swizzled_chunks, layout->u.earray.swizzled_down_chunks) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't compute swizzled 'down' chunk size value") + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_resize() */ + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_iterate_cb + * + * Purpose: Callback routine for extensible array element iteration. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi; Feb 2015 + * + *------------------------------------------------------------------------- + */ +static int +H5D__earray_idx_iterate_cb(hsize_t H5_ATTR_UNUSED idx, const void *_elmt, void *_udata) +{ + H5D_earray_it_ud_t *udata = (H5D_earray_it_ud_t *)_udata; /* User data */ + unsigned ndims; /* Rank of chunk */ + int curr_dim; /* Current dimension */ + int ret_value = H5_ITER_CONT; /* Return value */ + + FUNC_ENTER_STATIC_NOERR + + /* Compose generic chunk record for callback */ + if(udata->filtered) { + const H5D_earray_filt_elmt_t *filt_elmt = (const H5D_earray_filt_elmt_t *)_elmt; + + udata->chunk_rec.chunk_addr = filt_elmt->addr; + udata->chunk_rec.nbytes = filt_elmt->nbytes; + udata->chunk_rec.filter_mask = filt_elmt->filter_mask; + } /* end if */ + else + udata->chunk_rec.chunk_addr = *(const haddr_t *)_elmt; + + /* Make "generic chunk" callback */ + if(H5F_addr_defined(udata->chunk_rec.chunk_addr)) + if((ret_value = (udata->cb)(&udata->chunk_rec, udata->udata)) < 0) + HERROR(H5E_DATASET, H5E_CALLBACK, "failure in generic chunk iterator callback"); + + /* Update coordinates of chunk in dataset */ + ndims = udata->common.layout->ndims - 1; + HDassert(ndims > 0); + curr_dim = (int)(ndims - 1); + while(curr_dim >= 0) { + /* Increment coordinate in current dimension */ + udata->chunk_rec.scaled[curr_dim]++; + + /* Check if we went off the end of the current dimension */ + if(udata->chunk_rec.scaled[curr_dim] >= udata->common.layout->chunks[curr_dim]) { + /* Reset coordinate & move to next faster dimension */ + udata->chunk_rec.scaled[curr_dim] = 0; + curr_dim--; + } /* end if */ + else + break; + } /* end while */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__earray_idx_iterate_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_iterate + * + * Purpose: Iterate 
over the chunks in an index, making a callback + * for each one. + * + * Note: This implementation is slow, particularly for sparse + * extensible arrays, replace it with call to H5EA_iterate() + * when that's available. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Thursday, January 29, 2009 + * + *------------------------------------------------------------------------- + */ +static int +H5D__earray_idx_iterate(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_cb_func_t chunk_cb, void *chunk_udata) +{ + H5EA_t *ea; /* Pointer to extensible array structure */ + H5EA_stat_t ea_stat; /* Extensible array statistics */ + int ret_value = H5_ITER_CONT; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(chunk_cb); + HDassert(chunk_udata); + + /* Check if the extensible array is open yet */ + if(NULL == idx_info->storage->u.earray.ea) { + /* Open the extensible array in file */ + if(H5D__earray_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array") + } else /* Patch the top level file pointer contained in ea if needed */ + H5EA_patch_file(idx_info->storage->u.earray.ea, idx_info->f); + + /* Set convenience pointer to extensible array structure */ + ea = idx_info->storage->u.earray.ea; + + /* Get the extensible array statistics */ + if(H5EA_get_stats(ea, &ea_stat) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query extensible array statistics") + + if(ea_stat.stored.max_idx_set > 0) { + H5D_earray_it_ud_t udata; /* User data for iteration callback */ + + /* Initialize userdata */ + HDmemset(&udata, 0, sizeof udata); + udata.common.layout = idx_info->layout; + udata.common.storage = idx_info->storage; + HDmemset(&udata.chunk_rec, 0, sizeof(udata.chunk_rec)); + udata.filtered = (idx_info->pline->nused > 0); + if(!udata.filtered) { + udata.chunk_rec.nbytes = idx_info->layout->size; + udata.chunk_rec.filter_mask = 0; + } /* end if */ + udata.cb = chunk_cb; + udata.udata = chunk_udata; + + /* Iterate over the extensible array elements */ + if((ret_value = H5EA_iterate(ea, idx_info->dxpl_id, H5D__earray_idx_iterate_cb, &udata)) < 0) + HERROR(H5E_DATASET, H5E_BADITER, "unable to iterate over fixed array chunk index"); + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_iterate() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_remove + * + * Purpose: Remove chunk from index. 
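 * The chunk's file space is freed and its array element is reset to HADDR_UNDEF (plus zero size and filter mask for filtered chunks); the element itself remains in the extensible array.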
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Thursday, January 29, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata) +{ + H5EA_t *ea; /* Pointer to extensible array structure */ + hsize_t idx; /* Array index of chunk */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(udata); + + /* Check if the extensible array is open yet */ + if(NULL == idx_info->storage->u.earray.ea) + /* Open the extensible array in file */ + if(H5D__earray_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array") + + /* Set convenience pointer to extensible array structure */ + ea = idx_info->storage->u.earray.ea; + + /* Check for unlimited dim. not being the slowest-changing dim. */ + if(idx_info->layout->u.earray.unlim_dim > 0) { + hsize_t swizzled_coords[H5O_LAYOUT_NDIMS]; /* swizzled chunk coordinates */ + unsigned ndims = (idx_info->layout->ndims - 1); /* Number of dimensions */ + unsigned u; + + /* Compute coordinate offset from scaled offset */ + for(u = 0; u < ndims; u++) + swizzled_coords[u] = udata->scaled[u] * idx_info->layout->dim[u]; + + H5VM_swizzle_coords(hsize_t, swizzled_coords, idx_info->layout->u.earray.unlim_dim); + + /* Calculate the index of this chunk */ + idx = H5VM_chunk_index(ndims, swizzled_coords, idx_info->layout->u.earray.swizzled_dim, idx_info->layout->u.earray.swizzled_down_chunks); + } /* end if */ + else { + /* Calculate the index of this chunk */ + idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->down_chunks, udata->scaled); + } /* end else */ + + /* Check for filters on chunks */ + if(idx_info->pline->nused > 0) { + H5D_earray_filt_elmt_t elmt; /* Extensible array element */ + + /* Get the info about the chunk for the index */ + if(H5EA_get(ea, idx_info->dxpl_id, idx, &elmt) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info") + + /* Remove raw data chunk from file */ + HDassert(H5F_addr_defined(elmt.addr)); + H5_CHECK_OVERFLOW(elmt.nbytes, /*From: */uint32_t, /*To: */hsize_t); + if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, elmt.addr, (hsize_t)elmt.nbytes) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + + /* Reset the info about the chunk for the index */ + elmt.addr = HADDR_UNDEF; + elmt.nbytes = 0; + elmt.filter_mask = 0; + if(H5EA_set(ea, idx_info->dxpl_id, idx, &elmt) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to reset chunk info") + } /* end if */ + else { + haddr_t addr = HADDR_UNDEF; /* Chunk address */ + + /* Get the address of the chunk for the index */ + if(H5EA_get(ea, idx_info->dxpl_id, idx, &addr) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address") + + /* Remove raw data chunk from file */ + HDassert(H5F_addr_defined(addr)); + H5_CHECK_OVERFLOW(idx_info->layout->size, /*From: */uint32_t, /*To: */hsize_t); + if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, addr, (hsize_t)idx_info->layout->size) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + + /* Reset the address of the chunk for the index */ + addr = 
HADDR_UNDEF; + if(H5EA_set(ea, idx_info->dxpl_id, idx, &addr) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to reset chunk address") + } /* end else */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__earray_idx_remove() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_delete_cb + * + * Purpose: Delete space for chunk in file + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Saturday, January 31, 2009 + * + *------------------------------------------------------------------------- + */ +static int +H5D__earray_idx_delete_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) +{ + H5D_earray_ud_t *udata = (H5D_earray_ud_t *)_udata; /* User data for callback */ + int ret_value = H5_ITER_CONT; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(chunk_rec); + HDassert(H5F_addr_defined(chunk_rec->chunk_addr)); + HDassert(chunk_rec->nbytes > 0); + HDassert(udata); + HDassert(udata->f); + + /* Remove raw data chunk from file */ + H5_CHECK_OVERFLOW(chunk_rec->nbytes, /*From: */uint32_t, /*To: */hsize_t); + if(H5MF_xfree(udata->f, H5FD_MEM_DRAW, udata->dxpl_id, chunk_rec->chunk_addr, (hsize_t)chunk_rec->nbytes) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, H5_ITER_ERROR, "unable to free chunk") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_delete_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_delete + * + * Purpose: Delete index and raw data storage for entire dataset + * (i.e. all chunks) + * + * Note: This implementation is slow, particularly for sparse + * extensible arrays, replace it with call to H5EA_iterate() + * when that's available. 
+ * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Thursday, January 29, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_delete(const H5D_chk_idx_info_t *idx_info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + + /* Check if the index data structure has been allocated */ + if(H5F_addr_defined(idx_info->storage->idx_addr)) { + H5D_earray_ud_t udata; /* User data for callback */ + H5D_earray_ctx_ud_t ctx_udata; /* User data for extensible array open call */ + + /* Initialize user data for callback */ + udata.f = idx_info->f; + udata.dxpl_id = idx_info->dxpl_id; + + /* Iterate over the chunk addresses in the extensible array, deleting each chunk */ + if(H5D__earray_idx_iterate(idx_info, H5D__earray_idx_delete_cb, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk addresses") + + /* Close extensible array */ + if(H5EA_close(idx_info->storage->u.earray.ea, idx_info->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array") + idx_info->storage->u.earray.ea = NULL; + + /* Set up the context user data */ + ctx_udata.f = idx_info->f; + ctx_udata.chunk_size = idx_info->layout->size; + + /* Delete extensible array */ + if(H5EA_delete(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &ctx_udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to delete chunk extensible array") + idx_info->storage->idx_addr = HADDR_UNDEF; + } /* end if */ + else + HDassert(NULL == idx_info->storage->u.earray.ea); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_delete() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_copy_setup + * + * Purpose: Set up any necessary information for copying chunks + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Saturday, January 31, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, + const H5D_chk_idx_info_t *idx_info_dst) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info_src); + HDassert(idx_info_src->f); + HDassert(idx_info_src->pline); + HDassert(idx_info_src->layout); + HDassert(idx_info_src->storage); + HDassert(idx_info_dst); + HDassert(idx_info_dst->f); + HDassert(idx_info_dst->pline); + HDassert(idx_info_dst->layout); + HDassert(idx_info_dst->storage); + HDassert(!H5F_addr_defined(idx_info_dst->storage->idx_addr)); + + /* Check if the source extensible array is open yet */ + if(NULL == idx_info_src->storage->u.earray.ea) + /* Open the extensible array in file */ + if(H5D__earray_idx_open(idx_info_src) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array") + + /* Set copied metadata tag */ + H5_BEGIN_TAG(idx_info_dst->dxpl_id, H5AC__COPIED_TAG, FAIL); + + /* Create the extensible array that describes chunked storage in the dest. 
file */ + if(H5D__earray_idx_create(idx_info_dst) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunked storage") + HDassert(H5F_addr_defined(idx_info_dst->storage->idx_addr)); + + /* Reset metadata tag */ + H5_END_TAG(FAIL); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_copy_setup() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_copy_shutdown + * + * Purpose: Shutdown any information from copying chunks + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Saturday, January 31, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_copy_shutdown(H5O_storage_chunk_t *storage_src, + H5O_storage_chunk_t *storage_dst, hid_t dxpl_id) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(storage_src); + HDassert(storage_src->u.earray.ea); + HDassert(storage_dst); + HDassert(storage_dst->u.earray.ea); + + /* Close extensible arrays */ + if(H5EA_close(storage_src->u.earray.ea, dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array") + storage_src->u.earray.ea = NULL; + if(H5EA_close(storage_dst->u.earray.ea, dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array") + storage_dst->u.earray.ea = NULL; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_copy_shutdown() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_size + * + * Purpose: Retrieve the amount of index storage for chunked dataset + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * Saturday, January 31, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) +{ + H5EA_t *ea; /* Pointer to extensible array structure */ + H5EA_stat_t ea_stat; /* Extensible array statistics */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(index_size); + + /* Open the extensible array in file */ + if(H5D__earray_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open extensible array") + + /* Set convenience pointer to extensible array structure */ + ea = idx_info->storage->u.earray.ea; + + /* Get the extensible array statistics */ + if(H5EA_get_stats(ea, &ea_stat) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query extensible array statistics") + + /* Set the size of the extensible array */ + *index_size = ea_stat.computed.hdr_size + ea_stat.computed.index_blk_size + + ea_stat.stored.super_blk_size + ea_stat.stored.data_blk_size; + +done: + if(idx_info->storage->u.earray.ea) { + if(H5EA_close(idx_info->storage->u.earray.ea, idx_info->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array") + idx_info->storage->u.earray.ea = NULL; + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_size() */ + + +/*------------------------------------------------------------------------- + * Function: 
H5D__earray_idx_reset + * + * Purpose: Reset indexing information. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Saturday, January 31, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr) +{ + FUNC_ENTER_STATIC_NOERR + + /* Check args */ + HDassert(storage); + + /* Reset index info */ + if(reset_addr) { + storage->idx_addr = HADDR_UNDEF; + storage->u.earray.dset_ohdr_addr = HADDR_UNDEF; + } /* end if */ + storage->u.earray.ea = NULL; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_idx_reset() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_dump + * + * Purpose: Dump indexing information to a stream. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Saturday, January 31, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream) +{ + FUNC_ENTER_STATIC_NOERR + + /* Check args */ + HDassert(storage); + HDassert(stream); + + HDfprintf(stream, " Address: %a\n", storage->idx_addr); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__earray_idx_dump() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__earray_idx_dest + * + * Purpose: Release indexing information in memory. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Saturday, January 31, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__earray_idx_dest(const H5D_chk_idx_info_t *idx_info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->storage); + + /* Check if the extensible array is open */ + if(idx_info->storage->u.earray.ea) { + /* Close extensible array */ + if(H5EA_close(idx_info->storage->u.earray.ea, idx_info->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close extensible array") + idx_info->storage->u.earray.ea = NULL; + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__earray_idx_dest() */ + diff --git a/src/H5Dfarray.c b/src/H5Dfarray.c new file mode 100644 index 0000000..af60bb1 --- /dev/null +++ b/src/H5Dfarray.c @@ -0,0 +1,1638 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * Copyright by the Board of Trustees of the University of Illinois. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the files COPYING and Copyright.html. COPYING can be found at the root * + * of the source code distribution tree; Copyright.html can be found at the * + * root level of an installed copy of the electronic HDF5 document set and * + * is linked from the top-level documents page. It can also be found at * + * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * + * access to either file, you may request a copy from help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* Programmer: Vailin Choi <vchoi@hdfgroup.org> + * Thursday, April 30, 2009 + * + * Purpose: Fixed array indexed (chunked) I/O functions. + * The chunk coordinate is mapped as an index into an array of + * disk addresses for the chunks. + * + */ + +/****************/ +/* Module Setup */ +/****************/ + +#include "H5Dmodule.h" /* This source code file is part of the H5D module */ + + +/***********/ +/* Headers */ +/***********/ +#include "H5private.h" /* Generic Functions */ +#include "H5Dpkg.h" /* Datasets */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5FAprivate.h" /* Fixed arrays */ +#include "H5FLprivate.h" /* Free Lists */ +#include "H5MFprivate.h" /* File space management */ +#include "H5VMprivate.h" /* Vector functions */ + + +/****************/ +/* Local Macros */ +/****************/ + +/* Value to fill unset array elements with */ +#define H5D_FARRAY_FILL HADDR_UNDEF +#define H5D_FARRAY_FILT_FILL {HADDR_UNDEF, 0, 0} + + +/******************/ +/* Local Typedefs */ +/******************/ + +/* Fixed array create/open user data */ +typedef struct H5D_farray_ctx_ud_t { + const H5F_t *f; /* Pointer to file info */ + uint32_t chunk_size; /* Size of chunk (bytes) */ +} H5D_farray_ctx_ud_t; + +/* Fixed array callback context */ +typedef struct H5D_farray_ctx_t { + size_t file_addr_len; /* Size of addresses in the file (bytes) */ + size_t chunk_size_len; /* Size of chunk sizes in the file (bytes) */ +} H5D_farray_ctx_t; + +/* User data for chunk deletion callback */ +typedef struct H5D_farray_del_ud_t { + H5F_t *f; /* File pointer for operation */ + hid_t dxpl_id; /* DXPL ID for operation */ + hbool_t filtered; /* Whether the chunks are filtered */ + uint32_t unfilt_size; /* Size of unfiltered chunk in bytes */ +} H5D_farray_del_ud_t; + +/* Fixed Array callback info for iteration over chunks */ +typedef struct H5D_farray_it_ud_t { + H5D_chunk_common_ud_t common; /* Common info for Fixed Array user data (must be first) */ + H5D_chunk_rec_t chunk_rec; /* Generic chunk record for callback */ + hbool_t filtered; /* Whether the chunks are filtered */ + H5D_chunk_cb_func_t cb; /* Chunk callback routine */ + void *udata; /* User data for chunk callback routine */ +} H5D_farray_it_ud_t; + +/* Native fixed array element for chunks w/filters */ +typedef struct H5D_farray_filt_elmt_t { + haddr_t addr; /* Address of chunk */ + uint32_t nbytes; /* Size of chunk (in file) */ + uint32_t filter_mask; /* Excluded filters for chunk */ +} H5D_farray_filt_elmt_t; + + +/********************/ +/* Local Prototypes */ +/********************/ + +/* Fixed Array iterator callbacks */ +static int H5D__farray_idx_iterate_cb(hsize_t idx, const void *_elmt, void *_udata); +static int H5D__farray_idx_delete_cb(hsize_t idx, const void *_elmt, void *_udata); + +/* Fixed array class callbacks for chunks w/o filters */ +static void *H5D__farray_crt_context(void *udata); +static herr_t H5D__farray_dst_context(void *ctx); +static herr_t H5D__farray_fill(void *nat_blk, size_t nelmts); +static herr_t H5D__farray_encode(void *raw, const void *elmt, size_t nelmts, + void *ctx); +static herr_t H5D__farray_decode(const void *raw, void *elmt, size_t nelmts, + void *ctx); +static herr_t H5D__farray_debug(FILE *stream, int indent, int fwidth, + hsize_t idx, const void *elmt); +static void *H5D__farray_crt_dbg_context(H5F_t *f, hid_t dxpl_id, + haddr_t obj_addr); +static herr_t H5D__farray_dst_dbg_context(void *dbg_ctx); + +/* Fixed 
array class callbacks for chunks w/filters */ +/* (some shared with callbacks for chunks w/o filters) */ +static herr_t H5D__farray_filt_fill(void *nat_blk, size_t nelmts); +static herr_t H5D__farray_filt_encode(void *raw, const void *elmt, size_t nelmts, + void *ctx); +static herr_t H5D__farray_filt_decode(const void *raw, void *elmt, size_t nelmts, + void *ctx); +static herr_t H5D__farray_filt_debug(FILE *stream, int indent, int fwidth, + hsize_t idx, const void *elmt); + +/* Chunked layout indexing callbacks */ +static herr_t H5D__farray_idx_init(const H5D_chk_idx_info_t *idx_info, + const H5S_t *space, haddr_t dset_ohdr_addr); +static herr_t H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info); +static hbool_t H5D__farray_idx_is_space_alloc(const H5O_storage_chunk_t *storage); +static herr_t H5D__farray_idx_insert(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_ud_t *udata, const H5D_t *dset); +static herr_t H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_ud_t *udata); +static int H5D__farray_idx_iterate(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_cb_func_t chunk_cb, void *chunk_udata); +static herr_t H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_common_ud_t *udata); +static herr_t H5D__farray_idx_delete(const H5D_chk_idx_info_t *idx_info); +static herr_t H5D__farray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, + const H5D_chk_idx_info_t *idx_info_dst); +static herr_t H5D__farray_idx_copy_shutdown(H5O_storage_chunk_t *storage_src, + H5O_storage_chunk_t *storage_dst, hid_t dxpl_id); +static herr_t H5D__farray_idx_size(const H5D_chk_idx_info_t *idx_info, + hsize_t *size); +static herr_t H5D__farray_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr); +static herr_t H5D__farray_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream); +static herr_t H5D__farray_idx_dest(const H5D_chk_idx_info_t *idx_info); + +/* Generic extensible array routines */ +static herr_t H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info); + +/*********************/ +/* Package Variables */ +/*********************/ + +/* Fixed array indexed chunk I/O ops */ +const H5D_chunk_ops_t H5D_COPS_FARRAY[1] = {{ + H5D__farray_idx_init, /* init */ + H5D__farray_idx_create, /* create */ + H5D__farray_idx_is_space_alloc, /* is_space_alloc */ + H5D__farray_idx_insert, /* insert */ + H5D__farray_idx_get_addr, /* get_addr */ + NULL, /* resize */ + H5D__farray_idx_iterate, /* iterate */ + H5D__farray_idx_remove, /* remove */ + H5D__farray_idx_delete, /* delete */ + H5D__farray_idx_copy_setup, /* copy_setup */ + H5D__farray_idx_copy_shutdown, /* copy_shutdown */ + H5D__farray_idx_size, /* size */ + H5D__farray_idx_reset, /* reset */ + H5D__farray_idx_dump, /* dump */ + H5D__farray_idx_dest /* destroy */ +}}; + + +/*****************************/ +/* Library Private Variables */ +/*****************************/ + + +/*******************/ +/* Local Variables */ +/*******************/ + +/* Fixed array class callbacks for dataset chunks w/o filters */ +const H5FA_class_t H5FA_CLS_CHUNK[1]={{ + H5FA_CLS_CHUNK_ID, /* Type of fixed array */ + "Chunk w/o filters", /* Name of fixed array class */ + sizeof(haddr_t), /* Size of native element */ + H5D__farray_crt_context, /* Create context */ + H5D__farray_dst_context, /* Destroy context */ + H5D__farray_fill, /* Fill block of missing elements callback */ + H5D__farray_encode, /* Element encoding callback */ + H5D__farray_decode, /* Element decoding callback */ + H5D__farray_debug, /* Element debugging 
callback */ + H5D__farray_crt_dbg_context, /* Create debugging context */ + H5D__farray_dst_dbg_context /* Destroy debugging context */ +}}; + +/* Fixed array class callbacks for dataset chunks w/filters */ +const H5FA_class_t H5FA_CLS_FILT_CHUNK[1]={{ + H5FA_CLS_FILT_CHUNK_ID, /* Type of fixed array */ + "Chunk w/filters", /* Name of fixed array class */ + sizeof(H5D_farray_filt_elmt_t), /* Size of native element */ + H5D__farray_crt_context, /* Create context */ + H5D__farray_dst_context, /* Destroy context */ + H5D__farray_filt_fill, /* Fill block of missing elements callback */ + H5D__farray_filt_encode, /* Element encoding callback */ + H5D__farray_filt_decode, /* Element decoding callback */ + H5D__farray_filt_debug, /* Element debugging callback */ + H5D__farray_crt_dbg_context, /* Create debugging context */ + H5D__farray_dst_dbg_context /* Destroy debugging context */ +}}; + +/* Declare a free list to manage the H5D_farray_ctx_t struct */ +H5FL_DEFINE_STATIC(H5D_farray_ctx_t); + +/* Declare a free list to manage the H5D_farray_ctx_ud_t struct */ +H5FL_DEFINE_STATIC(H5D_farray_ctx_ud_t); + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_crt_context + * + * Purpose: Create context for callbacks + * + * Return: Success: non-NULL + * Failure: NULL + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static void * +H5D__farray_crt_context(void *_udata) +{ + H5D_farray_ctx_t *ctx; /* Fixed array callback context */ + H5D_farray_ctx_ud_t *udata = (H5D_farray_ctx_ud_t *)_udata; /* User data for fixed array context */ + void *ret_value = NULL; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(udata); + HDassert(udata->f); + HDassert(udata->chunk_size > 0); + + /* Allocate new context structure */ + if(NULL == (ctx = H5FL_MALLOC(H5D_farray_ctx_t))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate fixed array client callback context") + + /* Initialize the context */ + ctx->file_addr_len = H5F_SIZEOF_ADDR(udata->f); + + /* Compute the size required for encoding the size of a chunk, allowing + * for an extra byte, in case the filter makes the chunk larger. 
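 * (As in the extensible array index above, a 1 MiB chunk works out to a 4-byte encoded chunk size here.)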
+ */ + ctx->chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)udata->chunk_size) + 8) / 8); + if(ctx->chunk_size_len > 8) + ctx->chunk_size_len = 8; + + /* Set return value */ + ret_value = ctx; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_crt_context() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_dst_context + * + * Purpose: Destroy context for callbacks + * + * Return: Success: non-NULL + * Failure: NULL + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_dst_context(void *_ctx) +{ + H5D_farray_ctx_t *ctx = (H5D_farray_ctx_t *)_ctx; /* Fixed array callback context */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(ctx); + + /* Release context structure */ + ctx = H5FL_FREE(H5D_farray_ctx_t, ctx); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_dst_context() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_fill + * + * Purpose: Fill "missing elements" in block of elements + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_fill(void *nat_blk, size_t nelmts) +{ + haddr_t fill_val = H5D_FARRAY_FILL; /* Value to fill elements with */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(nat_blk); + HDassert(nelmts); + + H5VM_array_fill(nat_blk, &fill_val, H5FA_CLS_CHUNK->nat_elmt_size, nelmts); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_fill() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_encode + * + * Purpose: Encode an element from "native" to "raw" form + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_encode(void *raw, const void *_elmt, size_t nelmts, void *_ctx) +{ + H5D_farray_ctx_t *ctx = (H5D_farray_ctx_t *)_ctx; /* Fixed array callback context */ + const haddr_t *elmt = (const haddr_t *)_elmt; /* Convenience pointer to native elements */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(raw); + HDassert(elmt); + HDassert(nelmts); + HDassert(ctx); + + /* Encode native elements into raw elements */ + while(nelmts) { + /* Encode element */ + /* (advances 'raw' pointer) */ + H5F_addr_encode_len(ctx->file_addr_len, (uint8_t **)&raw, *elmt); + + /* Advance native element pointer */ + elmt++; + + /* Decrement # of elements to encode */ + nelmts--; + } /* end while */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_encode() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_decode + * + * Purpose: Decode an element from "raw" to "native" form + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_decode(const void *_raw, void *_elmt, size_t nelmts, void *_ctx) +{ + H5D_farray_ctx_t *ctx = (H5D_farray_ctx_t *)_ctx; /* Fixed array callback context */ + haddr_t *elmt = (haddr_t *)_elmt; /* Convenience pointer to native 
elements */ + const uint8_t *raw = (const uint8_t *)_raw; /* Convenience pointer to raw elements */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(raw); + HDassert(elmt); + HDassert(nelmts); + + /* Decode raw elements into native elements */ + while(nelmts) { + /* Decode element */ + /* (advances 'raw' pointer) */ + H5F_addr_decode_len(ctx->file_addr_len, &raw, elmt); + + /* Advance native element pointer */ + elmt++; + + /* Decrement # of elements to decode */ + nelmts--; + } /* end while */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_decode() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_debug + * + * Purpose: Display an element for debugging + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_debug(FILE *stream, int indent, int fwidth, hsize_t idx, + const void *elmt) +{ + char temp_str[128]; /* Temporary string, for formatting */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(stream); + HDassert(elmt); + + /* Print element */ + sprintf(temp_str, "Element #%llu:", (unsigned long long)idx); + HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, temp_str, + *(const haddr_t *)elmt); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_debug() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_crt_dbg_context + * + * Purpose: Create context for debugging callback + * (get the layout message in the specified object header) + * + * Return: Success: non-NULL + * Failure: NULL + * + * Programmer: Vailin Choi + * 5th August, 2009 + * + *------------------------------------------------------------------------- + */ +static void * +H5D__farray_crt_dbg_context(H5F_t *f, hid_t dxpl_id, haddr_t obj_addr) +{ + H5D_farray_ctx_ud_t *dbg_ctx = NULL; /* Context for fixed array callback */ + H5O_loc_t obj_loc; /* Pointer to an object's location */ + hbool_t obj_opened = FALSE; /* Flag to indicate that the object header was opened */ + H5O_layout_t layout; /* Layout message */ + void *ret_value = NULL; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(f); + HDassert(H5F_addr_defined(obj_addr)); + + /* Allocate context for debugging callback */ + if(NULL == (dbg_ctx = H5FL_MALLOC(H5D_farray_ctx_ud_t))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate fixed array client callback context") + + /* Set up the object header location info */ + H5O_loc_reset(&obj_loc); + obj_loc.file = f; + obj_loc.addr = obj_addr; + + /* Open the object header where the layout message resides */ + if(H5O_open(&obj_loc) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, NULL, "can't open object header") + obj_opened = TRUE; + + /* Read the layout message */ + if(NULL == H5O_msg_read(&obj_loc, H5O_LAYOUT_ID, &layout, dxpl_id)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "can't get layout info") + + /* close the object header */ + if(H5O_close(&obj_loc) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header") + + /* Create user data */ + dbg_ctx->f = f; + dbg_ctx->chunk_size = layout.u.chunk.size; + + /* Set return value */ + ret_value = dbg_ctx; + +done: + /* Cleanup on error */ + if(ret_value == NULL) { + /* Release context structure */ + if(dbg_ctx) + dbg_ctx = H5FL_FREE(H5D_farray_ctx_ud_t, dbg_ctx); + + /* Close object 
header */ + if(obj_opened) { + if(H5O_close(&obj_loc) < 0) + HDONE_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, NULL, "can't close object header") + } /* end if */ + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_crt_dbg_context() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_dst_dbg_context + * + * Purpose: Destroy context for debugging callback + * (free the layout message from the specified object header) + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Quincey Koziol + * 24th September, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_dst_dbg_context(void *_dbg_ctx) +{ + H5D_farray_ctx_ud_t *dbg_ctx = (H5D_farray_ctx_ud_t *)_dbg_ctx; /* Context for fixed array callback */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(dbg_ctx); + + /* Release context structure */ + dbg_ctx = H5FL_FREE(H5D_farray_ctx_ud_t, dbg_ctx); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_dst_dbg_context() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_filt_fill + * + * Purpose: Fill "missing elements" in block of elements + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_filt_fill(void *nat_blk, size_t nelmts) +{ + H5D_farray_filt_elmt_t fill_val = H5D_FARRAY_FILT_FILL; /* Value to fill elements with */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(nat_blk); + HDassert(nelmts); + HDassert(sizeof(fill_val) == H5FA_CLS_FILT_CHUNK->nat_elmt_size); + + H5VM_array_fill(nat_blk, &fill_val, H5FA_CLS_FILT_CHUNK->nat_elmt_size, nelmts); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_filt_fill() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_filt_encode + * + * Purpose: Encode an element from "native" to "raw" form + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_filt_encode(void *_raw, const void *_elmt, size_t nelmts, void *_ctx) +{ + H5D_farray_ctx_t *ctx = (H5D_farray_ctx_t *)_ctx; /* Fixed array callback context */ + uint8_t *raw = (uint8_t *)_raw; /* Convenience pointer to raw elements */ + const H5D_farray_filt_elmt_t *elmt = (const H5D_farray_filt_elmt_t *)_elmt; /* Convenience pointer to native elements */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(raw); + HDassert(elmt); + HDassert(nelmts); + HDassert(ctx); + + /* Encode native elements into raw elements */ + while(nelmts) { + /* Encode element */ + /* (advances 'raw' pointer) */ + H5F_addr_encode_len(ctx->file_addr_len, &raw, elmt->addr); + UINT64ENCODE_VAR(raw, elmt->nbytes, ctx->chunk_size_len); + UINT32ENCODE(raw, elmt->filter_mask); + + /* Advance native element pointer */ + elmt++; + + /* Decrement # of elements to encode */ + nelmts--; + } /* end while */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_filt_encode() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_filt_decode + * + * Purpose: Decode an element from "raw" to "native" form + * + * Return: Success: 
non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_filt_decode(const void *_raw, void *_elmt, size_t nelmts, void *_ctx) +{ + H5D_farray_ctx_t *ctx = (H5D_farray_ctx_t *)_ctx; /* Fixed array callback context */ + H5D_farray_filt_elmt_t *elmt = (H5D_farray_filt_elmt_t *)_elmt; /* Convenience pointer to native elements */ + const uint8_t *raw = (const uint8_t *)_raw; /* Convenience pointer to raw elements */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(raw); + HDassert(elmt); + HDassert(nelmts); + + /* Decode raw elements into native elements */ + while(nelmts) { + /* Decode element */ + /* (advances 'raw' pointer) */ + H5F_addr_decode_len(ctx->file_addr_len, &raw, &elmt->addr); + UINT64DECODE_VAR(raw, elmt->nbytes, ctx->chunk_size_len); + UINT32DECODE(raw, elmt->filter_mask); + + /* Advance native element pointer */ + elmt++; + + /* Decrement # of elements to decode */ + nelmts--; + } /* end while */ + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_filt_decode() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_filt_debug + * + * Purpose: Display an element for debugging + * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_filt_debug(FILE *stream, int indent, int fwidth, hsize_t idx, + const void *_elmt) +{ + const H5D_farray_filt_elmt_t *elmt = (const H5D_farray_filt_elmt_t *)_elmt; /* Convenience pointer to native elements */ + char temp_str[128]; /* Temporary string, for formatting */ + + FUNC_ENTER_STATIC_NOERR + + /* Sanity checks */ + HDassert(stream); + HDassert(elmt); + + /* Print element */ + sprintf(temp_str, "Element #%llu:", (unsigned long long)idx); + HDfprintf(stream, "%*s%-*s {%a, %u, %0x}\n", indent, "", fwidth, temp_str, + elmt->addr, elmt->nbytes, elmt->filter_mask); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_filt_debug() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_init + * + * Purpose: Initialize the indexing information for a dataset. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * Wednensday, May 23, 2012 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_init(const H5D_chk_idx_info_t *idx_info, const H5S_t H5_ATTR_UNUSED *space, haddr_t dset_ohdr_addr) +{ + FUNC_ENTER_STATIC_NOERR + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(dset_ohdr_addr)); + + idx_info->storage->u.farray.dset_ohdr_addr = dset_ohdr_addr; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_idx_init() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_open + * + * Purpose: Opens an existing fixed array and initializes + * the layout struct with information about the storage. 
+ * + * Return: Success: non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_open(const H5D_chk_idx_info_t *idx_info) +{ + H5D_farray_ctx_ud_t udata; /* User data for fixed array open call */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(H5D_CHUNK_IDX_FARRAY == idx_info->layout->idx_type); + HDassert(idx_info->storage); + HDassert(H5D_CHUNK_IDX_FARRAY == idx_info->storage->idx_type); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(NULL == idx_info->storage->u.farray.fa); + + /* Set up the user data */ + udata.f = idx_info->f; + udata.chunk_size = idx_info->layout->size; + + /* Open the fixed array for the chunk index */ + if(NULL == (idx_info->storage->u.farray.fa = H5FA_open(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &udata))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't open fixed array") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_open() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_create + * + * Purpose: Creates a new indexed-storage fixed array and initializes + * the layout struct with information about the storage. The + * struct should be immediately written to the object header. + * + * This function must be called before passing LAYOUT to any of + * the other indexed storage functions! + * + * Return: Non-negative on success (with the LAYOUT argument initialized + * and ready to write to an object header). Negative on failure. + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_create(const H5D_chk_idx_info_t *idx_info) +{ + H5FA_create_t cparam; /* Fixed array creation parameters */ + H5D_farray_ctx_ud_t udata; /* User data for fixed array create call */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(!H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(NULL == idx_info->storage->u.farray.fa); + HDassert(idx_info->layout->nchunks); + + /* General parameters */ + if(idx_info->pline->nused > 0) { + unsigned chunk_size_len; /* Size of encoded chunk size */ + + /* Compute the size required for encoding the size of a chunk, allowing + * for an extra byte, in case the filter makes the chunk larger. 
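 * (Worked example, assuming 8-byte file addresses: a 64 KiB chunk has
 * H5VM_log2_gen(65536) == 16, so (16 + 8) / 8 = 3 bytes cover the nominal
 * chunk size and the extra byte gives chunk_size_len = 4; the filtered
 * raw element below is then 8 + 4 + 4 = 16 bytes for the address, the
 * encoded size and the filter mask.)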
+ */ + chunk_size_len = 1 + ((H5VM_log2_gen((uint64_t)idx_info->layout->size) + 8) / 8); + if(chunk_size_len > 8) + chunk_size_len = 8; + + cparam.cls = H5FA_CLS_FILT_CHUNK; + cparam.raw_elmt_size = (uint8_t)(H5F_SIZEOF_ADDR(idx_info->f) + chunk_size_len + 4); + } /* end if */ + else { + cparam.cls = H5FA_CLS_CHUNK; + cparam.raw_elmt_size = (uint8_t)H5F_SIZEOF_ADDR(idx_info->f); + } /* end else */ + cparam.max_dblk_page_nelmts_bits = idx_info->layout->u.farray.cparam.max_dblk_page_nelmts_bits; + HDassert(cparam.max_dblk_page_nelmts_bits > 0); + cparam.nelmts = idx_info->layout->max_nchunks; + + /* Set up the user data */ + udata.f = idx_info->f; + udata.chunk_size = idx_info->layout->size; + + /* Create the fixed array for the chunk index */ + if(NULL == (idx_info->storage->u.farray.fa = H5FA_create(idx_info->f, idx_info->dxpl_id, &cparam, &udata))) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't create fixed array") + + /* Get the address of the fixed array in file */ + if(H5FA_get_addr(idx_info->storage->u.farray.fa, &(idx_info->storage->idx_addr)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query fixed array address") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_create() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_is_space_alloc + * + * Purpose: Query if space is allocated for index method + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static hbool_t +H5D__farray_idx_is_space_alloc(const H5O_storage_chunk_t *storage) +{ + FUNC_ENTER_STATIC_NOERR + + /* Check args */ + HDassert(storage); + + FUNC_LEAVE_NOAPI((hbool_t)H5F_addr_defined(storage->idx_addr)) +} /* end H5D__farray_idx_is_space_alloc() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_insert + * + * Purpose: Insert chunk address into the indexing structure. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi; 5 May 2014 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_insert(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata, + const H5D_t H5_ATTR_UNUSED *dset) +{ + H5FA_t *fa; /* Pointer to fixed array structure */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(udata); + + /* Check if the fixed array is open yet */ + if(NULL == idx_info->storage->u.farray.fa) { + /* Open the fixed array in file */ + if(H5D__farray_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array") + } else /* Patch the top level file pointer contained in fa if needed */ + H5FA_patch_file(idx_info->storage->u.farray.fa, idx_info->f); + + /* Set convenience pointer to fixed array structure */ + fa = idx_info->storage->u.farray.fa; + + if(!H5F_addr_defined(udata->chunk_block.offset)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "The chunk should have allocated already") + if(udata->chunk_idx != (udata->chunk_idx & 0xffffffff)) /* negative value */ + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "chunk index must be less than 2^32") + + /* Check for filters on chunks */ + if(idx_info->pline->nused > 0) { + H5D_farray_filt_elmt_t elmt; /* Fixed array element */ + + elmt.addr = udata->chunk_block.offset; + H5_CHECKED_ASSIGN(elmt.nbytes, uint32_t, udata->chunk_block.length, hsize_t); + elmt.filter_mask = udata->filter_mask; + + /* Set the info for the chunk */ + if(H5FA_set(fa, idx_info->dxpl_id, udata->chunk_idx, &elmt) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set chunk info") + } /* end if */ + else { + /* Set the address for the chunk */ + if(H5FA_set(fa, idx_info->dxpl_id, udata->chunk_idx, &udata->chunk_block.offset) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set chunk address") + } /* end else */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__farray_idx_insert() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_get_addr + * + * Purpose: Get the file address of a chunk if file space has been + * assigned. Save the retrieved information in the udata + * supplied. 
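 * (Illustration: the fixed-array slot used below is the chunk's linear
 * index computed from its scaled coordinates against the maximal chunk
 * counts; e.g. with at most 4 chunks along the second dimension of a
 * 2-D dataset, the chunk at scaled offset (1, 2) maps to index
 * 1 * 4 + 2 = 6.)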
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *udata) +{ + H5FA_t *fa; /* Pointer to fixed array structure */ + hsize_t idx; /* Array index of chunk */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(udata); + + /* Check if the fixed array is open yet */ + if(NULL == idx_info->storage->u.farray.fa) { + /* Open the fixed array in file */ + if(H5D__farray_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array") + } else /* Patch the top level file pointer contained in fa if needed */ + H5FA_patch_file(idx_info->storage->u.farray.fa, idx_info->f); + + /* Set convenience pointer to fixed array structure */ + fa = idx_info->storage->u.farray.fa; + + /* Calculate the index of this chunk */ + idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->max_down_chunks, udata->common.scaled); + + udata->chunk_idx = idx; + + /* Check for filters on chunks */ + if(idx_info->pline->nused > 0) { + H5D_farray_filt_elmt_t elmt; /* Fixed array element */ + + /* Get the information for the chunk */ + if(H5FA_get(fa, idx_info->dxpl_id, idx, &elmt) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info") + + /* Set the info for the chunk */ + udata->chunk_block.offset = elmt.addr; + udata->chunk_block.length = elmt.nbytes; + udata->filter_mask = elmt.filter_mask; + } /* end if */ + else { + /* Get the address for the chunk */ + if(H5FA_get(fa, idx_info->dxpl_id, idx, &udata->chunk_block.offset) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address") + + /* Update the other (constant) information for the chunk */ + udata->chunk_block.length = idx_info->layout->size; + udata->filter_mask = 0; + } /* end else */ + + if(!H5F_addr_defined(udata->chunk_block.offset)) + udata->chunk_block.length = 0; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__farray_idx_get_addr() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_iterate_cb + * + * Purpose: Callback routine for fixed array element iteration. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static int +H5D__farray_idx_iterate_cb(hsize_t H5_ATTR_UNUSED idx, const void *_elmt, void *_udata) +{ + H5D_farray_it_ud_t *udata = (H5D_farray_it_ud_t *)_udata; /* User data */ + unsigned ndims; /* Rank of chunk */ + int curr_dim; /* Current dimension */ + int ret_value = H5_ITER_CONT; /* Return value */ + + FUNC_ENTER_STATIC_NOERR + + /* Compose generic chunk record for callback */ + if(udata->filtered) { + const H5D_farray_filt_elmt_t *filt_elmt = (const H5D_farray_filt_elmt_t *)_elmt; + + udata->chunk_rec.chunk_addr = filt_elmt->addr; + udata->chunk_rec.nbytes = filt_elmt->nbytes; + udata->chunk_rec.filter_mask = filt_elmt->filter_mask; + } /* end if */ + else + udata->chunk_rec.chunk_addr = *(const haddr_t *)_elmt; + + /* Make "generic chunk" callback */ + if(H5F_addr_defined(udata->chunk_rec.chunk_addr)) + if((ret_value = (udata->cb)(&udata->chunk_rec, udata->udata)) < 0) + HERROR(H5E_DATASET, H5E_CALLBACK, "failure in generic chunk iterator callback"); + + /* Update coordinates of chunk in dataset */ + ndims = udata->common.layout->ndims - 1; + HDassert(ndims > 0); + curr_dim = (int)(ndims - 1); + while(curr_dim >= 0) { + /* Increment coordinate in current dimension */ + udata->chunk_rec.scaled[curr_dim]++; + + /* Check if we went off the end of the current dimension */ + if(udata->chunk_rec.scaled[curr_dim] >= udata->common.layout->chunks[curr_dim]) { + /* Reset coordinate & move to next faster dimension */ + udata->chunk_rec.scaled[curr_dim] = 0; + curr_dim--; + } /* end if */ + else + break; + } /* end while */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__farray_idx_iterate_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_iterate + * + * Purpose: Iterate over the chunks in an index, making a callback + * for each one. 
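 * (Illustration: the callback above advances the scaled chunk coordinates
 * odometer-style, with the last dimension varying fastest; a dataset of
 * 2 x 3 chunks is visited as (0,0), (0,1), (0,2), (1,0), (1,1), (1,2).)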
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static int +H5D__farray_idx_iterate(const H5D_chk_idx_info_t *idx_info, + H5D_chunk_cb_func_t chunk_cb, void *chunk_udata) +{ + H5FA_t *fa; /* Pointer to fixed array structure */ + H5FA_stat_t fa_stat; /* Fixed array statistics */ + int ret_value = FAIL; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(chunk_cb); + HDassert(chunk_udata); + + /* Check if the fixed array is open yet */ + if(NULL == idx_info->storage->u.farray.fa) { + /* Open the fixed array in file */ + if(H5D__farray_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array") + } else /* Patch the top level file pointer contained in fa if needed */ + H5FA_patch_file(idx_info->storage->u.farray.fa, idx_info->f); + + /* Set convenience pointer to fixed array structure */ + fa = idx_info->storage->u.farray.fa; + + /* Get the fixed array statistics */ + if(H5FA_get_stats(fa, &fa_stat) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query fixed array statistics") + + /* Check if there are any array elements */ + if(fa_stat.nelmts > 0) { + H5D_farray_it_ud_t udata; /* User data for iteration callback */ + + /* Initialize userdata */ + HDmemset(&udata, 0, sizeof udata); + udata.common.layout = idx_info->layout; + udata.common.storage = idx_info->storage; + HDmemset(&udata.chunk_rec, 0, sizeof(udata.chunk_rec)); + udata.filtered = (idx_info->pline->nused > 0); + if(!udata.filtered) { + udata.chunk_rec.nbytes = idx_info->layout->size; + udata.chunk_rec.filter_mask = 0; + } /* end if */ + udata.cb = chunk_cb; + udata.udata = chunk_udata; + + /* Iterate over the fixed array elements */ + if((ret_value = H5FA_iterate(fa, idx_info->dxpl_id, H5D__farray_idx_iterate_cb, &udata)) < 0) + HERROR(H5E_DATASET, H5E_BADITER, "unable to iterate over fixed array chunk index"); + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_iterate() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_remove + * + * Purpose: Remove chunk from index. 
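 * (The routine below frees the chunk's raw data in the file and resets
 * its fixed-array slot to HADDR_UNDEF; the array itself keeps its size,
 * since it is allocated for the dataset's maximal number of chunks.)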
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t *udata) +{ + H5FA_t *fa; /* Pointer to fixed array structure */ + hsize_t idx; /* Array index of chunk */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(udata); + + /* Check if the fixed array is open yet */ + if(NULL == idx_info->storage->u.farray.fa) + /* Open the fixed array in file */ + if(H5D__farray_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array") + + /* Set convenience pointer to fixed array structure */ + fa = idx_info->storage->u.farray.fa; + + /* Calculate the index of this chunk */ + idx = H5VM_array_offset_pre((idx_info->layout->ndims - 1), idx_info->layout->max_down_chunks, udata->scaled); + + /* Check for filters on chunks */ + if(idx_info->pline->nused > 0) { + H5D_farray_filt_elmt_t elmt; /* Fixed array element */ + + /* Get the info about the chunk for the index */ + if(H5FA_get(fa, idx_info->dxpl_id, idx, &elmt) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk info") + + /* Remove raw data chunk from file */ + HDassert(H5F_addr_defined(elmt.addr)); + H5_CHECK_OVERFLOW(elmt.nbytes, /*From: */uint32_t, /*To: */hsize_t); + if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, elmt.addr, (hsize_t)elmt.nbytes) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + + /* Reset the info about the chunk for the index */ + elmt.addr = HADDR_UNDEF; + elmt.nbytes = 0; + elmt.filter_mask = 0; + if(H5FA_set(fa, idx_info->dxpl_id, idx, &elmt) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to reset chunk info") + } /* end if */ + else { + haddr_t addr = HADDR_UNDEF; /* Chunk address */ + + /* Get the address of the chunk for the index */ + if(H5FA_get(fa, idx_info->dxpl_id, idx, &addr) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address") + + /* Remove raw data chunk from file */ + HDassert(H5F_addr_defined(addr)); + H5_CHECK_OVERFLOW(idx_info->layout->size, /*From: */uint32_t, /*To: */hsize_t); + if(H5MF_xfree(idx_info->f, H5FD_MEM_DRAW, idx_info->dxpl_id, addr, (hsize_t)idx_info->layout->size) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to free chunk") + + /* Reset the address of the chunk for the index */ + addr = HADDR_UNDEF; + if(H5FA_set(fa, idx_info->dxpl_id, idx, &addr) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "unable to reset chunk address") + } /* end else */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__farray_idx_remove() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_delete_cb + * + * Purpose: Delete space for chunk in file + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static int +H5D__farray_idx_delete_cb(hsize_t H5_ATTR_UNUSED idx, const void *_elmt, void *_udata) +{ + H5D_farray_del_ud_t *udata = (H5D_farray_del_ud_t *)_udata; /* User 
data for callback */ + haddr_t chunk_addr; /* Address of chunk */ + uint32_t nbytes; /* Size of chunk */ + int ret_value = H5_ITER_CONT; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(_elmt); + HDassert(udata); + HDassert(udata->f); + + /* Check for filtered elements */ + if(udata->filtered) { + const H5D_farray_filt_elmt_t *filt_elmt = (const H5D_farray_filt_elmt_t *)_elmt; + + chunk_addr = filt_elmt->addr; + nbytes = filt_elmt->nbytes; + } /* end if */ + else { + chunk_addr = *(const haddr_t *)_elmt; + nbytes = udata->unfilt_size; + } /* end else */ + + /* Remove raw data chunk from file */ + H5_CHECK_OVERFLOW(nbytes, /*From: */uint32_t, /*To: */hsize_t); + if(H5MF_xfree(udata->f, H5FD_MEM_DRAW, udata->dxpl_id, chunk_addr, (hsize_t)nbytes) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, H5_ITER_ERROR, "unable to free chunk") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_delete_cb() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_delete + * + * Purpose: Delete index and raw data storage for entire dataset + * (i.e. all chunks) + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_delete(const H5D_chk_idx_info_t *idx_info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + + /* Check if the index data structure has been allocated */ + if(H5F_addr_defined(idx_info->storage->idx_addr)) { + H5FA_t *fa; /* Pointer to fixed array structure */ + H5FA_stat_t fa_stat; /* Fixed array statistics */ + H5D_farray_ctx_ud_t ctx_udata; /* User data for fixed array open call */ + + /* Check if the fixed array is open yet */ + if(NULL == idx_info->storage->u.farray.fa) + /* Open the fixed array in file */ + if(H5D__farray_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array") + + /* Set convenience pointer to fixed array structure */ + fa = idx_info->storage->u.farray.fa; + + /* Get the fixed array statistics */ + if(H5FA_get_stats(fa, &fa_stat) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query fixed array statistics") + + /* Check if there are any array elements */ + if(fa_stat.nelmts > 0) { + H5D_farray_del_ud_t udata; /* User data for callback */ + + /* Initialize user data for callback */ + udata.f = idx_info->f; + udata.dxpl_id = idx_info->dxpl_id; + udata.filtered = (idx_info->pline->nused > 0); + udata.unfilt_size = idx_info->layout->size; + + /* Iterate over the chunk addresses in the fixed array, deleting each chunk */ + if(H5FA_iterate(fa, idx_info->dxpl_id, H5D__farray_idx_delete_cb, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk addresses") + } /* end if */ + + /* Close fixed array */ + if(H5FA_close(idx_info->storage->u.farray.fa, idx_info->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array") + idx_info->storage->u.farray.fa = NULL; + + /* Set up the user data */ + ctx_udata.f = idx_info->f; + ctx_udata.chunk_size = idx_info->layout->size; + + /* Delete fixed array */ + if(H5FA_delete(idx_info->f, idx_info->dxpl_id, idx_info->storage->idx_addr, &ctx_udata) < 0) + 
HGOTO_ERROR(H5E_DATASET, H5E_CANTDELETE, FAIL, "unable to delete chunk fixed array") + idx_info->storage->idx_addr = HADDR_UNDEF; + } /* end if */ + else + HDassert(NULL == idx_info->storage->u.farray.fa); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_delete() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_copy_setup + * + * Purpose: Set up any necessary information for copying chunks + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_copy_setup(const H5D_chk_idx_info_t *idx_info_src, + const H5D_chk_idx_info_t *idx_info_dst) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info_src); + HDassert(idx_info_src->f); + HDassert(idx_info_src->pline); + HDassert(idx_info_src->layout); + HDassert(idx_info_src->storage); + HDassert(idx_info_dst); + HDassert(idx_info_dst->f); + HDassert(idx_info_dst->pline); + HDassert(idx_info_dst->layout); + HDassert(idx_info_dst->storage); + HDassert(!H5F_addr_defined(idx_info_dst->storage->idx_addr)); + + /* Check if the source fixed array is open yet */ + if(NULL == idx_info_src->storage->u.farray.fa) + /* Open the fixed array in file */ + if(H5D__farray_idx_open(idx_info_src) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array") + + /* Set copied metadata tag */ + H5_BEGIN_TAG(idx_info_dst->dxpl_id, H5AC__COPIED_TAG, FAIL); + + /* Create the fixed array that describes chunked storage in the dest. file */ + if(H5D__farray_idx_create(idx_info_dst) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunked storage") + HDassert(H5F_addr_defined(idx_info_dst->storage->idx_addr)); + + /* Reset metadata tag */ + H5_END_TAG(FAIL); + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_copy_setup() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_copy_shutdown + * + * Purpose: Shutdown any information from copying chunks + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_copy_shutdown(H5O_storage_chunk_t *storage_src, + H5O_storage_chunk_t *storage_dst, hid_t dxpl_id) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(storage_src); + HDassert(storage_src->u.farray.fa); + HDassert(storage_dst); + HDassert(storage_dst->u.farray.fa); + + /* Close fixed arrays */ + if(H5FA_close(storage_src->u.farray.fa, dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array") + storage_src->u.farray.fa = NULL; + if(H5FA_close(storage_dst->u.farray.fa, dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array") + storage_dst->u.farray.fa = NULL; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_copy_shutdown() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_size + * + * Purpose: Retrieve the amount of index storage for chunked dataset + * + * Return: Success: Non-negative + * Failure: negative + * + * Programmer: Vailin Choi + * Thursday, April 
30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_size(const H5D_chk_idx_info_t *idx_info, hsize_t *index_size) +{ + H5FA_t *fa; /* Pointer to fixed array structure */ + H5FA_stat_t fa_stat; /* Fixed array statistics */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->pline); + HDassert(idx_info->layout); + HDassert(idx_info->storage); + HDassert(H5F_addr_defined(idx_info->storage->idx_addr)); + HDassert(index_size); + + /* Open the fixed array in file */ + if(H5D__farray_idx_open(idx_info) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "can't open fixed array") + + /* Set convenience pointer to fixed array structure */ + fa = idx_info->storage->u.farray.fa; + + /* Get the fixed array statistics */ + if(H5FA_get_stats(fa, &fa_stat) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query fixed array statistics") + + *index_size = fa_stat.hdr_size; + *index_size += fa_stat.dblk_size; + +done: + if(idx_info->storage->u.farray.fa) { + if(H5FA_close(idx_info->storage->u.farray.fa, idx_info->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array") + idx_info->storage->u.farray.fa = NULL; + } /* end if */ + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_size() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_reset + * + * Purpose: Reset indexing information. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_reset(H5O_storage_chunk_t *storage, hbool_t reset_addr) +{ + FUNC_ENTER_STATIC_NOERR + + /* Check args */ + HDassert(storage); + + /* Reset index info */ + if(reset_addr) + storage->idx_addr = HADDR_UNDEF; + storage->u.farray.fa = NULL; + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_idx_reset() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_dump + * + * Purpose: Dump indexing information to a stream. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_dump(const H5O_storage_chunk_t *storage, FILE *stream) +{ + FUNC_ENTER_STATIC_NOERR + + /* Check args */ + HDassert(storage); + HDassert(stream); + + HDfprintf(stream, " Address: %a\n", storage->idx_addr); + + FUNC_LEAVE_NOAPI(SUCCEED) +} /* end H5D__farray_idx_dump() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__farray_idx_dest + * + * Purpose: Release indexing information in memory. 
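 * (This closes the in-memory fixed array handle cached in the chunk
 * storage struct; the index on disk is left intact.)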
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Vailin Choi + * Thursday, April 30, 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5D__farray_idx_dest(const H5D_chk_idx_info_t *idx_info) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC + + /* Check args */ + HDassert(idx_info); + HDassert(idx_info->f); + HDassert(idx_info->storage); + + /* Check if the fixed array is open */ + if(idx_info->storage->u.farray.fa) { + /* Close fixed array */ + if(H5FA_close(idx_info->storage->u.farray.fa, idx_info->dxpl_id) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCLOSEOBJ, FAIL, "unable to close fixed array") + idx_info->storage->u.farray.fa = NULL; + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__farray_idx_dest() */ + diff --git a/src/H5Dint.c b/src/H5Dint.c index 721d390..18be105 100644 --- a/src/H5Dint.c +++ b/src/H5Dint.c @@ -648,7 +648,7 @@ H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id, const H5T_t *type) { htri_t relocatable; /* Flag whether the type is relocatable */ htri_t immutable; /* Flag whether the type is immutable */ - hbool_t use_latest_format; /* Flag indicating the newest file format should be used */ + hbool_t use_latest_format; /* Flag indicating the 'latest datatype version support' is enabled */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -666,8 +666,8 @@ H5D__init_type(H5F_t *file, const H5D_t *dset, hid_t type_id, const H5T_t *type) if((immutable = H5T_is_immutable(type)) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't check datatype?") - /* Get the file's 'use the latest version of the format' flag */ - use_latest_format = H5F_USE_LATEST_FORMAT(file); + /* Get the file's 'use the latest datatype version support' flag */ + use_latest_format = H5F_USE_LATEST_FLAGS(file, H5F_LATEST_DATATYPE); /* Copy the datatype if it's a custom datatype or if it'll change when it's location is changed */ if(!immutable || relocatable || use_latest_format) { @@ -759,7 +759,7 @@ done: static herr_t H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space) { - hbool_t use_latest_format; /* Flag indicating the newest file format should be used */ + hbool_t use_latest_format; /* Flag indicating the 'latest dataspace version support' is enabled */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -769,8 +769,8 @@ H5D__init_space(H5F_t *file, const H5D_t *dset, const H5S_t *space) HDassert(dset); HDassert(space); - /* Get the file's 'use the latest version of the format' flag */ - use_latest_format = H5F_USE_LATEST_FORMAT(file); + /* Get the file's 'use the latest dataspace version support' flag */ + use_latest_format = H5F_USE_LATEST_FLAGS(file, H5F_LATEST_DATASPACE); /* Copy dataspace for dataset */ if(NULL == (dset->shared->space = H5S_copy(space, FALSE, TRUE))) @@ -815,7 +815,6 @@ H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset, hid_t dapl_id) H5O_loc_t *oloc = NULL; /* Dataset's object location */ H5O_layout_t *layout; /* Dataset's layout information */ H5T_t *type; /* Dataset's datatype */ - hbool_t use_latest_format; /* Flag indicating the newest file format should be used */ H5O_fill_t *fill_prop; /* Pointer to dataset's fill value information */ H5D_fill_value_t fill_status; /* Fill value status */ hbool_t fill_changed = FALSE; /* Flag indicating the fill value was changed */ @@ -834,9 +833,6 @@ H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset, hid_t dapl_id) type 
= dset->shared->type; fill_prop = &dset->shared->dcpl_cache.fill; - /* Get the file's 'use the latest version of the format' flag */ - use_latest_format = H5F_USE_LATEST_FORMAT(file); - /* Retrieve "defined" status of fill value */ if(H5P_is_fill_value_defined(fill_prop, &fill_status) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't tell if fill value defined") @@ -913,8 +909,8 @@ H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset, hid_t dapl_id) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update new fill value header message") /* If there is valid information for the old fill value struct, add it */ - /* (only if we aren't trying to write the latest version of the file format) */ - if(fill_prop->buf && !use_latest_format) { + /* (only if we aren't trying to write the 'latest fill message version support') */ + if(fill_prop->buf && !(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_FILL_MSG))) { H5O_fill_t old_fill_prop; /* Copy of fill value property, for writing as "old" fill value */ /* Shallow copy the fill value property */ @@ -966,10 +962,10 @@ H5D__update_oh_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset, hid_t dapl_id) #endif /* H5O_ENABLE_BOGUS */ /* Add a modification time message, if using older format. */ - /* (If using the latest format, the modification time is part of the object + /* (If using the latest 'no modification time message' version support, the modification time is part of the object * header and doesn't use a separate message -QAK) */ - if(!use_latest_format) + if(!(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_NO_MOD_TIME_MSG))) if(H5O_touch_oh(file, dxpl_id, oh, TRUE) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update modification time message") @@ -1208,14 +1204,28 @@ H5D__create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id, } /* end if */ /* Set the latest version of the layout, pline & fill messages, if requested */ - if(H5F_USE_LATEST_FORMAT(file)) { + if(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_DSET_MSG_FLAGS)) { /* Set the latest version for the I/O pipeline message */ - if(H5O_pline_set_latest_version(&new_dset->shared->dcpl_cache.pline) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of I/O filter pipeline") - - /* Set the latest version for the fill value message */ - if(H5O_fill_set_latest_version(&new_dset->shared->dcpl_cache.fill) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of fill value") + if(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_PLINE_MSG)) + if(H5O_pline_set_latest_version(&new_dset->shared->dcpl_cache.pline) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of I/O filter pipeline") + + /* Set the latest version for the fill message */ + if(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_FILL_MSG)) + /* Set the latest version for the fill value message */ + if(H5O_fill_set_latest_version(&new_dset->shared->dcpl_cache.fill) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of fill value") + + /* Set the latest version for the layout message */ + if(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_LAYOUT_MSG)) + /* Set the latest version for the layout message */ + if(H5D__layout_set_latest_version(&new_dset->shared->layout, new_dset->shared->space, &new_dset->shared->dcpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest version of layout") + } /* end if */ + else if(new_dset->shared->layout.version >= H5O_LAYOUT_VERSION_4) { + /* Use latest indexing type for layout message version >= 4 */ + 
if(H5D__layout_set_latest_indexing(&new_dset->shared->layout, new_dset->shared->space, &new_dset->shared->dcpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest indexing") } /* end if */ /* Check if this dataset is going into a parallel file and set space allocation time */ @@ -2658,11 +2668,19 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id) * and if the chunks are written *------------------------------------------------------------------------- */ - if(shrink && H5D_CHUNKED == dset->shared->layout.type && - (*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) - /* Remove excess chunks */ - if(H5D__chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks") + if(H5D_CHUNKED == dset->shared->layout.type) { + if(shrink && (*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage)) + /* Remove excess chunks */ + if(H5D__chunk_prune_by_extent(dset, dxpl_id, curr_dims) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to remove chunks") + + /* Update chunks that are no longer edge chunks as a result of + * expansion */ + if(expand && (dset->shared->layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) + && (dset->shared->dcpl_cache.pline.nused > 0)) + if(H5D__chunk_update_old_edge_chunks(dset, dxpl_id, curr_dims) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to do update old edge chunks") + } /* end if */ /* Mark the dataspace as dirty, for later writing to the file */ if(H5D__mark(dset, dxpl_id, H5D_MARK_SPACE) < 0) diff --git a/src/H5Dlayout.c b/src/H5Dlayout.c index bbaa9c5..bf99b9d 100644 --- a/src/H5Dlayout.c +++ b/src/H5Dlayout.c @@ -96,8 +96,28 @@ H5D__layout_set_io_ops(const H5D_t *dataset) dataset->shared->layout.ops = H5D_LOPS_CHUNK; /* Set the chunk operations */ - /* (Only "B-tree" indexing type currently supported) */ - dataset->shared->layout.storage.u.chunk.ops = H5D_COPS_BTREE; + switch(dataset->shared->layout.u.chunk.idx_type) { + case H5D_CHUNK_IDX_BTREE: + dataset->shared->layout.storage.u.chunk.ops = H5D_COPS_BTREE; + break; + + case H5D_CHUNK_IDX_FARRAY: + dataset->shared->layout.storage.u.chunk.ops = H5D_COPS_FARRAY; + break; + + case H5D_CHUNK_IDX_EARRAY: + dataset->shared->layout.storage.u.chunk.ops = H5D_COPS_EARRAY; + break; + + case H5D_CHUNK_IDX_BT2: + dataset->shared->layout.storage.u.chunk.ops = H5D_COPS_BT2; + break; + + case H5D_CHUNK_IDX_NTYPES: + default: + HDassert(0 && "Unknown chunk index method!"); + HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unknown chunk index method") + } /* end switch */ break; case H5D_COMPACT: @@ -164,15 +184,62 @@ H5D__layout_meta_size(const H5F_t *f, const H5O_layout_t *layout, hbool_t includ break; case H5D_CHUNKED: - /* Number of dimensions (1 byte) */ - HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); - ret_value++; + if(layout->version < H5O_LAYOUT_VERSION_4) { + /* Number of dimensions (1 byte) */ + HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + ret_value++; - /* Dimension sizes */ - ret_value += layout->u.chunk.ndims * 4; + /* B-tree address */ + ret_value += H5F_SIZEOF_ADDR(f); /* Address of data */ - /* B-tree address */ - ret_value += H5F_SIZEOF_ADDR(f); /* Address of data */ + /* Dimension sizes */ + ret_value += layout->u.chunk.ndims * 4; + } /* end if */ + else { + /* Chunked layout feature flags */ + ret_value++; + + /* Number of dimensions (1 
byte) */ + HDassert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + ret_value++; + + /* Encoded # of bytes for each chunk dimension */ + HDassert(layout->u.chunk.enc_bytes_per_dim > 0 && layout->u.chunk.enc_bytes_per_dim <= 8); + ret_value++; + + /* Dimension sizes */ + ret_value += layout->u.chunk.ndims * layout->u.chunk.enc_bytes_per_dim; + + /* Type of chunk index */ + ret_value++; + + switch(layout->u.chunk.idx_type) { + case H5D_CHUNK_IDX_BTREE: + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, 0, "v1 B-tree index type found for layout message >v3") + + case H5D_CHUNK_IDX_FARRAY: + /* Fixed array creation parameters */ + ret_value += H5D_FARRAY_CREATE_PARAM_SIZE; + break; + + case H5D_CHUNK_IDX_EARRAY: + /* Extensible array creation parameters */ + ret_value += H5D_EARRAY_CREATE_PARAM_SIZE; + break; + + case H5D_CHUNK_IDX_BT2: + /* v2 B-tree creation parameters */ + ret_value += H5D_BT2_CREATE_PARAM_SIZE; + break; + + case H5D_CHUNK_IDX_NTYPES: + default: + HGOTO_ERROR(H5E_OHDR, H5E_CANTENCODE, 0, "Invalid chunk index type") + } /* end switch */ + + /* Chunk index address */ + ret_value += H5F_SIZEOF_ADDR(f); + } /* end else */ break; case H5D_VIRTUAL: @@ -192,6 +259,151 @@ done: /*------------------------------------------------------------------------- + * Function: H5D__layout_set_latest_version + * + * Purpose: Set the encoding for a layout to the latest version. + * Part of the coding in this routine is moved to + * H5D__layout_set_latest_indexing(). + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Thursday, January 15, 2009 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D__layout_set_latest_version(H5O_layout_t *layout, const H5S_t *space, + const H5D_dcpl_cache_t *dcpl_cache) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity check */ + HDassert(layout); + HDassert(space); + HDassert(dcpl_cache); + + /* Set encoding of layout to latest version */ + layout->version = H5O_LAYOUT_VERSION_LATEST; + + /* Set the latest indexing type for the layout message */ + if(H5D__layout_set_latest_indexing(layout, space, dcpl_cache) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set latest indexing type") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__layout_set_latest_version() */ + + +/*------------------------------------------------------------------------- + * Function: H5D__layout_set_latest_indexing + * + * Purpose: Set the latest indexing type for a layout message + * This is moved from H5D_layout_set_latest_version(). 
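 * (Selection rule implemented below: a chunked dataset with no unlimited
 * dimensions gets the Fixed Array index, exactly one unlimited dimension
 * gets the Extensible Array, and more than one unlimited dimension gets
 * the v2 B-tree; non-chunked layouts are left unchanged.)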
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Quincey Koziol + * Thursday, January 15, 2009 + * + *------------------------------------------------------------------------- + */ +herr_t +H5D__layout_set_latest_indexing(H5O_layout_t *layout, const H5S_t *space, + const H5D_dcpl_cache_t *dcpl_cache) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_PACKAGE + + /* Sanity check */ + HDassert(layout); + HDassert(space); + HDassert(dcpl_cache); + + /* The indexing methods only apply to chunked datasets (currently) */ + if(layout->type == H5D_CHUNKED) { + int sndims; /* Rank of dataspace */ + unsigned ndims; /* Rank of dataspace */ + + /* Query the dimensionality of the dataspace */ + if((sndims = H5S_GET_EXTENT_NDIMS(space)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "invalid dataspace rank") + ndims = (unsigned)sndims; + + /* Avoid scalar/null dataspace */ + if(ndims > 0) { + hsize_t max_dims[H5O_LAYOUT_NDIMS]; /* Maximum dimension sizes */ + hsize_t cur_dims[H5O_LAYOUT_NDIMS]; /* Current dimension sizes */ + unsigned unlim_count = 0; /* Count of unlimited max. dimensions */ + unsigned u; /* Local index variable */ + + /* Query the dataspace's dimensions */ + if(H5S_get_simple_extent_dims(space, cur_dims, max_dims) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace max. dimensions") + + /* Spin through the max. dimensions, looking for unlimited dimensions */ + for(u = 0; u < ndims; u++) { + if(max_dims[u] == H5S_UNLIMITED) + unlim_count++; + } /* end for */ + + /* Chunked datasets with unlimited dimension(s) */ + if(unlim_count) { /* dataset with unlimited dimension(s) must be chunked */ + if(1 == unlim_count) { /* Chunked dataset with only 1 unlimited dimension */ + /* Set the chunk index type to an extensible array */ + layout->u.chunk.idx_type = H5D_CHUNK_IDX_EARRAY; + layout->storage.u.chunk.idx_type = H5D_CHUNK_IDX_EARRAY; + layout->storage.u.chunk.ops = H5D_COPS_EARRAY; + + /* Set the extensible array creation parameters */ + /* (use hard-coded defaults for now, until we give applications + * control over this with a property list - QAK) + */ + layout->u.chunk.u.earray.cparam.max_nelmts_bits = H5D_EARRAY_MAX_NELMTS_BITS; + layout->u.chunk.u.earray.cparam.idx_blk_elmts = H5D_EARRAY_IDX_BLK_ELMTS; + layout->u.chunk.u.earray.cparam.sup_blk_min_data_ptrs = H5D_EARRAY_SUP_BLK_MIN_DATA_PTRS; + layout->u.chunk.u.earray.cparam.data_blk_min_elmts = H5D_EARRAY_DATA_BLK_MIN_ELMTS; + layout->u.chunk.u.earray.cparam.max_dblk_page_nelmts_bits = H5D_EARRAY_MAX_DBLOCK_PAGE_NELMTS_BITS; + } /* end if */ + else { /* Chunked dataset with > 1 unlimited dimensions */ + /* Set the chunk index type to v2 B-tree */ + layout->u.chunk.idx_type = H5D_CHUNK_IDX_BT2; + layout->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BT2; + layout->storage.u.chunk.ops = H5D_COPS_BT2; + + /* Set the v2 B-tree creation parameters */ + /* (use hard-coded defaults for now, until we give applications + * control over this with a property list - QAK) + */ + layout->u.chunk.u.btree2.cparam.node_size = H5D_BT2_NODE_SIZE; + layout->u.chunk.u.btree2.cparam.split_percent = H5D_BT2_SPLIT_PERC; + layout->u.chunk.u.btree2.cparam.merge_percent = H5D_BT2_MERGE_PERC; + } /* end else */ + } /* end if */ + else { + /* Set the chunk index type to Fixed Array */ + layout->u.chunk.idx_type = H5D_CHUNK_IDX_FARRAY; + layout->storage.u.chunk.idx_type = H5D_CHUNK_IDX_FARRAY; + layout->storage.u.chunk.ops = H5D_COPS_FARRAY; + + /* Set the fixed array creation parameters */ + /* 
(use hard-coded defaults for now, until we give applications + * control over this with a property list - QAK) + */ + layout->u.chunk.u.farray.cparam.max_dblk_page_nelmts_bits = H5D_FARRAY_MAX_DBLK_PAGE_NELMTS_BITS; + } /* end else */ + } /* end if */ + } /* end if */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__layout_set_latest_indexing() */ + + +/*------------------------------------------------------------------------- * Function: H5D__layout_oh_create * * Purpose: Create layout/pline/efl information for dataset @@ -398,9 +610,10 @@ H5D__layout_oh_read(H5D_t *dataset, hid_t dxpl_id, hid_t dapl_id, H5P_genplist_t /* Copy layout to the DCPL */ if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &dataset->shared->layout) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set layout") - /* Adjust chunk dimensions back again (*sigh*) */ - if(H5D_CHUNKED == dataset->shared->layout.type) - dataset->shared->layout.u.chunk.ndims++; + + /* Set chunk sizes */ + if(H5D__chunk_set_sizes(dataset) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "unable to set chunk sizes") done: FUNC_LEAVE_NOAPI(ret_value) diff --git a/src/H5Doh.c b/src/H5Doh.c index 54f7a5e..5cdf4bc 100644 --- a/src/H5Doh.c +++ b/src/H5Doh.c @@ -55,7 +55,7 @@ static void *H5O__dset_create(H5F_t *f, void *_crt_info, H5G_loc_t *obj_loc, static H5O_loc_t *H5O__dset_get_oloc(hid_t obj_id); static herr_t H5O__dset_bh_info(const H5O_loc_t *loc, hid_t dxpl_id, H5O_t *oh, H5_ih_info_t *bh_info); -static herr_t H5O__dset_flush(H5G_loc_t *obj_loc, hid_t dxpl_id); +static herr_t H5O__dset_flush(void *_obj_ptr, hid_t dxpl_id); /*********************/ @@ -443,33 +443,27 @@ done: *------------------------------------------------------------------------- */ static herr_t -H5O__dset_flush(H5G_loc_t *obj_loc, hid_t dxpl_id) +H5O__dset_flush(void *_obj_ptr, hid_t dxpl_id) { - H5D_t *dset = NULL; /* Dataset opened */ + H5D_t *dset = (H5D_t *)_obj_ptr; /* Pointer to dataset object */ H5O_type_t obj_type; /* Type of object at location */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC - HDassert(obj_loc); - HDassert(obj_loc->oloc); + HDassert(dset); + HDassert(&dset->oloc); /* Check that the object found is the correct type */ - if(H5O_obj_type(obj_loc->oloc, &obj_type, dxpl_id) < 0) + if(H5O_obj_type(&dset->oloc, &obj_type, dxpl_id) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get object type") if(obj_type != H5O_TYPE_DATASET) HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "not a dataset") - /* Open the dataset */ - if(NULL == (dset = H5D_open(obj_loc, H5P_DATASET_ACCESS_DEFAULT, dxpl_id))) - HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to open dataset") - if(H5D__flush_real(dset, dxpl_id) < 0) HDONE_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to flush cached dataset info") done: - if(dset && H5D_close(dset) < 0) - HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release dataset") FUNC_LEAVE_NOAPI(ret_value) } /* end H5O__dset_flush() */ diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index 530e418..4d8461f 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -33,6 +33,7 @@ /* Other private headers needed by this file */ #include "H5ACprivate.h" /* Metadata cache */ +#include "H5B2private.h" /* v2 B-trees */ #include "H5Fprivate.h" /* File access */ #include "H5Gprivate.h" /* Groups */ #include "H5SLprivate.h" /* Skip lists */ @@ -44,7 +45,7 @@ /* Set the minimum object header size to create objects with */ #define H5D_MINHDR_SIZE 256 - +#if 0 /* [Simple] Macro to construct a H5D_io_info_t from it's components 
*/ #define H5D_BUILD_IO_INFO_WRT(io_info, ds, dxpl_c, dxpl_m, dxpl_r, str, buf) \ (io_info)->dset = ds; \ @@ -62,11 +63,33 @@ (io_info)->store = str; \ (io_info)->op_type = H5D_IO_OP_READ; \ (io_info)->u.rbuf = buf +#endif /* Flags for marking aspects of a dataset dirty */ #define H5D_MARK_SPACE 0x01 #define H5D_MARK_LAYOUT 0x02 +/* Default creation parameters for chunk index data structures */ +/* See H5O_layout_chunk_t */ + +/* Fixed array creation values */ +#define H5D_FARRAY_CREATE_PARAM_SIZE 1 /* Size of the creation parameters in bytes */ +#define H5D_FARRAY_MAX_DBLK_PAGE_NELMTS_BITS 10 /* i.e. 1024 elements per data block page */ + +/* Extensible array creation values */ +#define H5D_EARRAY_CREATE_PARAM_SIZE 5 /* Size of the creation parameters in bytes */ +#define H5D_EARRAY_MAX_NELMTS_BITS 32 /* i.e. 4 giga-elements */ +#define H5D_EARRAY_IDX_BLK_ELMTS 4 +#define H5D_EARRAY_SUP_BLK_MIN_DATA_PTRS 4 +#define H5D_EARRAY_DATA_BLK_MIN_ELMTS 16 +#define H5D_EARRAY_MAX_DBLOCK_PAGE_NELMTS_BITS 10 /* i.e. 1024 elements per data block page */ + +/* v2 B-tree creation values for raw meta_size */ +#define H5D_BT2_CREATE_PARAM_SIZE 6 /* Size of the creation parameters in bytes */ +#define H5D_BT2_NODE_SIZE 2048 +#define H5D_BT2_SPLIT_PERC 100 +#define H5D_BT2_MERGE_PERC 40 + /****************************/ /* Package Private Typedefs */ @@ -345,6 +368,7 @@ typedef struct H5D_chunk_ud_t { unsigned idx_hint; /* Index of chunk in cache, if present */ H5F_block_t chunk_block; /* Offset/length of chunk in file */ unsigned filter_mask; /* Excluded filters */ + hbool_t new_unfilt_chunk; /* Whether the chunk just became unfiltered */ hsize_t chunk_idx; /* Chunk index for EA, FA indexing */ } H5D_chunk_ud_t; @@ -555,6 +579,13 @@ H5_DLLVAR const H5D_layout_ops_t H5D_LOPS_VIRTUAL[1]; /* Chunked layout operations */ H5_DLLVAR const H5D_chunk_ops_t H5D_COPS_BTREE[1]; +H5_DLLVAR const H5D_chunk_ops_t H5D_COPS_EARRAY[1]; +H5_DLLVAR const H5D_chunk_ops_t H5D_COPS_FARRAY[1]; +H5_DLLVAR const H5D_chunk_ops_t H5D_COPS_BT2[1]; + +/* The v2 B-tree class for indexing chunked datasets with >1 unlimited dimensions */ +H5_DLLVAR const H5B2_class_t H5D_BT2[1]; +H5_DLLVAR const H5B2_class_t H5D_BT2_FILT[1]; /******************************/ @@ -616,6 +647,10 @@ H5_DLL herr_t H5D__scatgath_write(const H5D_io_info_t *io_info, H5_DLL herr_t H5D__layout_set_io_ops(const H5D_t *dataset); H5_DLL size_t H5D__layout_meta_size(const H5F_t *f, const H5O_layout_t *layout, hbool_t include_compact_data); +H5_DLL herr_t H5D__layout_set_latest_version(H5O_layout_t *layout, + const H5S_t *space, const H5D_dcpl_cache_t *dcpl_cache); +H5_DLL herr_t H5D__layout_set_latest_indexing(H5O_layout_t *layout, + const H5S_t *space, const H5D_dcpl_cache_t *dcpl_cache); H5_DLL herr_t H5D__layout_oh_create(H5F_t *file, hid_t dxpl_id, H5O_t *oh, H5D_t *dset, hid_t dapl_id); H5_DLL herr_t H5D__layout_oh_read(H5D_t *dset, hid_t dxpl_id, hid_t dapl_id, @@ -650,8 +685,11 @@ H5_DLL herr_t H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled, H5D_chunk_ud_t *udata); H5_DLL herr_t H5D__chunk_allocated(H5D_t *dset, hid_t dxpl_id, hsize_t *nbytes); H5_DLL herr_t H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_t old_dim[]); +H5_DLL herr_t H5D__chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id, + hsize_t old_dim[]); H5_DLL herr_t H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim); +H5_DLL herr_t H5D__chunk_set_sizes(H5D_t *dset); #ifdef H5_HAVE_PARALLEL H5_DLL herr_t 
H5D__chunk_addrmap(const H5D_io_info_t *io_info, haddr_t chunk_addr[]); #endif /* H5_HAVE_PARALLEL */ @@ -744,6 +782,7 @@ H5_DLL herr_t H5D__piece_io_term(H5D_io_info_t *io_info, H5D_dset_info_t *di); #ifdef H5D_TESTING H5_DLL herr_t H5D__layout_version_test(hid_t did, unsigned *version); H5_DLL herr_t H5D__layout_contig_size_test(hid_t did, hsize_t *size); +H5_DLL herr_t H5D__layout_idx_type_test(hid_t did, H5D_chunk_index_t *idx_type); H5_DLL herr_t H5D__current_cache_size_test(hid_t did, size_t *nbytes_used, int *nused); #endif /* H5D_TESTING */ diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h index 4eee9a2..3d61ac6 100644 --- a/src/H5Dpublic.h +++ b/src/H5Dpublic.h @@ -35,6 +35,9 @@ #define H5D_CHUNK_CACHE_NBYTES_DEFAULT ((size_t) -1) #define H5D_CHUNK_CACHE_W0_DEFAULT (-1.0f) +/* Bit flags for the H5Pset_chunk_opts() and H5Pget_chunk_opts() */ +#define H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS (0x0002u) + /* Property names for H5LTDdirect_chunk_write */ #define H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME "direct_chunk_flag" #define H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME "direct_chunk_filters" @@ -59,8 +62,11 @@ typedef enum H5D_layout_t { /* Types of chunk index data structures */ typedef enum H5D_chunk_index_t { - H5D_CHUNK_IDX_BTREE = 0, /* v1 B-tree index */ - H5D_CHUNK_IDX_NTYPES /* this one must be last! */ + H5D_CHUNK_IDX_BTREE = 0, /* v1 B-tree index (default) */ + H5D_CHUNK_IDX_FARRAY = 3, /* Fixed array (for 0 unlimited dims) */ + H5D_CHUNK_IDX_EARRAY = 4, /* Extensible array (for 1 unlimited dim) */ + H5D_CHUNK_IDX_BT2 = 5, /* v2 B-tree index (for >1 unlimited dims) */ + H5D_CHUNK_IDX_NTYPES /* This one must be last! */ } H5D_chunk_index_t; /* Values for the space allocation time property */ @@ -180,6 +186,7 @@ H5_DLL herr_t H5Ddebug(hid_t dset_id); #ifndef H5_NO_DEPRECATED_SYMBOLS /* Macros */ +#define H5D_CHUNK_BTREE H5D_CHUNK_IDX_BTREE /* Typedefs */ diff --git a/src/H5Dtest.c b/src/H5Dtest.c index fd8ff71..c3b0b19 100644 --- a/src/H5Dtest.c +++ b/src/H5Dtest.c @@ -144,6 +144,47 @@ done: /*-------------------------------------------------------------------------- NAME + H5D__layout_idx_type_test + PURPOSE + Determine the storage layout index type for a dataset's layout information + USAGE + herr_t H5D__layout_idx_type_test(did, idx_type) + hid_t did; IN: Dataset to query + H5D_chunk_index_t *idx_type; OUT: Pointer to location to place index type info + RETURNS + Non-negative on success, negative on failure + DESCRIPTION + Checks the index type of the storage layout information for a dataset. 
+ GLOBAL VARIABLES + COMMENTS, BUGS, ASSUMPTIONS + DO NOT USE THIS FUNCTION FOR ANYTHING EXCEPT TESTING + EXAMPLES + REVISION LOG +--------------------------------------------------------------------------*/ +herr_t +H5D__layout_idx_type_test(hid_t did, H5D_chunk_index_t *idx_type) +{ + H5D_t *dset; /* Pointer to dataset to query */ + herr_t ret_value = SUCCEED; /* return value */ + + FUNC_ENTER_PACKAGE + + /* Check args */ + if(NULL == (dset = (H5D_t *)H5I_object_verify(did, H5I_DATASET))) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset") + if(dset->shared->layout.type != H5D_CHUNKED) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "dataset is not chunked") + + if(idx_type) + *idx_type = dset->shared->layout.u.chunk.idx_type; + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* H5D__layout_idx_type_test() */ + + +/*-------------------------------------------------------------------------- + NAME H5D__current_cache_size_test PURPOSE Determine current the size of the dataset's chunk cache @@ -88,6 +88,8 @@ hbool_t H5_PKG_INIT_VAR = FALSE; * client class.. */ const H5EA_class_t *const H5EA_client_class_g[] = { + H5EA_CLS_CHUNK, /* 0 - H5EA_CLS_CHUNK_ID */ + H5EA_CLS_FILT_CHUNK, /* 1 - H5EA_CLS_FILT_CHUNK_ID */ H5EA_CLS_TEST, /* ? - H5EA_CLS_TEST_ID */ }; @@ -104,6 +106,8 @@ const H5EA_class_t *const H5EA_client_class_g[] = { /* Declare a free list to manage the H5EA_t struct */ H5FL_DEFINE_STATIC(H5EA_t); +/* Declare a PQ free list to manage the element */ +H5FL_BLK_DEFINE(ea_native_elmt); /*------------------------------------------------------------------------- @@ -877,116 +881,6 @@ END_FUNC(PRIV) /* end H5EA_undepend() */ /*------------------------------------------------------------------------- - * Function: H5EA_support - * - * Purpose: Create a child flush dependency on the array metadata that - * contains the element for an array index. - * - * Return: SUCCEED/FAIL - * - * Programmer: Quincey Koziol - * koziol@hdfgroup.org - * May 21 2009 - * - *------------------------------------------------------------------------- - */ -BEGIN_FUNC(PRIV, ERR, -herr_t, SUCCEED, FAIL, -H5EA_support(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, H5AC_info_t *child_entry)) - - /* Local variables */ - void *thing = NULL; /* Pointer to the array metadata containing the array index we are interested in */ - uint8_t *thing_elmt_buf; /* Pointer to the element buffer for the array metadata */ - hsize_t thing_elmt_idx; /* Index of the element in the element buffer for the array metadata */ - H5EA__unprotect_func_t thing_unprot_func; /* Function pointer for unprotecting the array metadata */ - -#ifdef QAK -HDfprintf(stderr, "%s: Called\n", FUNC); -HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx); -#endif /* QAK */ - - /* - * Check arguments. 
- */ - HDassert(ea); - - /* Look up the array metadata containing the element we want to set */ - if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__NO_FLAGS_SET, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0) - H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata") - - /* Sanity check */ - HDassert(thing); - HDassert(thing_elmt_buf); - HDassert(thing_unprot_func); - - /* Set up flush dependency between child_entry and metadata array 'thing' */ - if(H5EA__create_flush_depend((H5AC_info_t *)thing, child_entry) < 0) - H5E_THROW(H5E_CANTDEPEND, "unable to create flush dependency on array metadata") - -CATCH - /* Release resources */ - if(thing && (thing_unprot_func)(thing, dxpl_id, H5AC__NO_FLAGS_SET) < 0) - H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array metadata") - -END_FUNC(PRIV) /* end H5EA_support() */ - - -/*------------------------------------------------------------------------- - * Function: H5EA_unsupport - * - * Purpose: Remove a flush dependency on the array metadata that contains - * the element for an array index. - * - * Return: SUCCEED/FAIL - * - * Programmer: Quincey Koziol - * koziol@hdfgroup.org - * May 21 2009 - * - *------------------------------------------------------------------------- - */ -BEGIN_FUNC(PRIV, ERR, -herr_t, SUCCEED, FAIL, -H5EA_unsupport(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, H5AC_info_t *child_entry)) - - /* Local variables */ - void *thing = NULL; /* Pointer to the array metadata containing the array index we are interested in */ - uint8_t *thing_elmt_buf; /* Pointer to the element buffer for the array metadata */ - hsize_t thing_elmt_idx; /* Index of the element in the element buffer for the array metadata */ - H5EA__unprotect_func_t thing_unprot_func; /* Function pointer for unprotecting the array metadata */ - -#ifdef QAK -HDfprintf(stderr, "%s: Called\n", FUNC); -HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx); -#endif /* QAK */ - - /* - * Check arguments. 
- */ - HDassert(ea); - - /* Look up the array metadata containing the element we want to set */ - if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__READ_ONLY_FLAG, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0) - H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata") - - /* Sanity check */ - HDassert(thing); - HDassert(thing_elmt_buf); - HDassert(thing_unprot_func); - - /* Remove flush dependency between child_entry and metadata array 'thing' */ - if(H5EA__destroy_flush_depend((H5AC_info_t *)thing, child_entry) < 0) - H5E_THROW(H5E_CANTUNDEPEND, "unable to destroy flush dependency on array metadata") - -CATCH - /* Release resources */ - if(thing && (thing_unprot_func)(thing, dxpl_id, H5AC__NO_FLAGS_SET) < 0) - H5E_THROW(H5E_CANTUNPROTECT, "unable to release extensible array metadata") - -END_FUNC(PRIV) /* end H5EA_unsupport() */ - - -/*------------------------------------------------------------------------- * Function: H5EA_close * * Purpose: Close an extensible array @@ -1142,3 +1036,87 @@ CATCH END_FUNC(PRIV) /* end H5EA_delete() */ + +/*------------------------------------------------------------------------- + * Function: H5EA_iterate + * + * Purpose: Iterate over the elements of an extensible array + * (copied and modified from FA_iterate() in H5FA.c) + * + * Return: SUCCEED/FAIL + * + * Programmer: Vailin Choi; Feb 2015 + * + *------------------------------------------------------------------------- + */ +BEGIN_FUNC(PRIV, ERR, +herr_t, SUCCEED, FAIL, +H5EA_iterate(H5EA_t *ea, hid_t dxpl_id, H5EA_operator_t op, void *udata)) + + /* Local variables */ + uint8_t *elmt = NULL; + hsize_t u; + + /* + * Check arguments. + */ + HDassert(ea); + HDassert(op); + HDassert(udata); + + /* Allocate space for a native array element */ + if(NULL == (elmt = H5FL_BLK_MALLOC(ea_native_elmt, ea->hdr->cparam.cls->nat_elmt_size))) + H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array element") + + /* Iterate over all elements in array */ + for(u = 0; u < ea->hdr->stats.stored.max_idx_set; u++) { + int cb_ret; /* Return value from callback */ + + /* Get array element */ + if(H5EA_get(ea, dxpl_id, u, elmt) < 0) + H5E_THROW(H5E_CANTGET, "unable to delete fixed array") + + /* Make callback */ + if((cb_ret = (*op)(u, elmt, udata)) < 0) { + H5E_PRINTF(H5E_BADITER, "iterator function failed"); + H5_LEAVE(cb_ret) + } /* end if */ + } /* end for */ + +CATCH + + if(elmt) + elmt = H5FL_BLK_FREE(ea_native_elmt, elmt); + +END_FUNC(PRIV) /* end H5EA_iterate() */ + + +/*------------------------------------------------------------------------- + * Function: H5EA_patch_file + * + * Purpose: Patch the top-level file pointer contained in ea + * to point to idx_info->f if they are different. + * This is possible because the file pointer in ea can be + * closed out if ea remains open. + * + * Return: SUCCEED + * + *------------------------------------------------------------------------- + */ +BEGIN_FUNC(PRIV, NOERR, +herr_t, SUCCEED, -, +H5EA_patch_file(H5EA_t *ea, H5F_t *f)) + + /* Local variables */ + + /* + * Check arguments. 
+ */ + HDassert(ea); + HDassert(f); + + if(ea->f != f || ea->hdr->f != f) + ea->f = ea->hdr->f = f; + +END_FUNC(PRIV) /* end H5EA_patch_file() */ + diff --git a/src/H5EAprivate.h b/src/H5EAprivate.h index 33b4553..0a1b945 100644 --- a/src/H5EAprivate.h +++ b/src/H5EAprivate.h @@ -49,6 +49,9 @@ /* Extensible array class IDs */ typedef enum H5EA_cls_id_t { + H5EA_CLS_CHUNK_ID = 0, /* Extensible array is for indexing dataset chunks w/o filters */ + H5EA_CLS_FILT_CHUNK_ID, /* Extensible array is for indexing dataset chunks w/filters */ + /* Start real class IDs at 0 -QAK */ /* (keep these last) */ H5EA_CLS_TEST_ID, /* Extensible array is for testing (do not use for actual data) */ @@ -112,11 +115,20 @@ typedef struct H5EA_stat_t { /* Extensible array info (forward decl - defined in H5EApkg.h) */ typedef struct H5EA_t H5EA_t; +/* Define the operator callback function pointer for H5EA_iterate() */ +typedef int (*H5EA_operator_t)(hsize_t idx, const void *_elmt, void *_udata); + /*****************************/ /* Library-private Variables */ /*****************************/ +/* The Extensible Array class for dataset chunks w/o filters*/ +H5_DLLVAR const H5EA_class_t H5EA_CLS_CHUNK[1]; + +/* The Extensible Array class for dataset chunks w/ filters*/ +H5_DLLVAR const H5EA_class_t H5EA_CLS_FILT_CHUNK[1]; + /***************************************/ /* Library-private Function Prototypes */ @@ -131,13 +143,10 @@ H5_DLL herr_t H5EA_get_addr(const H5EA_t *ea, haddr_t *addr); H5_DLL herr_t H5EA_set(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, const void *elmt); H5_DLL herr_t H5EA_get(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, void *elmt); H5_DLL herr_t H5EA_depend(H5AC_info_t *parent_entry, H5EA_t *ea); -H5_DLL herr_t H5EA_undepend(H5AC_info_t *parent_entry, H5EA_t *ea); -H5_DLL herr_t H5EA_support(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, - H5AC_info_t *child_entry); -H5_DLL herr_t H5EA_unsupport(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, - H5AC_info_t *child_entry); +H5_DLL herr_t H5EA_iterate(H5EA_t *fa, hid_t dxpl_id, H5EA_operator_t op, void *udata); H5_DLL herr_t H5EA_close(H5EA_t *ea, hid_t dxpl_id); H5_DLL herr_t H5EA_delete(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata); +H5_DLL herr_t H5EA_patch_file(H5EA_t *fa, H5F_t *f); /* Statistics routines */ H5_DLL herr_t H5EA_get_stats(const H5EA_t *ea, H5EA_stat_t *stats); @@ -79,6 +79,8 @@ hbool_t H5_PKG_INIT_VAR = FALSE; * client class.. */ const H5FA_class_t *const H5FA_client_class_g[] = { + H5FA_CLS_CHUNK, /* 0 - H5FA_CLS_CHUNK_ID */ + H5FA_CLS_FILT_CHUNK, /* 1 - H5FA_CLS_FILT_CHUNK_ID */ H5FA_CLS_TEST, /* ? - H5FA_CLS_TEST_ID */ }; @@ -739,3 +741,36 @@ CATCH END_FUNC(PRIV) /* end H5FA_iterate() */ + +/*------------------------------------------------------------------------- + * Function: H5FA_patch_file + * + * Purpose: Patch the top-level file pointer contained in fa + * to point to idx_info->f if they are different. + * This is possible because the file pointer in fa can be + * closed out if fa remains open. + * + * Return: SUCCEED + * + *------------------------------------------------------------------------- + */ +BEGIN_FUNC(PRIV, NOERR, +herr_t, SUCCEED, -, +H5FA_patch_file(H5FA_t *fa, H5F_t *f)) + + /* Local variables */ + +#ifdef H5FA_DEBUG +HDfprintf(stderr, "%s: Called\n", FUNC); +#endif /* H5FA_DEBUG */ + + /* + * Check arguments. 
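Example (not part of the patch): a sketch of the call pattern enabled by the new H5EA_operator_t callback and H5EA_iterate(). This is library-internal usage, not application code; the helper and struct names are hypothetical, and it assumes, for illustration only, that the non-filtered chunk class stores a bare file address (haddr_t) per element.

/* Hypothetical user data for the callback below */
typedef struct chunk_count_ud_t {
    hsize_t nchunks;            /* Number of chunks with storage allocated */
} chunk_count_ud_t;

/* Matches the new H5EA_operator_t signature */
static int
count_chunk_cb(hsize_t H5_ATTR_UNUSED idx, const void *_elmt, void *_udata)
{
    const haddr_t    *addr  = (const haddr_t *)_elmt;   /* assumed element type */
    chunk_count_ud_t *udata = (chunk_count_ud_t *)_udata;

    if(H5F_addr_defined(*addr))
        udata->nchunks++;

    return H5_ITER_CONT;        /* a negative return stops the iteration */
}

/* Hypothetical helper showing how H5EA_iterate() would be driven */
static herr_t
count_allocated_chunks(H5EA_t *ea, hid_t dxpl_id, hsize_t *nchunks /*out*/)
{
    chunk_count_ud_t udata;

    udata.nchunks = 0;
    if(H5EA_iterate(ea, dxpl_id, count_chunk_cb, &udata) < 0)
        return FAIL;
    *nchunks = udata.nchunks;
    return SUCCEED;
}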
+ */ + HDassert(fa); + HDassert(f); + + if(fa->f != f || fa->hdr->f != f) + fa->f = fa->hdr->f = f; + +END_FUNC(PRIV) /* end H5FA_patch_file() */ diff --git a/src/H5FAprivate.h b/src/H5FAprivate.h index dfc73f5..1e76468 100644 --- a/src/H5FAprivate.h +++ b/src/H5FAprivate.h @@ -46,6 +46,9 @@ /* Fixed Array class IDs */ typedef enum H5FA_cls_id_t { + H5FA_CLS_CHUNK_ID = 0, /* Fixed array is for indexing dataset chunks w/o filters */ + H5FA_CLS_FILT_CHUNK_ID, /* Fixed array is for indexing dataset chunks w/filters */ + /* Start real class IDs at 0 -QAK */ /* (keep these last) */ H5FA_CLS_TEST_ID, /* Fixed array is for testing (do not use for actual data) */ @@ -104,6 +107,12 @@ typedef int (*H5FA_operator_t)(hsize_t idx, const void *_elmt, void *_udata); /* Library-private Variables */ /*****************************/ +/* The Fixed Array class for dataset chunks w/o filters*/ +H5_DLLVAR const H5FA_class_t H5FA_CLS_CHUNK[1]; + +/* The Fixed Array class for dataset chunks w/ filters*/ +H5_DLLVAR const H5FA_class_t H5FA_CLS_FILT_CHUNK[1]; + /***************************************/ /* Library-private Function Prototypes */ @@ -120,6 +129,7 @@ H5_DLL herr_t H5FA_get(const H5FA_t *fa, hid_t dxpl_id, hsize_t idx, void *elmt) H5_DLL herr_t H5FA_iterate(H5FA_t *fa, hid_t dxpl_id, H5FA_operator_t op, void *udata); H5_DLL herr_t H5FA_close(H5FA_t *fa, hid_t dxpl_id); H5_DLL herr_t H5FA_delete(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata); +H5_DLL herr_t H5FA_patch_file(H5FA_t *fa, H5F_t *f); /* Statistics routines */ H5_DLL herr_t H5FA_get_stats(const H5FA_t *ea, H5FA_stat_t *stats); diff --git a/src/H5Fint.c b/src/H5Fint.c index 1c29fee..4da382b 100644 --- a/src/H5Fint.c +++ b/src/H5Fint.c @@ -130,6 +130,7 @@ H5F_get_access_plist(H5F_t *f, hbool_t app_ref) H5FD_driver_prop_t driver_prop; /* Property for driver ID & info */ hbool_t driver_prop_copied = FALSE; /* Whether the driver property has been set up */ unsigned efc_size = 0; + hbool_t latest_format = FALSE; /* Always use the latest format? */ hid_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI(FAIL) @@ -166,7 +167,9 @@ H5F_get_access_plist(H5F_t *f, hbool_t app_ref) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't sieve buffer size") if(H5P_set(new_plist, H5F_ACS_SDATA_BLOCK_SIZE_NAME, &(f->shared->sdata_aggr.alloc_size)) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set 'small data' cache size") - if(H5P_set(new_plist, H5F_ACS_LATEST_FORMAT_NAME, &(f->shared->latest_format)) < 0) + if(f->shared->latest_flags > 0) + latest_format = TRUE; + if(H5P_set(new_plist, H5F_ACS_LATEST_FORMAT_NAME, &latest_format) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set 'latest format' flag") if(f->shared->efc) efc_size = H5F_efc_max_nfiles(f->shared->efc); @@ -575,6 +578,7 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t else { H5P_genplist_t *plist; /* Property list */ unsigned efc_size; /* External file cache size */ + hbool_t latest_format; /* Always use the latest format? 
*/ size_t u; /* Local index variable */ HDassert(lf != NULL); @@ -630,8 +634,11 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get garbage collect reference") if(H5P_get(plist, H5F_ACS_SIEVE_BUF_SIZE_NAME, &(f->shared->sieve_buf_size)) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get sieve buffer size") - if(H5P_get(plist, H5F_ACS_LATEST_FORMAT_NAME, &(f->shared->latest_format)) < 0) + if(H5P_get(plist, H5F_ACS_LATEST_FORMAT_NAME, &latest_format) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get 'latest format' flag") + /* For latest format, activate all latest version support */ + if(latest_format) + f->shared->latest_flags |= H5F_LATEST_ALL_FLAGS; if(H5P_get(plist, H5F_ACS_META_BLOCK_SIZE_NAME, &(f->shared->meta_aggr.alloc_size)) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get metadata cache size") f->shared->meta_aggr.feature_flag = H5FD_FEAT_AGGREGATE_METADATA; diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h index ab2ef9e..06f0207 100644 --- a/src/H5Fpkg.h +++ b/src/H5Fpkg.h @@ -268,7 +268,7 @@ struct H5F_file_t { hsize_t threshold; /* Threshold for alignment */ hsize_t alignment; /* Alignment */ unsigned gc_ref; /* Garbage-collect references? */ - hbool_t latest_format; /* Always use the latest format? */ + unsigned latest_flags; /* The latest version support */ hbool_t store_msg_crt_idx; /* Store creation index for object header messages? */ unsigned ncwfs; /* Num entries on cwfs list */ struct H5HG_heap_t **cwfs; /* Global heap cache */ diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index 52649e5..3a92538 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -308,7 +308,7 @@ #define H5F_RDCC_W0(F) ((F)->shared->rdcc_w0) #define H5F_SIEVE_BUF_SIZE(F) ((F)->shared->sieve_buf_size) #define H5F_GC_REF(F) ((F)->shared->gc_ref) -#define H5F_USE_LATEST_FORMAT(F) ((F)->shared->latest_format) +#define H5F_USE_LATEST_FLAGS(F,FL) ((F)->shared->latest_flags & (FL)) #define H5F_STORE_MSG_CRT_IDX(F) ((F)->shared->store_msg_crt_idx) #define H5F_SET_STORE_MSG_CRT_IDX(F, FL) ((F)->shared->store_msg_crt_idx = (FL)) #define H5F_GRP_BTREE_SHARED(F) ((F)->shared->grp_btree_shared) @@ -353,7 +353,7 @@ #define H5F_RDCC_W0(F) (H5F_rdcc_w0(F)) #define H5F_SIEVE_BUF_SIZE(F) (H5F_sieve_buf_size(F)) #define H5F_GC_REF(F) (H5F_gc_ref(F)) -#define H5F_USE_LATEST_FORMAT(F) (H5F_use_latest_format(F)) +#define H5F_USE_LATEST_FLAGS(F,FL) (H5F_use_latest_flags(F,FL)) #define H5F_STORE_MSG_CRT_IDX(F) (H5F_store_msg_crt_idx(F)) #define H5F_SET_STORE_MSG_CRT_IDX(F, FL) (H5F_set_store_msg_crt_idx((F), (FL))) #define H5F_GRP_BTREE_SHARED(F) (H5F_grp_btree_shared(F)) @@ -555,6 +555,22 @@ #define H5SM_LIST_MAGIC "SMLI" /* Shared Message List */ +/* Latest format will activate the following latest version support */ +/* "latest_flags" in H5F_file_t */ +#define H5F_LATEST_DATATYPE 0x0001 +#define H5F_LATEST_DATASPACE 0x0002 +#define H5F_LATEST_ATTRIBUTE 0x0004 +#define H5F_LATEST_FILL_MSG 0x0008 +#define H5F_LATEST_PLINE_MSG 0x0010 +#define H5F_LATEST_LAYOUT_MSG 0x0020 +#define H5F_LATEST_NO_MOD_TIME_MSG 0x0040 +#define H5F_LATEST_STYLE_GROUP 0x0080 +#define H5F_LATEST_OBJ_HEADER 0x0100 +#define H5F_LATEST_SUPERBLOCK 0x0200 +#define H5F_LATEST_ALL_FLAGS (H5F_LATEST_DATATYPE | H5F_LATEST_DATASPACE | H5F_LATEST_ATTRIBUTE | H5F_LATEST_FILL_MSG | H5F_LATEST_PLINE_MSG | H5F_LATEST_LAYOUT_MSG | H5F_LATEST_NO_MOD_TIME_MSG | H5F_LATEST_STYLE_GROUP | H5F_LATEST_OBJ_HEADER | H5F_LATEST_SUPERBLOCK) + +#define 
H5F_LATEST_DSET_MSG_FLAGS (H5F_LATEST_FILL_MSG | H5F_LATEST_PLINE_MSG | H5F_LATEST_LAYOUT_MSG) + /****************************/ /* Library Private Typedefs */ /****************************/ @@ -641,7 +657,7 @@ H5_DLL size_t H5F_rdcc_nslots(const H5F_t *f); H5_DLL double H5F_rdcc_w0(const H5F_t *f); H5_DLL size_t H5F_sieve_buf_size(const H5F_t *f); H5_DLL unsigned H5F_gc_ref(const H5F_t *f); -H5_DLL hbool_t H5F_use_latest_format(const H5F_t *f); +H5_DLL unsigned H5F_use_latest_flags(const H5F_t *f, unsigned fl); H5_DLL hbool_t H5F_store_msg_crt_idx(const H5F_t *f); H5_DLL herr_t H5F_set_store_msg_crt_idx(H5F_t *f, hbool_t flag); H5_DLL struct H5UC_t *H5F_grp_btree_shared(const H5F_t *f); diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h index 122f711..eba9b12 100644 --- a/src/H5Fpublic.h +++ b/src/H5Fpublic.h @@ -48,9 +48,6 @@ * which are compatible with the library to which the application is linked. * We're assuming that these constants are used rather early in the hdf5 * session. - * - * Note that H5F_ACC_DEBUG is deprecated (nonfuncational) but retained as a - * symbol for backward compatibility. */ #define H5F_ACC_RDONLY (H5CHECK H5OPEN 0x0000u) /*absence of rdwr => rd-only */ #define H5F_ACC_RDWR (H5CHECK H5OPEN 0x0001u) /*open for read and write */ diff --git a/src/H5Fquery.c b/src/H5Fquery.c index e9af300..dd6e8e3 100644 --- a/src/H5Fquery.c +++ b/src/H5Fquery.c @@ -779,23 +779,22 @@ H5F_gc_ref(const H5F_t *f) /*------------------------------------------------------------------------- - * Function: H5F_use_latest_format + * Function: H5F_use_latest_flags * - * Purpose: Retrieve the 'use the latest version of the format' flag for - * the file. + * Purpose: Retrieve the 'latest version support' for the file. * - * Return: Success: Non-negative, the 'use the latest format' flag + * Return: Success: Non-negative, the requested 'version support' * * Failure: (can't happen) * * Programmer: Quincey Koziol * koziol@hdfgroup.org - * Oct 2 2006 + * Mar 5 2007 * *------------------------------------------------------------------------- */ -hbool_t -H5F_use_latest_format(const H5F_t *f) +unsigned +H5F_use_latest_flags(const H5F_t *f, unsigned fl) { /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */ FUNC_ENTER_NOAPI_NOINIT_NOERR @@ -803,8 +802,8 @@ H5F_use_latest_format(const H5F_t *f) HDassert(f); HDassert(f->shared); - FUNC_LEAVE_NOAPI(f->shared->latest_format) -} /* end H5F_use_latest_format() */ + FUNC_LEAVE_NOAPI(f->shared->latest_flags & (fl)) +} /* end H5F_use_latest_flags() */ /*------------------------------------------------------------------------- diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c index 70d1a49..4aca221 100644 --- a/src/H5Fsuper.c +++ b/src/H5Fsuper.c @@ -744,8 +744,8 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id) if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, &sblock->btree_k[0]) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get rank for btree internal nodes") - /* Bump superblock version if we are to use the latest version of the format */ - if(f->shared->latest_format) + /* Bump superblock version if latest superblock version support is enabled */ + if(H5F_USE_LATEST_FLAGS(f, H5F_LATEST_SUPERBLOCK)) super_vers = HDF5_SUPERBLOCK_VERSION_LATEST; /* Bump superblock version to create superblock extension for SOHM info */ else if(f->shared->sohm_nindexes > 0) diff --git a/src/H5Gobj.c b/src/H5Gobj.c index f7782a6..92ad0af 100644 --- a/src/H5Gobj.c +++ b/src/H5Gobj.c @@ -208,7 +208,7 @@ H5G__obj_create_real(H5F_t *f, hid_t dxpl_id, const H5O_ginfo_t 
*ginfo, /* Check for using the latest version of the group format */ /* (add more checks for creating "new format" groups when needed) */ - if(H5F_USE_LATEST_FORMAT(f) || linfo->track_corder + if(H5F_USE_LATEST_FLAGS(f, H5F_LATEST_STYLE_GROUP) || linfo->track_corder || (pline && pline->nused)) use_latest_format = TRUE; else diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c index 19ea14a..42857c0 100644 --- a/src/H5HFhdr.c +++ b/src/H5HFhdr.c @@ -427,7 +427,7 @@ H5HF_hdr_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam) HGOTO_ERROR(H5E_HEAP, H5E_CANTCOPY, HADDR_UNDEF, "can't copy I/O filter pipeline") /* Pay attention to the latest version flag for the file */ - if(H5F_USE_LATEST_FORMAT(hdr->f)) + if(H5F_USE_LATEST_FLAGS(hdr->f, H5F_LATEST_PLINE_MSG)) /* Set the latest version for the I/O pipeline message */ if(H5O_pline_set_latest_version(&(hdr->pline)) < 0) HGOTO_ERROR(H5E_HEAP, H5E_CANTSET, HADDR_UNDEF, "can't set latest version of I/O filter pipeline") @@ -1161,7 +1161,7 @@ H5O_create(H5F_t *f, hid_t dxpl_id, size_t size_hint, size_t initial_rc, /* Initialize file-specific information for object header */ store_msg_crt_idx = H5F_STORE_MSG_CRT_IDX(f); - if(H5F_USE_LATEST_FORMAT(f) || store_msg_crt_idx || (oh_flags & H5O_HDR_ATTR_CRT_ORDER_TRACKED)) + if(H5F_USE_LATEST_FLAGS(f, H5F_LATEST_OBJ_HEADER) || store_msg_crt_idx || (oh_flags & H5O_HDR_ATTR_CRT_ORDER_TRACKED)) oh->version = H5O_VERSION_LATEST; else oh->version = H5O_VERSION_1; diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c index 936c8b8..0ee8cfd 100644 --- a/src/H5Ocopy.c +++ b/src/H5Ocopy.c @@ -354,17 +354,30 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out*/, H5G_loc_t tmp_loc; /* Location of object */ H5O_loc_t tmp_oloc; /* Location of object */ H5G_name_t tmp_path; /* Object's path */ + void *obj_ptr = NULL; /* Object pointer */ + hid_t tmp_id = -1; /* Object ID */ tmp_loc.oloc = &tmp_oloc; tmp_loc.path = &tmp_path; tmp_oloc.file = oloc_src->file; tmp_oloc.addr = oloc_src->addr; - tmp_oloc.holding_file = oloc_src->holding_file; + tmp_oloc.holding_file = FALSE; H5G_name_reset(tmp_loc.path); - /* Flush the object of this class */ - if(obj_class->flush && obj_class->flush(&tmp_loc, dxpl_id) < 0) + /* Get a temporary ID */ + if((tmp_id = obj_class->open(&tmp_loc, H5P_DEFAULT, dxpl_id, FALSE)) < 0) + HGOTO_ERROR(H5E_OHDR, H5E_CANTFLUSH, FAIL, "unable to open object") + + /* Get object pointer */ + obj_ptr = H5I_object(tmp_id); + + /* Flush the object */ + if(obj_class->flush && obj_class->flush(obj_ptr, dxpl_id) < 0) HGOTO_ERROR(H5E_OHDR, H5E_CANTFLUSH, FAIL, "unable to flush object") + + /* Release the temporary ID */ + if(tmp_id != -1 && H5I_dec_app_ref(tmp_id)) + HGOTO_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "unable to close temporary ID") } /* end if */ /* Get source object header */ diff --git a/src/H5Olayout.c b/src/H5Olayout.c index 31ddb88..bc1ebc6 100644 --- a/src/H5Olayout.c +++ b/src/H5Olayout.c @@ -124,7 +124,7 @@ H5O__layout_decode(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id, H5O_t H5_ATTR_UNUSED HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for layout message") if(mesg->version < H5O_LAYOUT_VERSION_3) { - unsigned ndims; /* Num dimensions in chunk */ + unsigned ndims; /* Num dimensions in chunk */ /* Dimensionality */ ndims = *p++; @@ -233,26 +233,120 @@ H5O__layout_decode(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id, H5O_t H5_ATTR_UNUSED break; case H5D_CHUNKED: - /* Dimensionality */ - mesg->u.chunk.ndims = *p++; - if(mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS) - HGOTO_ERROR(H5E_OHDR, 
H5E_CANTLOAD, NULL, "dimensionality is too large") - - /* B-tree address */ - H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr)); - - /* Chunk dimensions */ - for(u = 0; u < mesg->u.chunk.ndims; u++) - UINT32DECODE(p, mesg->u.chunk.dim[u]); - - /* Compute chunk size */ - for(u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++) - mesg->u.chunk.size *= mesg->u.chunk.dim[u]; - - /* Set the chunk operations */ - /* (Only "btree" indexing type supported with v3 of message format) */ - mesg->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BTREE; - mesg->storage.u.chunk.ops = H5D_COPS_BTREE; + if(mesg->version < H5O_LAYOUT_VERSION_4) { + /* Set the chunked layout flags */ + mesg->u.chunk.flags = (uint8_t)0; + + /* Dimensionality */ + mesg->u.chunk.ndims = *p++; + if(mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large") + + /* B-tree address */ + H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr)); + + /* Chunk dimensions */ + for(u = 0; u < mesg->u.chunk.ndims; u++) + UINT32DECODE(p, mesg->u.chunk.dim[u]); + + /* Compute chunk size */ + for(u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++) + mesg->u.chunk.size *= mesg->u.chunk.dim[u]; + + /* Set the chunk operations */ + /* (Only "btree" indexing type supported with v3 of message format) */ + mesg->storage.u.chunk.idx_type = H5D_CHUNK_IDX_BTREE; + mesg->storage.u.chunk.ops = H5D_COPS_BTREE; + } /* end if */ + else { + /* Get the chunked layout flags */ + mesg->u.chunk.flags = *p++; + + /* Check for valid flags */ + /* (Currently issues an error for all non-zero values, + * until features are added for the flags) + */ + if(mesg->u.chunk.flags & ~H5O_LAYOUT_ALL_CHUNK_FLAGS) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "bad flag value for message") + + /* Dimensionality */ + mesg->u.chunk.ndims = *p++; + if(mesg->u.chunk.ndims > H5O_LAYOUT_NDIMS) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "dimensionality is too large") + + /* Encoded # of bytes for each chunk dimension */ + mesg->u.chunk.enc_bytes_per_dim = *p++; + if(mesg->u.chunk.enc_bytes_per_dim == 0 || mesg->u.chunk.enc_bytes_per_dim > 8) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "encoded chunk dimension size is too large") + + /* Chunk dimensions */ + for(u = 0; u < mesg->u.chunk.ndims; u++) + UINT64DECODE_VAR(p, mesg->u.chunk.dim[u], mesg->u.chunk.enc_bytes_per_dim); + + /* Compute chunk size */ + for(u = 1, mesg->u.chunk.size = mesg->u.chunk.dim[0]; u < mesg->u.chunk.ndims; u++) + mesg->u.chunk.size *= mesg->u.chunk.dim[u]; + + /* Chunk index type */ + mesg->u.chunk.idx_type = (H5D_chunk_index_t)*p++; + if(mesg->u.chunk.idx_type >= H5D_CHUNK_IDX_NTYPES) + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "unknown chunk index type") + mesg->storage.u.chunk.idx_type = mesg->u.chunk.idx_type; + + switch(mesg->u.chunk.idx_type) { + case H5D_CHUNK_IDX_BTREE: + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "v1 B-tree index type should never be in a v4 layout message") + break; + + case H5D_CHUNK_IDX_FARRAY: + /* Fixed array creation parameters */ + mesg->u.chunk.u.farray.cparam.max_dblk_page_nelmts_bits = *p++; + if(0 == mesg->u.chunk.u.farray.cparam.max_dblk_page_nelmts_bits) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid fixed array creation parameter") + + /* Set the chunk operations */ + mesg->storage.u.chunk.ops = H5D_COPS_FARRAY; + break; + + case H5D_CHUNK_IDX_EARRAY: + /* Extensible array creation parameters */ + mesg->u.chunk.u.earray.cparam.max_nelmts_bits = *p++; + 
if(0 == mesg->u.chunk.u.earray.cparam.max_nelmts_bits) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid extensible array creation parameter") + mesg->u.chunk.u.earray.cparam.idx_blk_elmts = *p++; + if(0 == mesg->u.chunk.u.earray.cparam.idx_blk_elmts) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid extensible array creation parameter") + mesg->u.chunk.u.earray.cparam.sup_blk_min_data_ptrs = *p++; + if(0 == mesg->u.chunk.u.earray.cparam.sup_blk_min_data_ptrs) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid extensible array creation parameter") + mesg->u.chunk.u.earray.cparam.data_blk_min_elmts = *p++; + if(0 == mesg->u.chunk.u.earray.cparam.data_blk_min_elmts) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid extensible array creation parameter") + mesg->u.chunk.u.earray.cparam.max_dblk_page_nelmts_bits = *p++; + if(0 == mesg->u.chunk.u.earray.cparam.max_dblk_page_nelmts_bits) + HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "invalid extensible array creation parameter") + + /* Set the chunk operations */ + mesg->storage.u.chunk.ops = H5D_COPS_EARRAY; + break; + + case H5D_CHUNK_IDX_BT2: /* v2 B-tree index */ + UINT32DECODE(p, mesg->u.chunk.u.btree2.cparam.node_size); + mesg->u.chunk.u.btree2.cparam.split_percent = *p++; + mesg->u.chunk.u.btree2.cparam.merge_percent = *p++; + + /* Set the chunk operations */ + mesg->storage.u.chunk.ops = H5D_COPS_BT2; + break; + + case H5D_CHUNK_IDX_NTYPES: + default: + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "Invalid chunk index type") + } /* end switch */ + + /* Chunk index address */ + H5F_addr_decode(f, &p, &(mesg->storage.u.chunk.idx_addr)); + } /* end else */ /* Set the layout operations */ mesg->ops = H5D_LOPS_CHUNK; @@ -457,8 +551,8 @@ H5O__layout_encode(H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared, uint8_t *p, HDassert(p); /* Message version */ - *p++ = mesg->type == H5D_VIRTUAL ? (uint8_t)H5O_LAYOUT_VERSION_4 - : (uint8_t)H5O_LAYOUT_VERSION_3; + *p++ = (uint8_t)((mesg->version < H5O_LAYOUT_VERSION_3) ? 
+ H5O_LAYOUT_VERSION_3 : mesg->version); /* Layout class */ *p++ = mesg->type; @@ -488,16 +582,74 @@ H5O__layout_encode(H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared, uint8_t *p, break; case H5D_CHUNKED: - /* Number of dimensions */ - HDassert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS); - *p++ = (uint8_t)mesg->u.chunk.ndims; + if(mesg->version < H5O_LAYOUT_VERSION_4) { + /* Number of dimensions */ + HDassert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + *p++ = (uint8_t)mesg->u.chunk.ndims; - /* B-tree address */ - H5F_addr_encode(f, &p, mesg->storage.u.chunk.idx_addr); + /* B-tree address */ + H5F_addr_encode(f, &p, mesg->storage.u.chunk.idx_addr); + + /* Dimension sizes */ + for(u = 0; u < mesg->u.chunk.ndims; u++) + UINT32ENCODE(p, mesg->u.chunk.dim[u]); + } /* end if */ + else { + /* Chunk feature flags */ + *p++ = mesg->u.chunk.flags; - /* Dimension sizes */ - for(u = 0; u < mesg->u.chunk.ndims; u++) - UINT32ENCODE(p, mesg->u.chunk.dim[u]); + /* Number of dimensions */ + HDassert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS); + *p++ = (uint8_t)mesg->u.chunk.ndims; + + /* Encoded # of bytes for each chunk dimension */ + HDassert(mesg->u.chunk.enc_bytes_per_dim > 0 && mesg->u.chunk.enc_bytes_per_dim <= 8); + *p++ = (uint8_t)mesg->u.chunk.enc_bytes_per_dim; + + /* Dimension sizes */ + for(u = 0; u < mesg->u.chunk.ndims; u++) + UINT64ENCODE_VAR(p, mesg->u.chunk.dim[u], mesg->u.chunk.enc_bytes_per_dim); + + /* Chunk index type */ + *p++ = (uint8_t)mesg->u.chunk.idx_type; + + switch(mesg->u.chunk.idx_type) { + case H5D_CHUNK_IDX_BTREE: + HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "v1 B-tree index type should never be in a v4 layout message") + break; + + case H5D_CHUNK_IDX_FARRAY: + /* Fixed array creation parameters */ + *p++ = mesg->u.chunk.u.farray.cparam.max_dblk_page_nelmts_bits; + break; + + case H5D_CHUNK_IDX_EARRAY: + /* Extensible array creation parameters */ + *p++ = mesg->u.chunk.u.earray.cparam.max_nelmts_bits; + *p++ = mesg->u.chunk.u.earray.cparam.idx_blk_elmts; + *p++ = mesg->u.chunk.u.earray.cparam.sup_blk_min_data_ptrs; + *p++ = mesg->u.chunk.u.earray.cparam.data_blk_min_elmts; + *p++ = mesg->u.chunk.u.earray.cparam.max_dblk_page_nelmts_bits; + break; + + case H5D_CHUNK_IDX_BT2: /* v2 B-tree index */ + UINT32ENCODE(p, mesg->u.chunk.u.btree2.cparam.node_size); + *p++ = mesg->u.chunk.u.btree2.cparam.split_percent; + *p++ = mesg->u.chunk.u.btree2.cparam.merge_percent; + break; + + case H5D_CHUNK_IDX_NTYPES: + default: + HGOTO_ERROR(H5E_OHDR, H5E_CANTENCODE, FAIL, "Invalid chunk index type") + } /* end switch */ + + /* + * Implicit index: Address of the chunks + * Single chunk index: address of the single chunk + * Other indexes: chunk index address + */ + H5F_addr_encode(f, &p, mesg->storage.u.chunk.idx_addr); + } /* end else */ break; case H5D_VIRTUAL: @@ -1016,20 +1168,38 @@ H5O__layout_debug(H5F_t H5_ATTR_UNUSED *f, hid_t H5_ATTR_UNUSED dxpl_id, const v HDfprintf(stream, "}\n"); /* Index information */ - switch(mesg->storage.u.chunk.idx_type) { + switch(mesg->u.chunk.idx_type) { case H5D_CHUNK_IDX_BTREE: HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth, "Index Type:", "v1 B-tree"); - HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, - "B-tree address:", mesg->storage.u.chunk.idx_addr); + break; + + case H5D_CHUNK_IDX_FARRAY: + HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth, + "Index Type:", "Fixed Array"); + /* (Should print the fixed array creation parameters) */ + break; + + case 
H5D_CHUNK_IDX_EARRAY: + HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth, + "Index Type:", "Extensible Array"); + /* (Should print the extensible array creation parameters) */ + break; + + case H5D_CHUNK_IDX_BT2: + HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth, + "Index Type:", "v2 B-tree"); + /* (Should print the v2-Btree creation parameters) */ break; case H5D_CHUNK_IDX_NTYPES: default: HDfprintf(stream, "%*s%-*s %s (%u)\n", indent, "", fwidth, - "Index Type:", "Unknown", (unsigned)mesg->storage.u.chunk.idx_type); + "Index Type:", "Unknown", (unsigned)mesg->u.chunk.idx_type); break; } /* end switch */ + HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth, + "Index address:", mesg->storage.u.chunk.idx_addr); break; case H5D_CONTIGUOUS: diff --git a/src/H5Opkg.h b/src/H5Opkg.h index cc1b289..14bb342 100644 --- a/src/H5Opkg.h +++ b/src/H5Opkg.h @@ -66,7 +66,7 @@ #define H5O_ALIGN_OH(O, X) \ H5O_ALIGN_VERS((O)->version, X) #define H5O_ALIGN_F(F, X) \ - H5O_ALIGN_VERS((H5F_USE_LATEST_FORMAT(F) ? H5O_VERSION_LATEST : H5O_VERSION_1), X) + H5O_ALIGN_VERS((H5F_USE_LATEST_FLAGS(F, H5F_LATEST_OBJ_HEADER) ? H5O_VERSION_LATEST : H5O_VERSION_1), X) /* Size of checksum (on disk) */ #define H5O_SIZEOF_CHKSUM 4 @@ -138,7 +138,7 @@ #define H5O_SIZEOF_MSGHDR_OH(O) \ H5O_SIZEOF_MSGHDR_VERS((O)->version, (O)->flags & H5O_HDR_ATTR_CRT_ORDER_TRACKED) #define H5O_SIZEOF_MSGHDR_F(F, C) \ - H5O_SIZEOF_MSGHDR_VERS((H5F_USE_LATEST_FORMAT(F) || H5F_STORE_MSG_CRT_IDX(F)) ? H5O_VERSION_LATEST : H5O_VERSION_1, (C)) + H5O_SIZEOF_MSGHDR_VERS((H5F_USE_LATEST_FLAGS(F, H5F_LATEST_OBJ_HEADER) || H5F_STORE_MSG_CRT_IDX(F)) ? H5O_VERSION_LATEST : H5O_VERSION_1, (C)) /* * Size of chunk "header" for each chunk @@ -325,7 +325,7 @@ typedef struct H5O_obj_class_t { void *(*create)(H5F_t *, void *, H5G_loc_t *, hid_t ); /*create an object of this class */ H5O_loc_t *(*get_oloc)(hid_t ); /*get the object header location for an object */ herr_t (*bh_info)(const H5O_loc_t *loc, hid_t dxpl_id, H5O_t *oh, H5_ih_info_t *bh_info); /*get the index & heap info for an object */ - herr_t (*flush)(H5G_loc_t *loc, hid_t dxpl_id); /*flush an opened object of this class */ + herr_t (*flush)(void *obj_ptr, hid_t dxpl_id); /*flush an opened object of this class */ } H5O_obj_class_t; /* Node in skip list to map addresses from one file to another during object header copy */ @@ -606,7 +606,6 @@ H5_DLL herr_t H5O_attr_link(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, void *_mesg H5_DLL herr_t H5O_attr_count_real(H5F_t *f, hid_t dxpl_id, H5O_t *oh, hsize_t *nattrs); - /* These functions operate on object locations */ H5_DLL H5O_loc_t *H5O_get_loc(hid_t id); diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h index e117b8a..46845a3 100644 --- a/src/H5Oprivate.h +++ b/src/H5Oprivate.h @@ -367,6 +367,12 @@ typedef struct H5O_efl_t { */ #define H5O_LAYOUT_NDIMS (H5S_MAX_RANK+1) +/* Flags for chunked layout feature encoding */ +#define H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS 0x01 +#define H5O_LAYOUT_ALL_CHUNK_FLAGS ( \ + H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS \ + ) + /* Initial version of the layout information. 
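Example (not part of the patch): a small sketch, with a hypothetical helper name, consolidating how the public DCPL option maps onto the new on-disk layout-message flag and how the H5O_LAYOUT_ALL_CHUNK_FLAGS mask guards forward compatibility; it restates the logic used by H5Pset_chunk_opts() and H5O__layout_decode() elsewhere in this patch.

static uint8_t
chunk_opts_to_layout_flags(unsigned options)
{
    uint8_t layout_flags = 0;

    /* Public DCPL bit (0x0002u) -> on-disk layout message bit (0x01) */
    if(options & H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS)
        layout_flags |= H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS;

    /* A v4 layout decoder rejects any bit outside the known mask, so older
     * libraries refuse messages that use feature flags they don't know */
    HDassert(0 == (layout_flags & (uint8_t)~H5O_LAYOUT_ALL_CHUNK_FLAGS));

    return layout_flags;
}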
Used when space is allocated */ #define H5O_LAYOUT_VERSION_1 1 @@ -410,12 +416,39 @@ typedef struct H5O_storage_chunk_btree_t { H5UC_t *shared; /* Ref-counted shared info for B-tree nodes */ } H5O_storage_chunk_btree_t; +/* Forward declaration of structs used below */ +struct H5FA_t; /* Defined in H5FAprivate.h */ + +typedef struct H5O_storage_chunk_farray_t { + haddr_t dset_ohdr_addr; /* File address dataset's object header */ + struct H5FA_t *fa; /* Pointer to fixed index array struct */ +} H5O_storage_chunk_farray_t; + +/* Forward declaration of structs used below */ +struct H5EA_t; /* Defined in H5EAprivate.h */ + +typedef struct H5O_storage_chunk_earray_t { + haddr_t dset_ohdr_addr; /* File address dataset's object header */ + struct H5EA_t *ea; /* Pointer to extensible index array struct */ +} H5O_storage_chunk_earray_t; + +/* Forward declaration of structs used below */ +struct H5B2_t; /* Defined in H5B2pkg.h */ + +typedef struct H5O_storage_chunk_bt2_t { + haddr_t dset_ohdr_addr; /* File address dataset's object header */ + struct H5B2_t *bt2; /* Pointer to b-tree 2 struct */ +} H5O_storage_chunk_bt2_t; + typedef struct H5O_storage_chunk_t { H5D_chunk_index_t idx_type; /* Type of chunk index */ haddr_t idx_addr; /* File address of chunk index */ const struct H5D_chunk_ops_t *ops; /* Pointer to chunked storage operations */ union { H5O_storage_chunk_btree_t btree; /* Information for v1 B-tree index */ + H5O_storage_chunk_bt2_t btree2; /* Information for v2 B-tree index */ + H5O_storage_chunk_earray_t earray; /* Information for extensible array index */ + H5O_storage_chunk_farray_t farray; /* Information for fixed array index */ } u; } H5O_storage_chunk_t; @@ -510,7 +543,42 @@ typedef struct H5O_storage_t { } u; } H5O_storage_t; +typedef struct H5O_layout_chunk_farray_t { + /* Creation parameters for fixed array data structure */ + struct { + uint8_t max_dblk_page_nelmts_bits; /* Log2(Max. # of elements in a data block page) - + i.e. # of bits needed to store max. # of elements + in a data block page */ + } cparam; +} H5O_layout_chunk_farray_t; + +typedef struct H5O_layout_chunk_earray_t { + /* Creation parameters for extensible array data structure */ + struct { + uint8_t max_nelmts_bits; /* Log2(Max. # of elements in array) - i.e. # of bits needed to store max. # of elements */ + uint8_t idx_blk_elmts; /* # of elements to store in index block */ + uint8_t data_blk_min_elmts; /* Min. # of elements per data block */ + uint8_t sup_blk_min_data_ptrs; /* Min. # of data block pointers for a super block */ + uint8_t max_dblk_page_nelmts_bits; /* Log2(Max. # of elements in data block page) - i.e. # of bits needed to store max. 
# of elements in data block page */ + } cparam; + + unsigned unlim_dim; /* Rank of unlimited dimension for dataset */ + uint32_t swizzled_dim[H5O_LAYOUT_NDIMS]; /* swizzled chunk dimensions */ + hsize_t swizzled_down_chunks[H5O_LAYOUT_NDIMS]; /* swizzled "down" size of number of chunks in each dimension */ +} H5O_layout_chunk_earray_t; + +typedef struct H5O_layout_chunk_bt2_t { + /* Creation parameters for v2 B-tree data structure */ + struct { + uint32_t node_size; /* Size of each node (in bytes) */ + uint8_t split_percent; /* % full to split nodes */ + uint8_t merge_percent; /* % full to merge nodes */ + } cparam; +} H5O_layout_chunk_bt2_t; + typedef struct H5O_layout_chunk_t { + H5D_chunk_index_t idx_type; /* Type of chunk index */ + uint8_t flags; /* Chunk layout flags */ unsigned ndims; /* Num dimensions in chunk */ uint32_t dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in elements */ unsigned enc_bytes_per_dim; /* Encoded # of bytes for storing each chunk dimension */ @@ -521,6 +589,11 @@ typedef struct H5O_layout_chunk_t { hsize_t max_chunks[H5O_LAYOUT_NDIMS]; /* # of chunks in each dataset's max. dimension */ hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */ hsize_t max_down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each max dim */ + union { + H5O_layout_chunk_farray_t farray; /* Information for fixed array index */ + H5O_layout_chunk_earray_t earray; /* Information for extensible array index */ + H5O_layout_chunk_bt2_t btree2; /* Information for v2 B-tree index */ + } u; } H5O_layout_chunk_t; typedef struct H5O_layout_t { diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c index df88bee..1237bfc 100644 --- a/src/H5Pdcpl.c +++ b/src/H5Pdcpl.c @@ -57,7 +57,7 @@ #define H5D_DEF_STORAGE_COMPACT_INIT {(hbool_t)FALSE, (size_t)0, NULL} #define H5D_DEF_STORAGE_CONTIG_INIT {HADDR_UNDEF, (hsize_t)0} #define H5D_DEF_STORAGE_CHUNK_INIT {H5D_CHUNK_IDX_BTREE, HADDR_UNDEF, H5D_COPS_BTREE, {{HADDR_UNDEF, NULL}}} -#define H5D_DEF_LAYOUT_CHUNK_INIT {(unsigned)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, (unsigned)0, (uint32_t)0, (hsize_t)0, (hsize_t)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}} +#define H5D_DEF_LAYOUT_CHUNK_INIT {H5D_CHUNK_IDX_BTREE, (uint8_t)0, (unsigned)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, (unsigned)0, (uint32_t)0, (hsize_t)0, (hsize_t)0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {{{(uint8_t)0}}}} #define H5D_DEF_STORAGE_VIRTUAL_INIT {{HADDR_UNDEF, 0}, 0, NULL, 0, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, H5D_VDS_ERROR, HSIZE_UNDEF, -1, -1, FALSE} #ifdef H5_HAVE_C99_DESIGNATED_INITIALIZER #define H5D_DEF_STORAGE_COMPACT {H5D_COMPACT, { .compact = H5D_DEF_STORAGE_COMPACT_INIT }} @@ -774,12 +774,6 @@ H5P__dcrt_layout_cmp(const void *_layout1, const void *_layout2, if(layout1->type > layout2->type) HGOTO_DONE(1) - /* Check for different layout version */ - if(layout1->version < layout2->version) - HGOTO_DONE(-1) - if(layout1->version > layout2->version) - HGOTO_DONE(1) - /* Compare 
non-dataset-specific fields in layout info */ switch(layout1->type) { case H5D_COMPACT: @@ -1651,14 +1645,6 @@ H5P__dcrt_ext_file_list_cmp(const void *_efl1, const void *_efl2, HDassert(efl2); HDassert(size == sizeof(H5O_efl_t)); - /* Check the heap address of external file lists */ - if(H5F_addr_defined(efl1->heap_addr) || H5F_addr_defined(efl2->heap_addr)) { - if(!H5F_addr_defined(efl1->heap_addr) && H5F_addr_defined(efl2->heap_addr)) HGOTO_DONE(-1); - if(H5F_addr_defined(efl1->heap_addr) && !H5F_addr_defined(efl2->heap_addr)) HGOTO_DONE(1); - if((cmp_value = H5F_addr_cmp(efl1->heap_addr, efl2->heap_addr)) != 0) - HGOTO_DONE(cmp_value); - } /* end if */ - /* Check the number of allocated efl entries */ if(efl1->nalloc < efl2->nalloc) HGOTO_DONE(-1); if(efl1->nalloc > efl2->nalloc) HGOTO_DONE(1); @@ -1998,7 +1984,6 @@ H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/]) H5P_genplist_t *plist; /* Property list pointer */ H5O_layout_t chunk_layout; /* Layout information for setting chunk info */ uint64_t chunk_nelmts; /* Number of elements in chunk */ - unsigned max_enc_bytes_per_dim; /* Max. number of bytes required to encode this dimension */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -2026,10 +2011,7 @@ H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/]) HDmemcpy(&chunk_layout, &H5D_def_layout_chunk_g, sizeof(H5D_def_layout_chunk_g)); HDmemset(&chunk_layout.u.chunk.dim, 0, sizeof(chunk_layout.u.chunk.dim)); chunk_nelmts = 1; - max_enc_bytes_per_dim = 0; for(u = 0; u < (unsigned)ndims; u++) { - unsigned enc_bytes_per_dim; /* Number of bytes required to encode this dimension */ - if(dim[u] == 0) HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "all chunk dimensions must be positive") if(dim[u] != (dim[u] & 0xffffffff)) @@ -2038,16 +2020,7 @@ H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/]) if(chunk_nelmts > (uint64_t)0xffffffff) HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "number of elements in chunk must be < 4GB") chunk_layout.u.chunk.dim[u] = (uint32_t)dim[u]; /* Store user's chunk dimensions */ - - /* Get encoded size of dim, in bytes */ - enc_bytes_per_dim = (H5VM_log2_gen(dim[u]) + 8) / 8; - - /* Check if this is the largest value so far */ - if(enc_bytes_per_dim > max_enc_bytes_per_dim) - max_enc_bytes_per_dim = enc_bytes_per_dim; } /* end for */ - HDassert(max_enc_bytes_per_dim > 0 && max_enc_bytes_per_dim <= 8); - chunk_layout.u.chunk.enc_bytes_per_dim = max_enc_bytes_per_dim; /* Get the plist structure */ if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE))) @@ -2631,6 +2604,127 @@ done: /*------------------------------------------------------------------------- + * Function: H5Pset_chunk_opts + * + * Purpose: Sets the options related to chunked storage for a dataset. + * The storage must already be set to chunked. 
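Example (not part of the patch): an application-level sketch of the new H5Pset_chunk_opts()/H5Pget_chunk_opts() pair as declared in this patch. The function name and chunk dimensions are illustrative and error checking is trimmed; note that the DCPL must already have a chunked layout, which is why H5Pset_chunk() is called first.

#include "hdf5.h"
#include <assert.h>

static hid_t
make_dcpl_with_unfiltered_edges(void)
{
    hid_t    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    hsize_t  chunk_dims[2] = {64, 64};
    unsigned opts = 0;

    /* The layout must already be chunked before chunk options can be set */
    H5Pset_chunk(dcpl, 2, chunk_dims);

    /* Partial edge chunks of datasets created with this DCPL will bypass
     * the filter pipeline */
    H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);

    /* Read the option back to confirm the round trip */
    H5Pget_chunk_opts(dcpl, &opts);
    assert(opts == H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);

    return dcpl;    /* caller closes with H5Pclose() */
}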
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * Thursday, January 21, 2010 + * + *------------------------------------------------------------------------- + */ +herr_t +H5Pset_chunk_opts(hid_t plist_id, unsigned options) +{ + H5P_genplist_t *plist; /* Property list pointer */ + H5O_layout_t layout; /* Layout information for setting chunk info */ + uint8_t layout_flags = 0; /* "options" translated into layout message flags format */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "iIu", plist_id, options); + + /* Check arguments */ + if(options & ~(H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS)) + HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "unknown chunk options") + +#ifndef H5_HAVE_C99_DESIGNATED_INITIALIZER + /* If the compiler doesn't support C99 designated initializers, check if + * the default layout structs have been initialized yet or not. *ick* -QAK + */ + if(!H5P_dcrt_def_layout_init_g) + if(H5P__init_def_layout() < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't initialize default layout info") +#endif /* H5_HAVE_C99_DESIGNATED_INITIALIZER */ + + /* Get the plist structure */ + if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE))) + HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID") + + /* Retrieve the layout property */ + if(H5P_peek(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "can't get layout") + if(H5D_CHUNKED != layout.type) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a chunked storage layout") + + /* Translate options into flags that can be used with the layout message */ + if(options & H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) + layout_flags |= H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS; + + /* Update the layout message, including the version (if necessary) */ + /* This probably isn't the right way to do this, and should be changed once + * this branch gets the "real" way to set the layout version */ + layout.u.chunk.flags = layout_flags; + if(layout.version < H5O_LAYOUT_VERSION_4) + layout.version = H5O_LAYOUT_VERSION_4; + + /* Set layout value */ + if(H5P_poke(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't set layout") + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pset_chunk_opts() */ + + +/*------------------------------------------------------------------------- + * Function: H5Pget_chunk_opts + * + * Purpose: Gets the options related to chunked storage for a dataset. + * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * Friday, January 22, 2010 + * + *------------------------------------------------------------------------- + */ +herr_t +H5Pget_chunk_opts(hid_t plist_id, unsigned *options) +{ + H5P_genplist_t *plist; /* Property list pointer */ + H5O_layout_t layout; /* Layout information for setting chunk info */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE2("e", "i*Iu", plist_id, options); + +#ifndef H5_HAVE_C99_DESIGNATED_INITIALIZER + /* If the compiler doesn't support C99 designated initializers, check if + * the default layout structs have been initialized yet or not. 
*ick* -QAK + */ + if(!H5P_dcrt_def_layout_init_g) + if(H5P__init_def_layout() < 0) + HGOTO_ERROR(H5E_PLIST, H5E_CANTINIT, FAIL, "can't initialize default layout info") +#endif /* H5_HAVE_C99_DESIGNATED_INITIALIZER */ + + /* Get the plist structure */ + if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE))) + HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID") + + /* Retrieve the layout property */ + if(H5P_peek(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0) + HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "can't get layout") + if(H5D_CHUNKED != layout.type) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a chunked storage layout") + + if(options) { + /* Translate options from flags that can be used with the layout message + * to those known to the public */ + *options = 0; + if(layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) + *options |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; + } /* end if */ + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5Pget_chunk_opts() */ + + +/*------------------------------------------------------------------------- * Function: H5Pset_external * * Purpose: Adds an external file to the list of external files. PLIST_ID diff --git a/src/H5Ppkg.h b/src/H5Ppkg.h index 3662cf9..7d29f3d 100644 --- a/src/H5Ppkg.h +++ b/src/H5Ppkg.h @@ -204,7 +204,6 @@ H5_DLL herr_t H5P_get_filter(const struct H5Z_filter_info_t *filter, #ifdef H5P_TESTING H5_DLL char *H5P_get_class_path_test(hid_t pclass_id); H5_DLL hid_t H5P_open_class_path_test(const char *path); -H5_DLL herr_t H5P_reset_external_file_test(hid_t dcpl_id); #endif /* H5P_TESTING */ #endif /* _H5Ppkg_H */ diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h index 4bf1d06..91b5745 100644 --- a/src/H5Ppublic.h +++ b/src/H5Ppublic.h @@ -373,6 +373,8 @@ H5_DLL ssize_t H5Pget_virtual_dsetname(hid_t dcpl_id, size_t index, char *name/*out*/, size_t size); H5_DLL herr_t H5Pset_external(hid_t plist_id, const char *name, off_t offset, hsize_t size); +H5_DLL herr_t H5Pset_chunk_opts(hid_t plist_id, unsigned opts); +H5_DLL herr_t H5Pget_chunk_opts(hid_t plist_id, unsigned *opts); H5_DLL int H5Pget_external_count(hid_t plist_id); H5_DLL herr_t H5Pget_external(hid_t plist_id, unsigned idx, size_t name_size, char *name/*out*/, off_t *offset/*out*/, diff --git a/src/H5Ptest.c b/src/H5Ptest.c index 8240f6a..f6cc97e 100644 --- a/src/H5Ptest.c +++ b/src/H5Ptest.c @@ -125,49 +125,3 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* H5P_open_class_path_test() */ - -/*-------------------------------------------------------------------------- - NAME - H5P_reset_external_file_test - PURPOSE - Routine to reset external file list - USAGE - herr_t H5P_reset_external_file_test(plist) - hid_t dcpl_id; IN: the property list - - RETURNS - Non-negative on success/Negative on failure - - PROGRAMMER - Peter Cao - April 30, 2007 ---------------------------------------------------------------------------*/ -herr_t -H5P_reset_external_file_test(hid_t dcpl_id) -{ - H5O_efl_t efl; /* External file list */ - H5P_genplist_t *plist; /* Property list */ - herr_t ret_value = SUCCEED; /* Return value */ - - FUNC_ENTER_NOAPI(FAIL) - - /* Check arguments */ - if(NULL == (plist = (H5P_genplist_t *)H5I_object(dcpl_id))) - HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list") - - /* get external file list */ - if(H5P_peek(plist, H5D_CRT_EXT_FILE_LIST_NAME, &efl) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get external file list") - - /* Clean up any values set for the external file-list */ - 
if(H5O_msg_reset(H5O_EFL_ID, &efl) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't release external file list info") - - /* set external file list */ - if(H5P_poke(plist, H5D_CRT_EXT_FILE_LIST_NAME, &efl) < 0) - HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set external file list") - -done: - FUNC_LEAVE_NOAPI(ret_value) -} /* H5P_reset_external_file_test() */ - diff --git a/src/H5Tcommit.c b/src/H5Tcommit.c index 8511ff5..07fe371 100644 --- a/src/H5Tcommit.c +++ b/src/H5Tcommit.c @@ -351,7 +351,7 @@ H5T__commit(H5F_t *file, H5T_t *type, hid_t tcpl_id, hid_t dxpl_id) loc_init = TRUE; /* Set the latest format, if requested */ - if(H5F_USE_LATEST_FORMAT(file)) + if(H5F_USE_LATEST_FLAGS(file, H5F_LATEST_DATATYPE)) if(H5T_set_latest_version(type) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set latest version of datatype") diff --git a/src/H5VMprivate.h b/src/H5VMprivate.h index 7d3361b..f402b36 100644 --- a/src/H5VMprivate.h +++ b/src/H5VMprivate.h @@ -49,6 +49,22 @@ typedef herr_t (*H5VM_opvv_func_t)(hsize_t dst_off, hsize_t src_off, #define H5VM_vector_zero(N,DST) HDmemset(DST,0,(N)*sizeof(*(DST))) +/* Given a coordinate offset array (COORDS) of type TYPE, move the unlimited + * dimension (UNLIM_DIM) value to offset 0, sliding any intermediate values down + * one position. */ +#define H5VM_swizzle_coords(TYPE,COORDS,UNLIM_DIM) { \ + /* COORDS must be an array of type TYPE */ \ + HDassert(sizeof(COORDS[0]) == sizeof(TYPE)); \ + \ + /* Nothing to do when unlimited dimension is at position 0 */ \ + if(0 != (UNLIM_DIM)) { \ + TYPE _tmp = (COORDS)[UNLIM_DIM]; \ + \ + HDmemmove(&(COORDS)[1], &(COORDS)[0], sizeof(TYPE) * (UNLIM_DIM)); \ + (COORDS)[0] = _tmp; \ + } /* end if */ \ +} + /* A null pointer is equivalent to a zero vector */ #define H5VM_ZERO NULL diff --git a/src/H5trace.c b/src/H5trace.c index 08b33af..0d64f13 100644 --- a/src/H5trace.c +++ b/src/H5trace.c @@ -491,6 +491,44 @@ H5_trace(const double *returning, const char *func, const char *type, ...) 
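Example (not part of the patch): a worked illustration of the new H5VM_swizzle_coords() macro from H5VMprivate.h. The demo function name is illustrative, and it assumes the internal headers (for hsize_t, HDassert and HDmemmove) are available.

#include <assert.h>

static void
swizzle_demo(void)
{
    hsize_t  coords[3] = {5, 2, 7};
    unsigned unlim_dim = 2;         /* unlimited dimension is the last one */

    /* Moves the unlimited coordinate to position 0 and slides the
     * preceding values down one slot: {5, 2, 7} -> {7, 5, 2} */
    H5VM_swizzle_coords(hsize_t, coords, unlim_dim);

    assert(coords[0] == 7);
    assert(coords[1] == 5);
    assert(coords[2] == 2);
}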
} /* end else */ break; + case 'k': + if(ptr) { + if(vp) + fprintf(out, "0x%lx", (unsigned long)vp); + else + fprintf(out, "NULL"); + } /* end if */ + else { + H5D_chunk_index_t idx = (H5D_chunk_index_t)va_arg(ap, int); + + switch(idx) { + case H5D_CHUNK_IDX_BTREE: + fprintf(out, "H5D_CHUNK_IDX_BTREE"); + break; + + case H5D_CHUNK_IDX_FARRAY: + fprintf(out, "H5D_CHUNK_IDX_FARRAY"); + break; + + case H5D_CHUNK_IDX_EARRAY: + fprintf(out, "H5D_CHUNK_IDX_EARRAY"); + break; + + case H5D_CHUNK_IDX_BT2: + fprintf(out, "H5D_CHUNK_IDX_BT2"); + break; + + case H5D_CHUNK_IDX_NTYPES: + fprintf(out, "ERROR: H5D_CHUNK_IDX_NTYPES (invalid value)"); + break; + + default: + fprintf(out, "UNKNOWN VALUE: %ld", (long)idx); + break; + } /* end switch */ + } /* end else */ + break; + case 'l': if(ptr) { if(vp) diff --git a/src/Makefile.am b/src/Makefile.am index 9f748bf..ec8c007 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -47,8 +47,8 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \ H5B2.c H5B2cache.c H5B2dbg.c H5B2hdr.c H5B2int.c H5B2stat.c H5B2test.c \ H5C.c \ H5CS.c \ - H5D.c H5Dbtree.c H5Dchunk.c H5Dcompact.c H5Dcontig.c H5Ddbg.c \ - H5Ddeprec.c H5Defl.c H5Dfill.c H5Dint.c \ + H5D.c H5Dbtree.c H5Dbtree2.c H5Dchunk.c H5Dcompact.c H5Dcontig.c H5Ddbg.c \ + H5Ddeprec.c H5Dearray.c H5Defl.c H5Dfarray.c H5Dfill.c H5Dint.c \ H5Dio.c H5Dlayout.c \ H5Doh.c H5Dscatgath.c H5Dselect.c H5Dtest.c H5Dvirtual.c \ H5E.c H5Edeprec.c H5Eint.c \ @@ -84,8 +84,10 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \ H5Olayout.c \ H5Olinfo.c H5Olink.c H5Omessage.c H5Omtime.c \ H5Oname.c H5Onull.c H5Opline.c H5Orefcount.c \ - H5Osdspace.c H5Oshared.c H5Ostab.c \ - H5Oshmesg.c H5Otest.c H5Ounknown.c \ + H5Osdspace.c H5Oshared.c \ + H5Oshmesg.c \ + H5Ostab.c \ + H5Otest.c H5Ounknown.c \ H5P.c H5Pacpl.c H5Pdapl.c H5Pdcpl.c \ H5Pdeprec.c H5Pdxpl.c H5Pencdec.c \ H5Pfapl.c H5Pfcpl.c H5Pfmpl.c \ diff --git a/test/dsets.c b/test/dsets.c index 886464a..c7a67b6 100644 --- a/test/dsets.c +++ b/test/dsets.c @@ -44,10 +44,13 @@ const char *FILENAME[] = { "huge_chunks", /* 7 */ "chunk_cache", /* 8 */ "big_chunk", /* 9 */ - "chunk_expand", /* 10 */ - "copy_dcpl_newfile",/* 11 */ - "layout_extend", /* 12 */ - "zero_chunk", /* 13 */ + "chunk_fast", /* 10 */ + "chunk_expand", /* 11 */ + "chunk_fixed", /* 12 */ + "copy_dcpl_newfile",/* 13 */ + "partial_chunks", /* 14 */ + "layout_extend", /* 15 */ + "zero_chunk", /* 16 */ NULL }; #define FILENAME_BUF_SIZE 1024 @@ -114,6 +117,13 @@ const char *FILENAME[] = { #define DSET_DEPREC_NAME_COMPACT "deprecated_compact" #define DSET_DEPREC_NAME_FILTER "deprecated_filter" +/* Dataset names for testing Fixed Array Indexing */ +#define DSET_FIXED_MAX "DSET_FIXED_MAX" +#define DSET_FIXED_NOMAX "DSET_FIXED_NOMAX" +#define DSET_FIXED_BIG "DSET_FIXED_BIG" +#define POINTS 72 +#define POINTS_BIG 2500 + #define USER_BLOCK 1024 #define SIXTY_FOUR_KB 65536 @@ -125,6 +135,7 @@ const char *FILENAME[] = { #define H5Z_FILTER_DEPREC 309 #define H5Z_FILTER_EXPAND 310 #define H5Z_FILTER_CAN_APPLY_TEST2 311 +#define H5Z_FILTER_COUNT 312 /* Flags for testing filters */ #define DISABLE_FLETCHER32 0 @@ -198,6 +209,8 @@ const char *FILENAME[] = { #define DSET_DIM2 200 int points[DSET_DIM1][DSET_DIM2], check[DSET_DIM1][DSET_DIM2]; double points_dbl[DSET_DIM1][DSET_DIM2], check_dbl[DSET_DIM1][DSET_DIM2]; +size_t count_nbytes_read = 0; +size_t count_nbytes_written = 0; /* Local prototypes for filter functions */ static size_t filter_bogus(unsigned int flags, size_t 
cd_nelmts, @@ -212,6 +225,49 @@ static size_t filter_corrupt(unsigned int flags, size_t cd_nelmts, const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf); static size_t filter_expand(unsigned int flags, size_t cd_nelmts, const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf); +static size_t filter_count(unsigned int flags, size_t cd_nelmts, + const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf); + +/* This message derives from H5Z */ +const H5Z_class2_t H5Z_COUNT[1] = {{ + H5Z_CLASS_T_VERS, /* H5Z_class_t version */ + H5Z_FILTER_COUNT, /* Filter id number */ + 1, 1, /* Encoding and decoding enabled */ + "count", /* Filter name for debugging */ + NULL, /* The "can apply" callback */ + NULL, /* The "set local" callback */ + filter_count, /* The actual filter function */ +}}; + + +/*------------------------------------------------------------------------- + * Function: filter_count + * + * Purpose: This filter counts the number of bytes read and written, + * incrementing count_nbytes_read or count_nbytes_written as + * appropriate. + * + * Return: Success: Data chunk size + * + * Failure: 0 + * + * Programmer: Neil Fortner + * Wednesday, March 17, 2010 + * + *------------------------------------------------------------------------- + */ +static size_t +filter_count(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts, + const unsigned int H5_ATTR_UNUSED *cd_values, size_t nbytes, + size_t H5_ATTR_UNUSED *buf_size, void H5_ATTR_UNUSED **buf) +{ + if(flags & H5Z_FLAG_REVERSE) + count_nbytes_read += nbytes; + else + count_nbytes_written += nbytes; + + return nbytes; +} /*------------------------------------------------------------------------- @@ -905,7 +961,7 @@ test_layout_extend(hid_t fapl) TESTING("extendible dataset with various layout"); /* Create a file */ - h5_fixname(FILENAME[12], fapl, filename, sizeof filename); + h5_fixname(FILENAME[15], fapl, filename, sizeof filename); if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR @@ -2798,7 +2854,7 @@ test_nbit_int(hid_t file) mask = ~((unsigned)~0 << (precision + offset)) & ((unsigned)~0 << offset); for(i=0; i<(size_t)size[0]; i++) { for(j=0; j<(size_t)size[1]; j++) { - if((new_data[i][j] & mask) != (orig_data[i][j] & mask)) { + if(((unsigned)new_data[i][j] & mask) != ((unsigned)orig_data[i][j] & mask)) { H5_FAILED(); printf(" Read different values than written.\n"); printf(" At index %lu,%lu\n", (unsigned long)i, (unsigned long)j); @@ -3337,9 +3393,9 @@ test_nbit_compound(hid_t file) s_mask = ~((unsigned)~0 << (precision[2] + offset[2])) & ((unsigned)~0 << offset[2]); for(i=0; i<size[0]; i++) { for(j=0; j<size[1]; j++) { - if((new_data[i][j].i & i_mask) != (orig_data[i][j].i & i_mask) || - (new_data[i][j].c & c_mask) != (orig_data[i][j].c & c_mask) || - (new_data[i][j].s & s_mask) != (orig_data[i][j].s & s_mask) || + if(((unsigned)new_data[i][j].i & i_mask) != ((unsigned)orig_data[i][j].i & i_mask) || + ((unsigned)new_data[i][j].c & c_mask) != ((unsigned)orig_data[i][j].c & c_mask) || + ((unsigned)new_data[i][j].s & s_mask) != ((unsigned)orig_data[i][j].s & s_mask) || (orig_data[i][j].f==orig_data[i][j].f && new_data[i][j].f != orig_data[i][j].f)) { H5_FAILED(); @@ -3595,16 +3651,16 @@ test_nbit_compound_2(hid_t file) for(m = 0; m < (size_t)array_dims[0]; m++) for(n = 0; n < (size_t)array_dims[1]; n++) - if((new_data[i][j].b[m][n]&b_mask)!=(orig_data[i][j].b[m][n]&b_mask)) { + if(((unsigned)new_data[i][j].b[m][n] & 
b_mask)!=((unsigned)orig_data[i][j].b[m][n] & b_mask)) { b_failed = 1; goto out; } for(m = 0; m < (size_t)array_dims[0]; m++) for(n = 0; n < (size_t)array_dims[1]; n++) - if((new_data[i][j].d[m][n].i & i_mask)!=(orig_data[i][j].d[m][n].i & i_mask)|| - (new_data[i][j].d[m][n].c & c_mask)!=(orig_data[i][j].d[m][n].c & c_mask)|| - (new_data[i][j].d[m][n].s & s_mask)!=(orig_data[i][j].d[m][n].s & s_mask)|| + if(((unsigned)new_data[i][j].d[m][n].i & i_mask) != ((unsigned)orig_data[i][j].d[m][n].i & i_mask)|| + ((unsigned)new_data[i][j].d[m][n].c & c_mask) != ((unsigned)orig_data[i][j].d[m][n].c & c_mask)|| + ((unsigned)new_data[i][j].d[m][n].s & s_mask) != ((unsigned)orig_data[i][j].d[m][n].s & s_mask)|| (new_data[i][j].d[m][n].f==new_data[i][j].d[m][n].f && new_data[i][j].d[m][n].f != new_data[i][j].d[m][n].f)) { d_failed = 1; @@ -3612,9 +3668,9 @@ test_nbit_compound_2(hid_t file) } out: - if((new_data[i][j].a.i & i_mask)!=(orig_data[i][j].a.i & i_mask)|| - (new_data[i][j].a.c & c_mask)!=(orig_data[i][j].a.c & c_mask)|| - (new_data[i][j].a.s & s_mask)!=(orig_data[i][j].a.s & s_mask)|| + if(((unsigned)new_data[i][j].a.i & i_mask) != ((unsigned)orig_data[i][j].a.i & i_mask)|| + ((unsigned)new_data[i][j].a.c & c_mask) != ((unsigned)orig_data[i][j].a.c & c_mask)|| + ((unsigned)new_data[i][j].a.s & s_mask) != ((unsigned)orig_data[i][j].a.s & s_mask)|| (new_data[i][j].a.f==new_data[i][j].a.f && new_data[i][j].a.f != new_data[i][j].a.f)|| new_data[i][j].v != orig_data[i][j].v || b_failed || d_failed) { @@ -6067,7 +6123,7 @@ test_copy_dcpl(hid_t file, hid_t fapl) /* Create a second file and create 2 datasets with the copies of the DCPLs in the first * file. Test whether the copies of DCPLs work. */ - h5_fixname(FILENAME[11], fapl, filename, sizeof filename); + h5_fixname(FILENAME[13], fapl, filename, sizeof filename); if((new_file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR @@ -6413,15 +6469,24 @@ static herr_t test_zero_dims(hid_t file) { hid_t s = -1, d = -1, dcpl = -1; - hsize_t dsize = 0, dmax = H5S_UNLIMITED, csize = 5; + hsize_t dzero = 0, dmax = H5S_UNLIMITED, csize = 5; + hid_t fapl; /* File access property list */ + H5D_chunk_index_t idx_type; /* Dataset chunk index type */ + H5F_libver_t low; /* File format low bound */ herr_t ret; TESTING("I/O on datasets with zero-sized dims"); + /* Get the file's file access property list */ + if((fapl = H5Fget_access_plist(file)) < 0) FAIL_STACK_ERROR + + /* Get library format */ + if(H5Pget_libver_bounds(fapl, &low, NULL) < 0) FAIL_STACK_ERROR + /* * One-dimensional dataset */ - if((s = H5Screate_simple(1, &dsize, &dmax)) < 0) FAIL_STACK_ERROR + if((s = H5Screate_simple(1, &dzero, &dmax)) < 0) FAIL_STACK_ERROR /* Try creating chunked dataset with undefined chunk dimensions */ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR @@ -6437,7 +6502,7 @@ test_zero_dims(hid_t file) /* Try creating chunked dataset with zero-sized chunk dimensions */ H5E_BEGIN_TRY { - ret = H5Pset_chunk(dcpl, 1, &dsize); + ret = H5Pset_chunk(dcpl, 1, &dzero); } H5E_END_TRY; if(ret > 0) FAIL_PUTS_ERROR("set zero-sized chunk dimensions") @@ -6449,6 +6514,16 @@ test_zero_dims(hid_t file) if(H5Pset_chunk(dcpl, 1, &csize) < 0) FAIL_STACK_ERROR if((d = H5Dcreate2(file, ZERODIM_DATASET, H5T_NATIVE_INT, s, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR + /* Get the chunk index type */ + if(H5D__layout_idx_type_test(d, &idx_type) < 0) FAIL_STACK_ERROR + + /* Verify index type */ + if(low == H5F_LIBVER_LATEST) { + if(idx_type != 
H5D_CHUNK_IDX_EARRAY) + FAIL_PUTS_ERROR("should be using extensible array as index"); + } else if(idx_type != H5D_CHUNK_IDX_BTREE) + FAIL_PUTS_ERROR("should be using v1 B-tree as index"); + /* Various no-op writes */ if(H5Dwrite(d, H5T_NATIVE_INT, s, s, H5P_DEFAULT, (void*)911) < 0) FAIL_STACK_ERROR if(H5Dwrite(d, H5T_NATIVE_INT, s, s, H5P_DEFAULT, NULL) < 0) FAIL_STACK_ERROR @@ -7850,6 +7925,545 @@ error: /*------------------------------------------------------------------------- + * Function: test_fixed_array + * + * Purpose: Tests support for Fixed Array and Implicit Indexing + * + * Create the following 3 datasets: + * 1) extendible chunked dataset with fixed max. dims + * 2) extendible chunked dataset with NULL max. dims + * 3) extendible chunked dataset with same max. dims + * (Note that the third dataset is created with bigger size for curr & max. dims + * so that Fixed Array Indexing with paging is exercised) + * + * Repeat the following test with/without compression filter + * Repeat the following test with H5D_ALLOC_TIME_EARLY/H5D_ALLOC_TIME_LATE/H5D_ALLOC_TIME_INCR + * For the old format, + * verify that v1 btree indexing type is used for + * all 3 datasets with all settings + * For the new format: + * Verify that Implicit Index type is used for + * #1, #2, #3 datasets when ALLOC_TIME_EARLY and compression are true + * Verify Fixed Array indexing type is used for + * #1, #2, #3 datasets with all other settings + * + * Return: Success: 0 + * Failure: -1 + * + * Programmer: Vailin Choi; 2009 + * + *------------------------------------------------------------------------- + */ +static herr_t +test_fixed_array(hid_t fapl) +{ + char filename[FILENAME_BUF_SIZE]; /* File name */ + hid_t fid = -1; /* File ID */ + hid_t dcpl = -1; /* Dataset creation property list ID */ + + hid_t sid = -1; /* Dataspace ID for dataset with fixed dimensions */ + hid_t sid_big = -1; /* Dataspate ID for big dataset */ + hid_t sid_max = -1; /* Dataspace ID for dataset with maximum dimensions set */ + + hid_t dsid = -1; /* Dataset ID for dataset with fixed dimensions */ + hid_t dsid_big = -1; /* Dataset ID for big dataset with fixed dimensions */ + hid_t dsid_max = -1; /* Dataset ID for dataset with maximum dimensions set */ + + hsize_t dim2[2] = {48, 18}; /* Dataset dimensions */ + hsize_t dim2_big[2] = {500, 60}; /* Big dataset dimensions */ + hsize_t dim2_max[2] = {120, 50}; /* Maximum dataset dimensions */ + + hid_t mem_id; /* Memory space ID */ + hid_t big_mem_id; /* Memory space ID for big dataset */ + + hsize_t msize[1] = {POINTS}; /* Size of memory space */ + hsize_t msize_big[1] = {POINTS_BIG}; /* Size of memory space for big dataset */ + + int wbuf[POINTS]; /* write buffer */ + int wbuf_big[POINTS_BIG]; /* write buffer for big dataset */ + int rbuf[POINTS]; /* read buffer */ + int rbuf_big[POINTS_BIG]; /* read buffer for big dataset */ + + hsize_t chunk_dim2[2] = {4, 3}; /* Chunk dimensions */ + int chunks[12][6]; /* # of chunks for dataset dimensions */ + int chunks_big[125][20]; /* # of chunks for big dataset dimensions */ + int chunk_row; /* chunk row index */ + int chunk_col; /* chunk column index */ + + hsize_t coord[POINTS][2]; /* datdaset coordinates */ + hsize_t coord_big[POINTS_BIG][2]; /* big dataset coordinates */ + + H5D_chunk_index_t idx_type; /* Dataset chunk index type */ + H5F_libver_t low, high; /* File format bounds */ + H5D_alloc_time_t alloc_time; /* Storage allocation time */ + +#ifdef H5_HAVE_FILTER_DEFLATE + unsigned compress; /* Whether chunks should be compressed */ +#endif 
/* H5_HAVE_FILTER_DEFLATE */ + + h5_stat_size_t empty_size; /* Size of an empty file */ + h5_stat_size_t file_size; /* Size of each file created */ + + size_t i, j; /* local index variables */ + herr_t ret; /* Generic return value */ + + TESTING("datasets w/fixed array as chunk index"); + + h5_fixname(FILENAME[12], fapl, filename, sizeof filename); + + /* Check if we are using the latest version of the format */ + if(H5Pget_libver_bounds(fapl, &low, &high) < 0) FAIL_STACK_ERROR + + /* Create and close the file to get the file size */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + STACK_ERROR + if(H5Fclose(fid) < 0) + STACK_ERROR + + /* Get the size of the empty file */ + if((empty_size = h5_get_file_size(filename, fapl)) < 0) + TEST_ERROR + +#ifdef H5_HAVE_FILTER_DEFLATE + /* Loop over compressing chunks */ + for(compress = FALSE; compress <= TRUE; compress++) { +#endif /* H5_HAVE_FILTER_DEFLATE */ + + /* Loop over storage allocation time */ + for(alloc_time = H5D_ALLOC_TIME_EARLY; alloc_time <= H5D_ALLOC_TIME_INCR; alloc_time++) { + /* Create file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR + + /* Create dataset creation property list */ + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR + + /* Set chunking */ + if((ret = H5Pset_chunk(dcpl, 2, chunk_dim2)) < 0) + FAIL_PUTS_ERROR(" Problem with setting chunk.") + +#ifdef H5_HAVE_FILTER_DEFLATE + /* Check if we should compress the chunks */ + if(compress) + if(H5Pset_deflate(dcpl, 9) < 0) FAIL_STACK_ERROR +#endif /* H5_HAVE_FILTER_DEFLATE */ + + /* Set fill time */ + if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0) FAIL_STACK_ERROR + + /* Set allocation time */ + if(H5Pset_alloc_time(dcpl, alloc_time) < 0) FAIL_STACK_ERROR + + /* Initialization of chunk array for repeated coordinates */ + for(i = 0; i < dim2[0]/chunk_dim2[0]; i++) + for(j = 0; j < dim2[1]/chunk_dim2[1]; j++) + chunks[i][j] = 0; + + /* Generate random point coordinates. 
Only one point is selected per chunk */ + for(i = 0; i < POINTS; i++){ + do { + chunk_row = (int)HDrandom () % (int)(dim2[0]/chunk_dim2[0]); + chunk_col = (int)HDrandom () % (int)(dim2[1]/chunk_dim2[1]); + } while (chunks[chunk_row][chunk_col]); + + wbuf[i] = chunks[chunk_row][chunk_col] = chunk_row+chunk_col+1; + coord[i][0] = (hsize_t)chunk_row * chunk_dim2[0]; + coord[i][1] = (hsize_t)chunk_col * chunk_dim2[1]; + } /* end for */ + + /* Create first dataset with cur and max dimensions */ + if((sid_max = H5Screate_simple(2, dim2, dim2_max)) < 0) FAIL_STACK_ERROR + dsid_max = H5Dcreate2(fid, DSET_FIXED_MAX, H5T_NATIVE_INT, sid_max, H5P_DEFAULT, dcpl, H5P_DEFAULT); + if(dsid_max < 0) + FAIL_PUTS_ERROR(" Creating Chunked Dataset with maximum dimensions.") + + /* Get the chunk index type */ + if(H5D__layout_idx_type_test(dsid_max, &idx_type) < 0) FAIL_STACK_ERROR + + /* Chunk index type depends on whether we are using the latest version of the format */ + if(low == H5F_LIBVER_LATEST) { + if(alloc_time == H5D_ALLOC_TIME_EARLY +#ifdef H5_HAVE_FILTER_DEFLATE + && !compress +#endif /* H5_HAVE_FILTER_DEFLATE */ + ) { + } else if (idx_type != H5D_CHUNK_IDX_FARRAY) + FAIL_PUTS_ERROR("should be using Fixed Array as index"); + } /* end if */ + else { + if(idx_type != H5D_CHUNK_IDX_BTREE) + FAIL_PUTS_ERROR("should be using v1 B-tree as index"); + } /* end else */ + + /* Create dataspace for write buffer */ + if((mem_id = H5Screate_simple(1, msize, NULL)) < 0) TEST_ERROR; + + /* Select the random points for writing */ + if(H5Sselect_elements(sid_max, H5S_SELECT_SET, POINTS, (const hsize_t *)coord) < 0) + TEST_ERROR; + + /* Write into dataset */ + if(H5Dwrite(dsid_max, H5T_NATIVE_INT, mem_id, sid_max, H5P_DEFAULT, wbuf) < 0) TEST_ERROR; + + /* Closing */ + if(H5Dclose(dsid_max) < 0) FAIL_STACK_ERROR + if(H5Sclose(sid_max) < 0) FAIL_STACK_ERROR + if(H5Sclose(mem_id) < 0) FAIL_STACK_ERROR + + + /* Create second dataset with curr dim but NULL max dim */ + if((sid = H5Screate_simple(2, dim2, NULL)) < 0) FAIL_STACK_ERROR + dsid = H5Dcreate2(fid, DSET_FIXED_NOMAX, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + if(dsid < 0) + FAIL_PUTS_ERROR(" Creating Chunked Dataset.") + + /* Get the chunk index type */ + if(H5D__layout_idx_type_test(dsid, &idx_type) < 0) FAIL_STACK_ERROR + + /* Chunk index type depends on whether we are using the latest version of the format */ + if(low == H5F_LIBVER_LATEST) { + if(alloc_time == H5D_ALLOC_TIME_EARLY +#ifdef H5_HAVE_FILTER_DEFLATE + && !compress +#endif /* H5_HAVE_FILTER_DEFLATE */ + ) { + } else if(idx_type != H5D_CHUNK_IDX_FARRAY) + FAIL_PUTS_ERROR("should be using Fixed Array as index"); + } else { + if(idx_type != H5D_CHUNK_IDX_BTREE) + FAIL_PUTS_ERROR("should be using v1 B-tree as index"); + } /* end else */ + + /* Create dataspace for write buffer */ + if((mem_id = H5Screate_simple(1, msize, NULL)) < 0) TEST_ERROR; + + /* Select the random points for writing */ + if(H5Sselect_elements(sid, H5S_SELECT_SET, POINTS, (const hsize_t *)coord) < 0) + TEST_ERROR; + + /* Write into dataset */ + if(H5Dwrite(dsid, H5T_NATIVE_INT, mem_id, sid, H5P_DEFAULT, wbuf) < 0) TEST_ERROR; + + /* Closing */ + if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR + if(H5Sclose(sid) < 0) FAIL_STACK_ERROR + if(H5Sclose(mem_id) < 0) FAIL_STACK_ERROR + + /* Create the third dataset with bigger size and both curr & max dimensions are the same */ + if((sid_big = H5Screate_simple(2, dim2_big, dim2_big)) < 0) FAIL_STACK_ERROR + dsid_big = H5Dcreate2(fid, DSET_FIXED_BIG, H5T_NATIVE_INT, sid_big, 
H5P_DEFAULT, dcpl, H5P_DEFAULT); + if(dsid_big < 0) + FAIL_PUTS_ERROR(" Creating Big Chunked Dataset.") + + /* Get the chunk index type */ + if(H5D__layout_idx_type_test(dsid_big, &idx_type) < 0) FAIL_STACK_ERROR + + /* Chunk index type depends on whether we are using the latest version of the format */ + if(low == H5F_LIBVER_LATEST) { + if(alloc_time == H5D_ALLOC_TIME_EARLY +#ifdef H5_HAVE_FILTER_DEFLATE + && !compress +#endif /* H5_HAVE_FILTER_DEFLATE */ + ) { + } else if(idx_type != H5D_CHUNK_IDX_FARRAY) + FAIL_PUTS_ERROR("should be using Fixed Array as index"); + } /* end if */ + else { + if(idx_type != H5D_CHUNK_IDX_BTREE) + FAIL_PUTS_ERROR("should be using v1 B-tree as index"); + } /* end else */ + + /* Initialization of chunk array for repeated coordinates */ + for(i = 0; i < dim2_big[0]/chunk_dim2[0]; i++) + for(j = 0; j < dim2_big[1]/chunk_dim2[1]; j++) + chunks_big[i][j] = 0; + + /* Generate random point coordinates. Only one point is selected per chunk */ + for(i = 0; i < POINTS_BIG; i++){ + do { + chunk_row = (int)HDrandom () % (int)(dim2_big[0]/chunk_dim2[0]); + chunk_col = (int)HDrandom () % (int)(dim2_big[1]/chunk_dim2[1]); + } while (chunks_big[chunk_row][chunk_col]); + + wbuf_big[i] = chunks_big[chunk_row][chunk_col] = chunk_row+chunk_col+1; + coord_big[i][0] = (hsize_t)chunk_row * chunk_dim2[0]; + coord_big[i][1] = (hsize_t)chunk_col * chunk_dim2[1]; + } /* end for */ + + /* Create dataspace for write buffer */ + if((big_mem_id = H5Screate_simple(1, msize_big, NULL)) < 0) TEST_ERROR; + + /* Select the random points for writing */ + if(H5Sselect_elements(sid_big, H5S_SELECT_SET, POINTS_BIG, (const hsize_t *)coord_big) < 0) + TEST_ERROR; + + /* Write into dataset */ + if(H5Dwrite(dsid_big, H5T_NATIVE_INT, big_mem_id, sid_big, H5P_DEFAULT, wbuf_big) < 0) TEST_ERROR; + + /* Closing */ + if(H5Dclose(dsid_big) < 0) FAIL_STACK_ERROR + if(H5Sclose(sid_big) < 0) FAIL_STACK_ERROR + if(H5Sclose(big_mem_id) < 0) FAIL_STACK_ERROR + if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR + + /* Open the first dataset */ + if((dsid = H5Dopen2(fid, DSET_FIXED_MAX, H5P_DEFAULT)) < 0) TEST_ERROR; + + /* Get dataset dataspace */ + if((sid = H5Dget_space(dsid)) < 0) TEST_ERROR; + + /* Create dataspace for read buffer */ + if((mem_id = H5Screate_simple(1, msize, NULL)) < 0) TEST_ERROR; + + /* Select the random points for reading */ + if(H5Sselect_elements (sid, H5S_SELECT_SET, POINTS, (const hsize_t *)coord) < 0) TEST_ERROR; + + /* Read from dataset */ + if(H5Dread(dsid, H5T_NATIVE_INT, mem_id, sid, H5P_DEFAULT, rbuf) < 0) TEST_ERROR; + + /* Verify that written and read data are the same */ + for(i = 0; i < POINTS; i++) + if(rbuf[i] != wbuf[i]){ + printf(" Line %d: Incorrect value, wbuf[%u]=%d, rbuf[%u]=%d\n", + __LINE__,(unsigned)i,wbuf[i],(unsigned)i,rbuf[i]); + TEST_ERROR; + } /* end if */ + + /* Closing */ + if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR + if(H5Sclose(sid) < 0) FAIL_STACK_ERROR + if(H5Sclose(mem_id) < 0) FAIL_STACK_ERROR + + /* Open the second dataset */ + if((dsid = H5Dopen2(fid, DSET_FIXED_NOMAX, H5P_DEFAULT)) < 0) TEST_ERROR; + + /* Get dataset dataspace */ + if((sid = H5Dget_space(dsid)) < 0) TEST_ERROR; + + /* Create dataspace for read buffer */ + if((mem_id = H5Screate_simple(1, msize, NULL)) < 0) TEST_ERROR; + + /* Select the random points for reading */ + if(H5Sselect_elements (sid, H5S_SELECT_SET, POINTS, (const hsize_t *)coord) < 0) TEST_ERROR; + + /* Read from dataset */ + if(H5Dread(dsid, H5T_NATIVE_INT, mem_id, sid, H5P_DEFAULT, rbuf) < 0) TEST_ERROR; + + /* Verify that written and 
read data are the same */ + for(i = 0; i < POINTS; i++) + if(rbuf[i] != wbuf[i]){ + printf(" Line %d: Incorrect value, wbuf[%u]=%d, rbuf[%u]=%d\n", + __LINE__,(unsigned)i,wbuf[i],(unsigned)i,rbuf[i]); + TEST_ERROR; + } /* end if */ + + /* Closing */ + if(H5Dclose(dsid) < 0) FAIL_STACK_ERROR + if(H5Sclose(sid) < 0) FAIL_STACK_ERROR + if(H5Sclose(mem_id) < 0) FAIL_STACK_ERROR + + /* Open the third dataset */ + if((dsid_big = H5Dopen2(fid, DSET_FIXED_BIG, H5P_DEFAULT)) < 0) TEST_ERROR; + /* Get dataset dataspace */ + if((sid_big = H5Dget_space(dsid_big)) < 0) TEST_ERROR; + + /* Create dataspace for read buffer */ + if((big_mem_id = H5Screate_simple(1, msize_big, NULL)) < 0) TEST_ERROR; + + /* Select the random points for reading */ + if(H5Sselect_elements (sid_big, H5S_SELECT_SET, POINTS_BIG, (const hsize_t *)coord_big) < 0) TEST_ERROR; + /* Read from dataset */ + if(H5Dread(dsid_big, H5T_NATIVE_INT, big_mem_id, sid_big, H5P_DEFAULT, rbuf_big) < 0) TEST_ERROR; + + /* Verify that written and read data are the same */ + for(i = 0; i < POINTS_BIG; i++) + if(rbuf_big[i] != wbuf_big[i]){ + printf(" Line %d: Incorrect value, wbuf_big[%u]=%d, rbuf_big[%u]=%d\n", + __LINE__,(unsigned)i,wbuf_big[i],(unsigned)i,rbuf_big[i]); + TEST_ERROR; + } /* end if */ + + /* Closing */ + if(H5Dclose(dsid_big) < 0) FAIL_STACK_ERROR + if(H5Sclose(sid_big) < 0) FAIL_STACK_ERROR + if(H5Sclose(big_mem_id) < 0) FAIL_STACK_ERROR + + /* Delete datasets */ + if(H5Ldelete(fid, DSET_FIXED_BIG, H5P_DEFAULT) < 0) FAIL_STACK_ERROR + if(H5Ldelete(fid, DSET_FIXED_NOMAX, H5P_DEFAULT) < 0) FAIL_STACK_ERROR + if(H5Ldelete(fid, DSET_FIXED_MAX, H5P_DEFAULT) < 0) FAIL_STACK_ERROR + + /* Close everything */ + if(H5Fclose(fid) < 0) FAIL_STACK_ERROR + + /* Get the size of the file */ + if((file_size = h5_get_file_size(filename, fapl)) < 0) + TEST_ERROR + + /* Verify the file is correct size */ + if(file_size != empty_size) + TEST_ERROR + + } /* end for */ +#ifdef H5_HAVE_FILTER_DEFLATE + } /* end for */ +#endif /* H5_HAVE_FILTER_DEFLATE */ + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Pclose(dcpl); + H5Dclose(dsid); + H5Sclose(sid); + H5Sclose(mem_id); + H5Fclose(fid); + } H5E_END_TRY; + return -1; +} /* end test_fixed_array() */ + +/*------------------------------------------------------------------------- + * + * test_unfiltered_edge_chunks(): + * Tests that partial edge chunks aren't filtered when the + * H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS option is set.
+ * + * Programmer: Neil Fortner; 17th March, 2010 + * + *------------------------------------------------------------------------- + */ +static herr_t +test_unfiltered_edge_chunks(hid_t fapl) +{ + hid_t fid = -1; /* File id */ + hid_t did = -1; /* Dataset id */ + hid_t sid = -1; /* Dataspace id */ + hid_t dcpl = -1; /* DCPL id */ + hsize_t dim[2] = {4, 3}; /* Dataset dimensions */ + hsize_t cdim[2] = {2, 2}; /* Chunk dimension */ + char wbuf[4][3]; /* Write buffer */ + char rbuf[4][3]; /* Read buffer */ + char filename[FILENAME_BUF_SIZE] = ""; /* old test file name */ + unsigned opts; /* Chunk options */ + unsigned i, j; /* Local index variables */ + + /* Output message about test being performed */ + TESTING("disabled partial chunk filters"); + + h5_fixname(FILENAME[14], fapl, filename, sizeof filename); + + /* Create the file */ + if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) + TEST_ERROR + + /* Register byte-counting filter */ + if(H5Zregister(H5Z_COUNT) < 0) + TEST_ERROR + + /* Create dataspace */ + if((sid = H5Screate_simple(2, dim, NULL)) < 0) + TEST_ERROR + + /* Create DCPL */ + if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) + TEST_ERROR + + /* Set chunk dimensions */ + if(H5Pset_chunk(dcpl, 2, cdim) < 0) + TEST_ERROR + + /* Add "count" filter */ + if(H5Pset_filter(dcpl, H5Z_FILTER_COUNT, 0u, (size_t)0, NULL) < 0) + TEST_ERROR + + /* Disable filters on partial chunks */ + if(H5Pget_chunk_opts(dcpl, &opts) < 0) + TEST_ERROR + opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; + if(H5Pset_chunk_opts(dcpl, opts) < 0) + TEST_ERROR + + /* Initialize write buffer */ + for(i=0; i<dim[0]; i++) + for(j=0; j<dim[1]; j++) + wbuf[i][j] = (char)(2 * i) - (char)j; + + /* Reset byte counts */ + count_nbytes_read = (size_t)0; + count_nbytes_written = (size_t)0; + + /* Create dataset */ + if((did = H5Dcreate2(fid, DSET_CHUNKED_NAME, H5T_NATIVE_CHAR, sid, + H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) + TEST_ERROR + + /* Nothing should have been written, as we are not using early allocation */ + if(count_nbytes_read != (size_t)0) + TEST_ERROR + if(count_nbytes_written != (size_t)0) + TEST_ERROR + + /* Write data */ + if(H5Dwrite(did, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0) + TEST_ERROR + + /* Close dataset */ + if(H5Dclose(did) < 0) + TEST_ERROR + + /* Make sure only 2 of the 4 chunks were written through the filter (4 bytes + * each) */ + if(count_nbytes_read != (size_t)0) + TEST_ERROR + if(count_nbytes_written != (size_t)(2 * cdim[0] * cdim[1])) + TEST_ERROR + + /* Reopen the dataset */ + if((did = H5Dopen2(fid, DSET_CHUNKED_NAME, H5P_DEFAULT)) < 0) + TEST_ERROR + + /* Read the dataset */ + if(H5Dread(did, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0) + TEST_ERROR + + /* Verify that data read == data written */ + for(i=0; i<dim[0]; i++) + for(j=0; j<dim[1]; j++) + if(rbuf[i][j] != wbuf[i][j]) + TEST_ERROR + + /* Make sure only 2 of the 4 chunks were read through the filter (4 bytes + * each) */ + if(count_nbytes_read != (size_t)(2 * cdim[0] * cdim[1])) + TEST_ERROR + if(count_nbytes_written != (size_t)(2 * cdim[0] * cdim[1])) + TEST_ERROR + + /* Close IDs */ + if(H5Dclose(did) < 0) + TEST_ERROR + if(H5Pclose(dcpl) < 0) + TEST_ERROR + if(H5Sclose(sid) < 0) + TEST_ERROR + if(H5Fclose(fid) < 0) + TEST_ERROR + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY { + H5Dclose(did); + H5Pclose(dcpl); + H5Sclose(sid); + H5Fclose(fid); + } H5E_END_TRY; + return -1; +} /* test_unfiltered_edge_chunks */ + + 
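The test_unfiltered_edge_chunks test above drives the public chunk-options API (H5Pget_chunk_opts / H5Pset_chunk_opts) together with the byte-counting "count" filter to confirm that partial edge chunks bypass the filter pipeline. As a rough illustration only, not part of this patch (the file name "edge_chunks.h5" and dataset name "dset" are invented), an application would opt out of filtering partial edge chunks roughly like this:

#include "hdf5.h"

/* Minimal sketch: compress the full chunks of a 10x10 dataset but leave the
 * partial edge chunks unfiltered.  Error checking is omitted for brevity. */
int main(void)
{
    hsize_t  dims[2]  = {10, 10};   /* dataset extent                    */
    hsize_t  chunk[2] = {4, 4};     /* 4x4 chunks -> partial edge chunks */
    int      buf[10][10];
    unsigned opts;
    hid_t    fid, sid, dcpl, did;
    int      i, j;

    for(i = 0; i < 10; i++)
        for(j = 0; j < 10; j++)
            buf[i][j] = i * 10 + j;

    fid  = H5Fcreate("edge_chunks.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    sid  = H5Screate_simple(2, dims, NULL);
    dcpl = H5Pcreate(H5P_DATASET_CREATE);

    H5Pset_chunk(dcpl, 2, chunk);
    H5Pset_deflate(dcpl, 6);                      /* filter full chunks   */
    H5Pget_chunk_opts(dcpl, &opts);
    opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; /* skip edge chunks     */
    H5Pset_chunk_opts(dcpl, opts);

    did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl,
                     H5P_DEFAULT);
    H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);

    H5Dclose(did);
    H5Pclose(dcpl);
    H5Sclose(sid);
    H5Fclose(fid);
    return 0;
}

With 4x4 chunks over a 10x10 extent, only the four interior chunks pass through the deflate filter; the chunks along the right and bottom edges are stored unfiltered, which is the behavior the count filter in test_unfiltered_edge_chunks verifies.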
+/*------------------------------------------------------------------------- * Function: test_large_chunk_shrink * * Purpose: Tests support for shrinking a chunk larger than 1 MB by a @@ -7991,7 +8605,7 @@ test_zero_dim_dset(hid_t fapl) TESTING("shrinking large chunk"); - h5_fixname(FILENAME[13], fapl, filename, sizeof filename); + h5_fixname(FILENAME[16], fapl, filename, sizeof filename); /* Create file */ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR @@ -9186,6 +9800,8 @@ main(void) nerrors += (test_big_chunks_bypass_cache(my_fapl) < 0 ? 1 : 0); nerrors += (test_chunk_expand(my_fapl) < 0 ? 1 : 0); nerrors += (test_layout_extend(my_fapl) < 0 ? 1 : 0); + nerrors += (test_fixed_array(my_fapl) < 0 ? 1 : 0); + nerrors += (test_unfiltered_edge_chunks(my_fapl) < 0 ? 1 : 0); nerrors += (test_large_chunk_shrink(my_fapl) < 0 ? 1 : 0); nerrors += (test_zero_dim_dset(my_fapl) < 0 ? 1 : 0); diff --git a/test/objcopy.c b/test/objcopy.c index 9bdb50d..e435dd1 100644 --- a/test/objcopy.c +++ b/test/objcopy.c @@ -1219,47 +1219,10 @@ compare_datasets(hid_t did, hid_t did2, hid_t pid, const void *wbuf) /* Open the dataset creation property list for the destination dataset */ if((dcpl2 = H5Dget_create_plist(did2)) < 0) TEST_ERROR - /* If external file storage is being used, the value stored in the - * dcpl will be a heap ID, which is not guaranteed to be the same in - * source and destination files. - * Instead, compare the actual external file values and then - * delete this property from the dcpls before comparing them. - */ - if((ext_count = H5Pget_external_count(dcpl)) < 0) TEST_ERROR - - if(ext_count > 0) - { - unsigned x; /* Counter varaible */ - char name1[NAME_BUF_SIZE]; - char name2[NAME_BUF_SIZE]; - off_t offset1=0; - off_t offset2=0; - hsize_t size1=0; - hsize_t size2=0; - - if(H5Pget_external_count(dcpl2) != ext_count) TEST_ERROR - - /* Ensure that all external file information is the same */ - for(x=0; x < (unsigned) ext_count; ++x) - { - if(H5Pget_external(dcpl, x, (size_t)NAME_BUF_SIZE, name1, &offset1, &size1) < 0) TEST_ERROR - if(H5Pget_external(dcpl2, x, (size_t)NAME_BUF_SIZE, name2, &offset2, &size2) < 0) TEST_ERROR - - if(offset1 != offset2) TEST_ERROR - if(size1 != size2) TEST_ERROR - if(HDstrcmp(name1, name2) != 0) TEST_ERROR - } - - /* Reset external file information from the dcpls */ - /* (Directly removing default property causes memory leak) */ - if (H5P_reset_external_file_test(dcpl) < 0) TEST_ERROR - if (H5P_reset_external_file_test(dcpl2) < 0) TEST_ERROR - } - /* Compare the rest of the dataset creation property lists */ if(H5Pequal(dcpl, dcpl2) != TRUE) TEST_ERROR - /* Get the number of filters on dataset */ + /* Get the number of filters on dataset (for later) */ if((nfilters = H5Pget_nfilters(dcpl)) < 0) TEST_ERROR /* close the source dataset creation property list */ @@ -3084,6 +3047,146 @@ error: /*------------------------------------------------------------------------- + * Function: test_copy_dataset_no_edge_filt + * + * Purpose: Create a compressed, chunked dataset in SRC file and copy it to DST file + * + * Return: Success: 0 + * Failure: number of errors + * + * Programmer: Neil Fortner + * Tuesday, May 11, 2010 + * Mostly copied from test_copy_dataset_compressed, by + * Quincey Koziol + * + *------------------------------------------------------------------------- + */ +static int +test_copy_dataset_no_edge_filt(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, + hid_t dst_fapl) +{ +#ifdef H5_HAVE_FILTER_DEFLATE + hid_t fid_src = 
-1, fid_dst = -1; /* File IDs */ + hid_t sid = -1; /* Dataspace ID */ + hid_t pid = -1; /* Dataset creation property list ID */ + hid_t did = -1, did2 = -1; /* Dataset IDs */ + hsize_t dim2d[2]; /* Dataset dimensions */ + hsize_t chunk_dim2d[2] ={CHUNK_SIZE_1, CHUNK_SIZE_2}; /* Chunk dimensions */ + float buf[DIM_SIZE_1][DIM_SIZE_2]; /* Buffer for writing data */ + int i, j; /* Local index variables */ + char src_filename[NAME_BUF_SIZE]; + char dst_filename[NAME_BUF_SIZE]; +#endif /* H5_HAVE_FILTER_DEFLATE */ + + TESTING("H5Ocopy(): compressed dataset with no edge filters"); + +#ifndef H5_HAVE_FILTER_DEFLATE + SKIPPED(); + puts(" Deflation filter not available"); +#else /* H5_HAVE_FILTER_DEFLATE */ + /* set initial data values */ + for (i=0; i<DIM_SIZE_1; i++) + for (j=0; j<DIM_SIZE_2; j++) + buf[i][j] = 100.0F; /* Something easy to compress */ + + /* Initialize the filenames */ + h5_fixname(FILENAME[0], src_fapl, src_filename, sizeof src_filename); + h5_fixname(FILENAME[1], dst_fapl, dst_filename, sizeof dst_filename); + + /* Reset file address checking info */ + addr_reset(); + + /* create source file */ + if((fid_src = H5Fcreate(src_filename, H5F_ACC_TRUNC, fcpl_src, src_fapl)) < 0) TEST_ERROR + + /* Set dataspace dimensions */ + dim2d[0]=DIM_SIZE_1; + dim2d[1]=DIM_SIZE_2; + + /* create dataspace */ + if((sid = H5Screate_simple(2, dim2d, NULL)) < 0) TEST_ERROR + + /* create and set comp & chunk plist, and disable partial chunk filters */ + if((pid = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR + if(H5Pset_chunk(pid, 2, chunk_dim2d) < 0) TEST_ERROR + if(H5Pset_deflate(pid, 9) < 0) TEST_ERROR + if(H5Pset_chunk_opts(pid, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0) TEST_ERROR + + /* create dataset */ + if((did = H5Dcreate2(fid_src, NAME_DATASET_CHUNKED, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, pid, H5P_DEFAULT)) < 0) TEST_ERROR + + /* close chunk plist */ + if(H5Pclose(pid) < 0) TEST_ERROR + + /* write data into file */ + if(H5Dwrite(did, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0) TEST_ERROR + + /* close dataspace */ + if(H5Sclose(sid) < 0) TEST_ERROR + + /* attach attributes to the dataset */ + if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR + + /* close the dataset */ + if(H5Dclose(did) < 0) TEST_ERROR + + /* close the SRC file */ + if(H5Fclose(fid_src) < 0) TEST_ERROR + + + /* open the source file with read-only */ + if((fid_src = H5Fopen(src_filename, H5F_ACC_RDONLY, src_fapl)) < 0) TEST_ERROR + + /* create destination file */ + if((fid_dst = H5Fcreate(dst_filename, H5F_ACC_TRUNC, fcpl_dst, dst_fapl)) < 0) TEST_ERROR + + /* Create an uncopied object in destination file so that addresses in source and destination files aren't the same */ + if(H5Gclose(H5Gcreate2(fid_dst, NAME_GROUP_UNCOPIED, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR + + /* copy the dataset from SRC to DST */ + if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED, fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR + + /* open the dataset for copy */ + if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR + + /* open the destination dataset */ + if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR + + /* Check if the datasets are equal */ + if(compare_datasets(did, did2, H5P_DEFAULT, NULL) != TRUE) TEST_ERROR + + /* close the destination dataset */ + if(H5Dclose(did2) < 0) TEST_ERROR + + /* close the source dataset */ + if(H5Dclose(did) < 0) TEST_ERROR + + /* close the SRC file */ + if(H5Fclose(fid_src) < 0) 
TEST_ERROR + + /* close the DST file */ + if(H5Fclose(fid_dst) < 0) TEST_ERROR + + PASSED(); +#endif /* H5_HAVE_FILTER_DEFLATE */ + return 0; + +#ifdef H5_HAVE_FILTER_DEFLATE +error: + H5E_BEGIN_TRY { + H5Dclose(did2); + H5Dclose(did); + H5Pclose(pid); + H5Sclose(sid); + H5Fclose(fid_dst); + H5Fclose(fid_src); + } H5E_END_TRY; + return 1; +#endif /* H5_HAVE_FILTER_DEFLATE */ +} /* end test_copy_dataset_no_edge_filt */ + + +/*------------------------------------------------------------------------- * Function: test_copy_dataset_compact * * Purpose: Create a compact dataset in SRC file and copy it to DST file @@ -12376,6 +12479,7 @@ main(void) nerrors += test_copy_dataset_chunked_empty(fcpl_src, fcpl_dst, src_fapl, dst_fapl); nerrors += test_copy_dataset_chunked_sparse(fcpl_src, fcpl_dst, src_fapl, dst_fapl); nerrors += test_copy_dataset_compressed(fcpl_src, fcpl_dst, src_fapl, dst_fapl); + nerrors += test_copy_dataset_no_edge_filt(fcpl_src, fcpl_dst, src_fapl, dst_fapl); nerrors += test_copy_dataset_compact(fcpl_src, fcpl_dst, src_fapl, dst_fapl); nerrors += test_copy_dataset_multi_ohdr_chunks(fcpl_src, fcpl_dst, src_fapl, dst_fapl); nerrors += test_copy_dataset_attr_named_dtype(fcpl_src, fcpl_dst, src_fapl, dst_fapl); diff --git a/test/set_extent.c b/test/set_extent.c index acfdc5b..7fe8d75 100644 --- a/test/set_extent.c +++ b/test/set_extent.c @@ -46,8 +46,9 @@ const char *FILENAME[] = { #define CONFIG_COMPRESS 0x01u #define CONFIG_FILL 0x02u #define CONFIG_EARLY_ALLOC 0x04u +#define CONFIG_UNFILT_EDGE 0x08u #define CONFIG_ALL (CONFIG_COMPRESS + CONFIG_FILL \ - + CONFIG_EARLY_ALLOC) + + CONFIG_EARLY_ALLOC + CONFIG_UNFILT_EDGE) #define FILL_VALUE -1 #define DO_RANKS_PRINT_CONFIG(TEST) { \ printf(" Config:\n"); \ @@ -56,6 +57,8 @@ const char *FILENAME[] = { printf(" Fill value: %s\n", (do_fillvalue ? "yes" : "no")); \ printf(" Early allocation: %s\n", (config & CONFIG_EARLY_ALLOC ? "yes" \ : "no")); \ + printf(" Edge chunk filters: %s\n", (config & CONFIG_UNFILT_EDGE \ + ? "disabled" : "enabled")); \ } /* end DO_RANKS_PRINT_CONFIG */ #define RANK1 1 @@ -85,18 +88,22 @@ static int do_layouts( hid_t fapl ); static int test_rank1( hid_t fapl, hid_t dcpl, hbool_t do_fill_value, + hbool_t disable_edge_filters, hbool_t set_istore_k); static int test_rank2( hid_t fapl, hid_t dcpl, hbool_t do_fill_value, + hbool_t disable_edge_filters, hbool_t set_istore_k); static int test_rank3( hid_t fapl, hid_t dcpl, hbool_t do_fill_value, + hbool_t disable_edge_filters, hbool_t set_istore_k); static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue, + hbool_t disable_edge_filters, hbool_t do_sparse); static int test_external( hid_t fapl ); @@ -211,7 +218,8 @@ error: static int do_ranks( hid_t fapl ) { - hbool_t do_fillvalue = 0; + hbool_t do_fillvalue = FALSE; + hbool_t disable_edge_filters = FALSE; hid_t dcpl = -1; int fillvalue = FILL_VALUE; unsigned config; @@ -247,6 +255,11 @@ static int do_ranks( hid_t fapl ) if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0) TEST_ERROR + if(config & CONFIG_UNFILT_EDGE) + disable_edge_filters = TRUE; + else + disable_edge_filters = FALSE; + /* Run tests */ if(do_fillvalue) { unsigned ifset; @@ -261,25 +274,25 @@ static int do_ranks( hid_t fapl ) if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0) TEST_ERROR - if(test_rank1(fapl, dcpl, do_fillvalue, FALSE) < 0) { + if(test_rank1(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) { DO_RANKS_PRINT_CONFIG("Rank 1") printf(" Fill time: %s\n", (ifset ? 
"H5D_FILL_TIME_IFSET" : "H5D_FILL_TIME_ALLOC")); goto error; } /* end if */ - if(test_rank2(fapl, dcpl, do_fillvalue, FALSE) < 0) { + if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) { DO_RANKS_PRINT_CONFIG("Rank 2") printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET" : "H5D_FILL_TIME_ALLOC")); goto error; } /* end if */ - if(test_rank3(fapl, dcpl, do_fillvalue, FALSE) < 0) { + if(test_rank3(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) { DO_RANKS_PRINT_CONFIG("Rank 3") printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET" : "H5D_FILL_TIME_ALLOC")); goto error; } /* end if */ - if(test_rank2(fapl, dcpl, do_fillvalue, TRUE) < 0) { + if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, TRUE) < 0) { DO_RANKS_PRINT_CONFIG("Rank 2 with non-default indexed storage B-tree") printf(" Fill time: %s\n", (ifset ? "H5D_FILL_TIME_IFSET" : "H5D_FILL_TIME_ALLOC")); @@ -293,19 +306,19 @@ static int do_ranks( hid_t fapl ) if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC) < 0) TEST_ERROR - if(test_rank1(fapl, dcpl, do_fillvalue, FALSE) < 0) { + if(test_rank1(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) { DO_RANKS_PRINT_CONFIG("Rank 1") goto error; } /* end if */ - if(test_rank2(fapl, dcpl, do_fillvalue, FALSE) < 0) { + if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) { DO_RANKS_PRINT_CONFIG("Rank 2") goto error; } /* end if */ - if(test_rank3(fapl, dcpl, do_fillvalue, FALSE) < 0) { + if(test_rank3(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) { DO_RANKS_PRINT_CONFIG("Rank 3") goto error; } /* end if */ - if(test_rank2(fapl, dcpl, do_fillvalue, TRUE) < 0) { + if(test_rank2(fapl, dcpl, do_fillvalue, disable_edge_filters, TRUE) < 0) { DO_RANKS_PRINT_CONFIG("Rank 2 with non-default indexed storage B-tree") goto error; } /* end if */ @@ -316,13 +329,13 @@ static int do_ranks( hid_t fapl ) if(H5Pset_fill_time(dcpl, H5D_FILL_TIME_IFSET) < 0) TEST_ERROR - if(test_random_rank4(fapl, dcpl, do_fillvalue, FALSE) < 0) { + if(test_random_rank4(fapl, dcpl, do_fillvalue, disable_edge_filters, FALSE) < 0) { DO_RANKS_PRINT_CONFIG("Randomized rank 4") goto error; } /* end if */ if(!(config & CONFIG_EARLY_ALLOC)) - if(test_random_rank4(fapl, dcpl, do_fillvalue, TRUE) < 0) { + if(test_random_rank4(fapl, dcpl, do_fillvalue, disable_edge_filters, TRUE) < 0) { DO_RANKS_PRINT_CONFIG("Randomized rank 4 with sparse allocation") goto error; } /* end if */ @@ -376,6 +389,7 @@ error: static int test_rank1( hid_t fapl, hid_t dcpl, hbool_t do_fill_value, + hbool_t disable_edge_filters, hbool_t set_istore_k) { @@ -433,6 +447,9 @@ static int test_rank1( hid_t fapl, TEST_ERROR if(H5Pset_chunk(my_dcpl, RANK1, dims_c) < 0) TEST_ERROR + if(disable_edge_filters) + if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0) + TEST_ERROR /*------------------------------------------------------------------------- * create, write dataset @@ -713,6 +730,7 @@ error: static int test_rank2( hid_t fapl, hid_t dcpl, hbool_t do_fill_value, + hbool_t disable_edge_filters, hbool_t set_istore_k) { @@ -793,6 +811,9 @@ static int test_rank2( hid_t fapl, { TEST_ERROR } + if(disable_edge_filters) + if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0) + TEST_ERROR /*------------------------------------------------------------------------- * Procedure 1 @@ -1328,6 +1349,7 @@ error: static int test_rank3( hid_t fapl, hid_t dcpl, hbool_t do_fill_value, + hbool_t disable_edge_filters, hbool_t set_istore_k) { @@ -1414,6 +1436,9 
@@ static int test_rank3( hid_t fapl, { TEST_ERROR } + if(disable_edge_filters) + if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0) + TEST_ERROR /*------------------------------------------------------------------------- * create, write array @@ -2488,7 +2513,7 @@ error: *------------------------------------------------------------------------- */ static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue, - hbool_t do_sparse ) + hbool_t disable_edge_filters, hbool_t do_sparse ) { hid_t file = -1; hid_t dset = -1; @@ -2532,6 +2557,9 @@ static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue, TEST_ERROR if(H5Pset_chunk(my_dcpl, 4, cdims) < 0) TEST_ERROR + if(disable_edge_filters) + if(H5Pset_chunk_opts(my_dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS) < 0) + TEST_ERROR if((dset = H5Dcreate2(file, "dset", H5T_NATIVE_INT, fspace, H5P_DEFAULT, my_dcpl, H5P_DEFAULT)) < 0) TEST_ERROR diff --git a/test/tarray.c b/test/tarray.c index c6e9c0d..4a87981 100644 --- a/test/tarray.c +++ b/test/tarray.c @@ -1639,9 +1639,9 @@ test_array_bkg(void) for (i = 0; i < dtsinfo->nsubfields; i++) dtsinfo->name[i] = (char *)HDcalloc((size_t)20, sizeof(char)); - strcpy(dtsinfo->name[0], "One"); - strcpy(dtsinfo->name[1], "Two"); - strcpy(dtsinfo->name[2], "Three"); + HDstrcpy(dtsinfo->name[0], "One"); + HDstrcpy(dtsinfo->name[1], "Two"); + HDstrcpy(dtsinfo->name[2], "Three"); /* Create file */ @@ -1859,32 +1859,38 @@ test_array_bkg(void) HDfree(dtsinfo); } /* end test_array_bkg() */ -/**************************************************************** -** -** test_compat(): Test array datatype compatibility code. -** Reads file containing old version of datatype object header -** messages for compound datatypes and verifies reading the older -** version of the is working correctly. -** -****************************************************************/ + +/*------------------------------------------------------------------------- + * Function: test_compat + * + * Purpose: Test array datatype compatibility code. + * + * Reads file containing old version of datatype object header + * messages for compound datatypes and verifies reading the older + * version is working correctly.
+ * + * Return: void + * + *------------------------------------------------------------------------- + */ static void test_compat(void) { const char *testfile = H5_get_srcdir_filename(TESTFILE); /* Corrected test file name */ - hid_t fid1; /* HDF5 File IDs */ - hid_t dataset; /* Dataset ID */ - hid_t tid1; /* Array Datatype ID */ - hid_t tid2; /* Datatype ID */ - hsize_t tdims1[] = {ARRAY1_DIM1}; - int ndims; /* Array rank for reading */ - hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ - H5T_class_t mclass; /* Datatype class for VL */ - int nmemb; /* Number of compound members */ - char *mname; /* Name of compound field */ - size_t off; /* Offset of compound field */ - hid_t mtid; /* Datatype ID for field */ - int i; /* Index variables */ - herr_t ret; /* Generic return value */ + hid_t fid1; /* HDF5 File IDs */ + hid_t dataset; /* Dataset ID */ + hid_t tid1; /* Array Datatype ID */ + hid_t tid2; /* Datatype ID */ + hsize_t tdims1[] = {ARRAY1_DIM1}; + int ndims; /* Array rank for reading */ + hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */ + H5T_class_t mclass; /* Datatype class for VL */ + int nmemb; /* Number of compound members */ + char *mname; /* Name of compound field */ + size_t off; /* Offset of compound field */ + hid_t mtid; /* Datatype ID for field */ + int i; /* Index variables */ + herr_t ret; /* Generic return value */ /* Output message about test being performed */ MESSAGE(5, ("Testing Array Datatypes Compatibility Functionality\n")); diff --git a/testpar/t_dset.c b/testpar/t_dset.c index e6b1788..5fe4b3b 100644 --- a/testpar/t_dset.c +++ b/testpar/t_dset.c @@ -2515,6 +2515,8 @@ compress_readAll(void) int rank=1; /* Dataspace rank */ hsize_t dim=dim0; /* Dataspace dimensions */ unsigned u; /* Local index variable */ + unsigned chunk_opts; /* Chunk options */ + unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */ DATATYPE *data_read = NULL; /* data buffer */ DATATYPE *data_orig = NULL; /* expected data buffer */ const char *filename; @@ -2541,116 +2543,132 @@ compress_readAll(void) for(u=0; u<dim;u++) data_orig[u]=u; - /* Process zero creates the file with a compressed, chunked dataset */ - if(mpi_rank==0) { - hsize_t chunk_dim; /* Chunk dimensions */ - - /* Create the file */ - fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - VRFY((fid > 0), "H5Fcreate succeeded"); - - /* Create property list for chunking and compression */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl > 0), "H5Pcreate succeeded"); - - ret = H5Pset_layout(dcpl, H5D_CHUNKED); - VRFY((ret >= 0), "H5Pset_layout succeeded"); - - /* Use eight chunks */ - chunk_dim = dim / 8; - ret = H5Pset_chunk(dcpl, rank, &chunk_dim); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); + /* Run test both with and without filters disabled on partial chunks */ + for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1; + disable_partial_chunk_filters++) { + /* Process zero creates the file with a compressed, chunked dataset */ + if(mpi_rank==0) { + hsize_t chunk_dim; /* Chunk dimensions */ + + /* Create the file */ + fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + VRFY((fid > 0), "H5Fcreate succeeded"); + + /* Create property list for chunking and compression */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl > 0), "H5Pcreate succeeded"); + + ret = H5Pset_layout(dcpl, H5D_CHUNKED); + VRFY((ret >= 0), "H5Pset_layout succeeded"); + + /* Use eight chunks */ + 
chunk_dim = dim / 8; + ret = H5Pset_chunk(dcpl, rank, &chunk_dim); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + + /* Set chunk options appropriately */ + if(disable_partial_chunk_filters) { + ret = H5Pget_chunk_opts(dcpl, &chunk_opts); + VRFY((ret>=0),"H5Pget_chunk_opts succeeded"); + + chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; + + ret = H5Pset_chunk_opts(dcpl, chunk_opts); + VRFY((ret>=0),"H5Pset_chunk_opts succeeded"); + } /* end if */ + + ret = H5Pset_deflate(dcpl, 9); + VRFY((ret >= 0), "H5Pset_deflate succeeded"); + + /* Create dataspace */ + dataspace = H5Screate_simple(rank, &dim, NULL); + VRFY((dataspace > 0), "H5Screate_simple succeeded"); + + /* Create dataset */ + dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset > 0), "H5Dcreate2 succeeded"); + + /* Write compressed data */ + ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* Close objects */ + ret = H5Pclose(dcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Sclose(dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } - ret = H5Pset_deflate(dcpl, 9); - VRFY((ret >= 0), "H5Pset_deflate succeeded"); + /* Wait for file to be created */ + MPI_Barrier(comm); - /* Create dataspace */ - dataspace = H5Screate_simple(rank, &dim, NULL); - VRFY((dataspace > 0), "H5Screate_simple succeeded"); + /* ------------------- + * OPEN AN HDF5 FILE + * -------------------*/ - /* Create dataset */ - dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset > 0), "H5Dcreate2 succeeded"); + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); - /* Write compressed data */ - ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig); - VRFY((ret >= 0), "H5Dwrite succeeded"); + /* open the file collectively */ + fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl); + VRFY((fid > 0), "H5Fopen succeeded"); - /* Close objects */ - ret = H5Pclose(dcpl); + /* Release file-access template */ + ret = H5Pclose(acc_tpl); VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Sclose(dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - } - - /* Wait for file to be created */ - MPI_Barrier(comm); - - /* ------------------- - * OPEN AN HDF5 FILE - * -------------------*/ - - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl); - VRFY((fid > 0), "H5Fopen succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - /* Open dataset with compressed chunks */ - dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT); - VRFY((dataset > 0), "H5Dopen2 succeeded"); + /* Open dataset with compressed chunks */ + dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT); + VRFY((dataset > 0), "H5Dopen2 succeeded"); - /* Try reading & writing data */ - if(dataset>0) { - /* Create dataset transfer property list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist > 0), "H5Pcreate 
succeeded"); + /* Try reading & writing data */ + if(dataset>0) { + /* Create dataset transfer property list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist > 0), "H5Pcreate succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret>= 0),"set independent IO collectively succeeded"); + } - /* Try reading the data */ - ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + /* Try reading the data */ + ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - /* Verify data read */ - for(u=0; u<dim; u++) - if(data_orig[u]!=data_read[u]) { - printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__, - (unsigned)u,data_orig[u],(unsigned)u,data_read[u]); - nerrors++; - } + /* Verify data read */ + for(u=0; u<dim; u++) + if(data_orig[u]!=data_read[u]) { + printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__, + (unsigned)u,data_orig[u],(unsigned)u,data_read[u]); + nerrors++; + } - /* Writing to the compressed, chunked dataset in parallel should fail */ - H5E_BEGIN_TRY { - ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); - } H5E_END_TRY; - VRFY((ret < 0), "H5Dwrite failed"); + /* Writing to the compressed, chunked dataset in parallel should fail */ + H5E_BEGIN_TRY { + ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); + } H5E_END_TRY; + VRFY((ret < 0), "H5Dwrite failed"); - ret = H5Pclose(xfer_plist); - VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - } /* end if */ + ret = H5Pclose(xfer_plist); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + } /* end if */ - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); + /* Close file */ + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } /* end for */ /* release data buffers */ if(data_read) HDfree(data_read); @@ -3131,7 +3149,7 @@ test_actual_io_mode(int selection_mode) { "reading and writing are the same for actual_chunk_opt_mode"); /* Test values */ - if(actual_chunk_opt_mode_expected != (unsigned) -1 && actual_io_mode_expected != (unsigned) -1) { + if(actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t) -1 && actual_io_mode_expected != (H5D_mpio_actual_io_mode_t) -1) { sprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name); VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message); sprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name); @@ -3963,16 +3981,20 @@ dataset_atomicity(void) MPI_Barrier (comm); /* make sure setting atomicity fails on a serial file ID */ - /* open the file collectively */ - fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT); - VRFY((fid >= 0), "H5Fopen succeeed"); + /* file locking allows only one file open 
(serial) for writing */ + if(MAINPROCESS){ + fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT); + VRFY((fid >= 0), "H5Fopen succeeed"); + } /* should fail */ ret = H5Fset_mpi_atomicity (fid , TRUE); VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed"); - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); + if(MAINPROCESS){ + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } MPI_Barrier (comm); diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c index 5e1cd04..44f3f11 100644 --- a/testpar/t_filter_read.c +++ b/testpar/t_filter_read.c @@ -213,6 +213,8 @@ test_filter_read(void) hid_t dc; /* HDF5 IDs */ const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */ hsize_t null_size; /* Size of dataset without filters */ + unsigned chunk_opts; /* Chunk options */ + unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */ herr_t hrc; const char *filename; hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */ @@ -254,74 +256,104 @@ test_filter_read(void) hrc = H5Pclose (dc); VRFY(hrc>=0,"H5Pclose"); - /*---------------------------------------------------------- - * STEP 1: Test Fletcher32 Checksum by itself. - *---------------------------------------------------------- - */ - - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0,"H5Pset_filter"); + /* Run steps 1-3 both with and without filters disabled on partial chunks */ + for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1; + disable_partial_chunk_filters++) { + /* Set chunk options appropriately */ + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc>=0,"H5Pcreate"); - hrc = H5Pset_chunk (dc, 2, chunk_size); - VRFY(hrc>=0,"H5Pset_filter"); + hrc = H5Pset_chunk (dc, 2, chunk_size); + VRFY(hrc>=0,"H5Pset_filter"); - hrc = H5Pset_filter (dc,H5Z_FILTER_FLETCHER32,0,0,NULL); - VRFY(hrc>=0,"H5Pset_filter"); + hrc = H5Pget_chunk_opts(dc, &chunk_opts); + VRFY(hrc>=0,"H5Pget_chunk_opts"); - filter_read_internal(filename,dc,&fletcher32_size); - VRFY(fletcher32_size > null_size,"Size after checksumming is incorrect."); + if(disable_partial_chunk_filters) + chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; - /* Clean up objects used for this test */ - hrc = H5Pclose (dc); - VRFY(hrc>=0, "H5Pclose"); + hrc = H5Pclose (dc); + VRFY(hrc>=0,"H5Pclose"); + /*---------------------------------------------------------- + * STEP 1: Test Fletcher32 Checksum by itself. + *---------------------------------------------------------- + */ +#ifdef H5_HAVE_FILTER_FLETCHER32 - /*---------------------------------------------------------- - * STEP 2: Test deflation by itself. 
- *---------------------------------------------------------- - */ -#ifdef H5_HAVE_FILTER_DEFLATE + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc>=0,"H5Pset_filter"); - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0, "H5Pcreate"); + hrc = H5Pset_chunk (dc, 2, chunk_size); + VRFY(hrc>=0,"H5Pset_filter"); - hrc = H5Pset_chunk (dc, 2, chunk_size); - VRFY(hrc>=0, "H5Pset_chunk"); + hrc = H5Pset_chunk_opts (dc, chunk_opts); + VRFY(hrc>=0,"H5Pset_chunk_opts"); - hrc = H5Pset_deflate (dc, 6); - VRFY(hrc>=0, "H5Pset_deflate"); + hrc = H5Pset_filter (dc,H5Z_FILTER_FLETCHER32,0,0,NULL); + VRFY(hrc>=0,"H5Pset_filter"); - filter_read_internal(filename,dc,&deflate_size); + filter_read_internal(filename,dc,&fletcher32_size); + VRFY(fletcher32_size > null_size,"Size after checksumming is incorrect."); - /* Clean up objects used for this test */ - hrc = H5Pclose (dc); - VRFY(hrc>=0, "H5Pclose"); + /* Clean up objects used for this test */ + hrc = H5Pclose (dc); + VRFY(hrc>=0, "H5Pclose"); -#endif /* H5_HAVE_FILTER_DEFLATE */ +#endif /* H5_HAVE_FILTER_FLETCHER32 */ + /*---------------------------------------------------------- + * STEP 2: Test deflation by itself. + *---------------------------------------------------------- + */ +#ifdef H5_HAVE_FILTER_DEFLATE - /*---------------------------------------------------------- - * STEP 3: Test szip compression by itself. - *---------------------------------------------------------- - */ -#ifdef H5_HAVE_FILTER_SZIP - if(h5_szip_can_encode() == 1) { dc = H5Pcreate(H5P_DATASET_CREATE); VRFY(dc>=0, "H5Pcreate"); hrc = H5Pset_chunk (dc, 2, chunk_size); VRFY(hrc>=0, "H5Pset_chunk"); - hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); - VRFY(hrc>=0, "H5Pset_szip"); + hrc = H5Pset_chunk_opts (dc, chunk_opts); + VRFY(hrc>=0,"H5Pset_chunk_opts"); + + hrc = H5Pset_deflate (dc, 6); + VRFY(hrc>=0, "H5Pset_deflate"); - filter_read_internal(filename,dc,&szip_size); + filter_read_internal(filename,dc,&deflate_size); /* Clean up objects used for this test */ hrc = H5Pclose (dc); VRFY(hrc>=0, "H5Pclose"); - } + +#endif /* H5_HAVE_FILTER_DEFLATE */ + + /*---------------------------------------------------------- + * STEP 3: Test szip compression by itself. 
+ *---------------------------------------------------------- + */ +#ifdef H5_HAVE_FILTER_SZIP + if(h5_szip_can_encode() == 1) { + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc>=0, "H5Pcreate"); + + hrc = H5Pset_chunk (dc, 2, chunk_size); + VRFY(hrc>=0, "H5Pset_chunk"); + + hrc = H5Pset_chunk_opts (dc, chunk_opts); + VRFY(hrc>=0,"H5Pset_chunk_opts"); + + hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); + VRFY(hrc>=0, "H5Pset_szip"); + + filter_read_internal(filename,dc,&szip_size); + + /* Clean up objects used for this test */ + hrc = H5Pclose (dc); + VRFY(hrc>=0, "H5Pclose"); + } #endif /* H5_HAVE_FILTER_SZIP */ + } /* end for */ /*---------------------------------------------------------- diff --git a/tools/h5copy/CMakeTests.cmake b/tools/h5copy/CMakeTests.cmake index 9002e5a..4ef49ee 100644 --- a/tools/h5copy/CMakeTests.cmake +++ b/tools/h5copy/CMakeTests.cmake @@ -22,7 +22,7 @@ file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles") foreach (listfiles ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES}) - GET_FILENAME_COMPONENT(fname "${listfiles}" NAME) + get_filename_component(fname "${listfiles}" NAME) set (dest "${PROJECT_BINARY_DIR}/testfiles/${fname}") #message (STATUS " Copying ${listfiles}") add_custom_command ( diff --git a/tools/h5diff/CMakeTests.cmake b/tools/h5diff/CMakeTests.cmake index 0595edd..c815325 100644 --- a/tools/h5diff/CMakeTests.cmake +++ b/tools/h5diff/CMakeTests.cmake @@ -267,7 +267,7 @@ # copy test files from source to build dir # foreach (h5_tstfiles ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES}) - GET_FILENAME_COMPONENT(fname "${h5_tstfiles}" NAME) + get_filename_component(fname "${h5_tstfiles}" NAME) set (dest "${PROJECT_BINARY_DIR}/testfiles/${fname}") #message (STATUS " Copying ${fname}") add_custom_command ( @@ -1115,8 +1115,10 @@ ADD_H5_TEST (h5diff_90 0 -v ${FILE2} ${FILE2}) ADD_H5_TEST (h5diff_100 1 -v ${FILE9} ${FILE10}) # 11. floating point comparison +# double value ADD_H5_TEST (h5diff_101 1 -v ${FILE1} ${FILE1} g1/d1 g1/d2) +# float value ADD_H5_TEST (h5diff_102 1 -v ${FILE1} ${FILE1} g1/fp1 g1/fp2) # with --use-system-epsilon for double value. expect less differences @@ -1361,7 +1363,7 @@ ADD_H5_TEST (h5diff_517 1 -v ${GRP_RECURSE1_EXT} ${GRP_RECURSE2_EXT1} /g1) ADD_H5_TEST (h5diff_518 0 -v --follow-symlinks ${GRP_RECURSE1_EXT} ${GRP_RECURSE2_EXT1} /g1) # ############################################################################## -# # Exclude path (--exclude-path) +# # Exclude objects (--exclude-path) # ############################################################################## # # Same structure, same names and different value. 
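In the t_filter_read.c hunks above, the Fletcher32, deflate, and szip steps are folded into a loop that runs once with the default chunk options and once with H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS set, so every filter is exercised both with and without filtering of partial edge chunks. The following is a minimal standalone sketch of that property-list sequence, not lifted verbatim from the test; the chunk dimensions and deflate level are placeholder values.

/* Sketch: build a chunked dataset creation property list with filtering
 * disabled on partial edge chunks (placeholder chunk shape and filter). */
#include "hdf5.h"

static hid_t make_partial_chunk_dcpl(void)
{
    const hsize_t chunk_dims[2] = {10, 10};   /* assumed chunk shape */
    unsigned      chunk_opts    = 0;
    hid_t         dcpl          = H5Pcreate(H5P_DATASET_CREATE);

    H5Pset_chunk(dcpl, 2, chunk_dims);

    /* Read back the current chunk options, then OR in the flag so that
     * partial edge chunks are stored unfiltered. */
    H5Pget_chunk_opts(dcpl, &chunk_opts);
    chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
    H5Pset_chunk_opts(dcpl, chunk_opts);

    /* Any filter works here; deflate level 6 mirrors STEP 2 of the test. */
    H5Pset_deflate(dcpl, 6);

    return dcpl;
}

The test reads back the options with H5Pget_chunk_opts first so that only the partial-chunk flag changes between the two passes of the loop.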
diff --git a/tools/h5diff/testfiles/h5diff_v1.txt b/tools/h5diff/testfiles/h5diff_v1.txt index 31a3eae..8210cf9 100644 --- a/tools/h5diff/testfiles/h5diff_v1.txt +++ b/tools/h5diff/testfiles/h5diff_v1.txt @@ -7,7 +7,7 @@ file1 file2 group : </> and </> 0 differences found dataset: </vds_dset> and </vds_dset> -Not comparable: </vds_dset> or </vds_dset> is an empty dataset +Warning: </vds_dset> or </vds_dset> is a virtual dataset Not comparable: </vds_dset> has rank 3, dimensions [5x18x8], max dimensions [18446744073709551615x18x8] and </vds_dset> has rank 3, dimensions [6x8x14], max dimensions [18446744073709551615x8x14] 0 differences found diff --git a/tools/h5diff/testfiles/h5diff_v3.txt b/tools/h5diff/testfiles/h5diff_v3.txt index 8c5d2a2..57af303 100644 --- a/tools/h5diff/testfiles/h5diff_v3.txt +++ b/tools/h5diff/testfiles/h5diff_v3.txt @@ -1,4 +1,4 @@ -Not comparable: </vds_dset> or </vds_dset> is an empty dataset +Warning: </vds_dset> or </vds_dset> is a virtual dataset Not comparable: </vds_dset> has rank 3, dimensions [5x18x8], max dimensions [18446744073709551615x18x8] and </vds_dset> has rank 3, dimensions [6x8x14], max dimensions [18446744073709551615x8x14] EXIT CODE: 0 diff --git a/tools/h5dump/CMakeTests.cmake b/tools/h5dump/CMakeTests.cmake index 9f7e2b0..7897311 100644 --- a/tools/h5dump/CMakeTests.cmake +++ b/tools/h5dump/CMakeTests.cmake @@ -1141,10 +1141,10 @@ ADD_H5_TEST_N (tattr-2 0 --enable-error-stack -N /\\\\/attr1 --any_path /attr4 --any_path=/attr5 tattr.h5) # test for header and error messages ADD_H5ERR_MASK_TEST (tattr-3 1 --enable-error-stack --header -a /attr2 --attribute=/attr tattr.h5) - # test for displaying attributes in shared datatype (also in group and dataset) - ADD_H5_TEST (tnamed_dtype_attr 0 --enable-error-stack tnamed_dtype_attr.h5) # test for displaying at least 9 attributes on root from a be machine ADD_H5_TEST (tattr-4_be 0 --enable-error-stack tattr4_be.h5) + # test for displaying attributes in shared datatype (also in group and dataset) + ADD_H5_TEST (tnamed_dtype_attr 0 --enable-error-stack tnamed_dtype_attr.h5) # test for displaying soft links and user-defined links ADD_H5_TEST (tslink-1 0 --enable-error-stack tslink.h5) @@ -1423,6 +1423,7 @@ # test for dataset region references ADD_H5_TEST (tdatareg 0 --enable-error-stack tdatareg.h5) ADD_H5ERR_MASK_TEST (tdataregR 0 --enable-error-stack -R tdatareg.h5) + ADD_H5_TEST (tattrreg 0 --enable-error-stack tattrreg.h5) ADD_H5ERR_MASK_TEST (tattrregR 0 -R --enable-error-stack tattrreg.h5) ADD_H5_EXPORT_TEST (tbinregR tdatareg.h5 0 --enable-error-stack -d /Dataset1 -s 0 -R -y -o) diff --git a/tools/h5dump/CMakeTestsPBITS.cmake b/tools/h5dump/CMakeTestsPBITS.cmake index e76fa0e..745d5b1 100644 --- a/tools/h5dump/CMakeTestsPBITS.cmake +++ b/tools/h5dump/CMakeTestsPBITS.cmake @@ -86,7 +86,7 @@ ) foreach (pbits_h5_file ${HDF5_REFERENCE_TEST_PBITS}) - GET_FILENAME_COMPONENT(fname "${pbits_h5_file}" NAME) + get_filename_component(fname "${pbits_h5_file}" NAME) set (dest "${PROJECT_BINARY_DIR}/testfiles/pbits/${fname}") #message (STATUS " Copying ${pbits_h5_file}") add_custom_command ( @@ -99,7 +99,7 @@ foreach (ddl_pbits ${HDF5_REFERENCE_PBITS}) - GET_FILENAME_COMPONENT(fname "${ddl_pbits}" NAME) + get_filename_component(fname "${ddl_pbits}" NAME) set (ddldest "${PROJECT_BINARY_DIR}/testfiles/pbits/${fname}") #message (STATUS " Copying ${ddl_pbits}") add_custom_command ( @@ -111,7 +111,7 @@ endforeach (ddl_pbits ${HDF5_REFERENCE_PBITS}) foreach (ddl_pbits ${HDF5_ERROR_REFERENCE_PBITS}) - 
GET_FILENAME_COMPONENT(fname "${ddl_pbits}" NAME) + get_filename_component(fname "${ddl_pbits}" NAME) set (ddldest "${PROJECT_BINARY_DIR}/testfiles/pbits/${fname}") #message (STATUS " Copying ${ddl_pbits}") add_custom_command ( diff --git a/tools/h5dump/CMakeTestsVDS.cmake b/tools/h5dump/CMakeTestsVDS.cmake index cc68896..dcb90e0 100644 --- a/tools/h5dump/CMakeTestsVDS.cmake +++ b/tools/h5dump/CMakeTestsVDS.cmake @@ -65,7 +65,7 @@ ) foreach (vds_h5_file ${HDF5_REFERENCE_TEST_VDS}) - GET_FILENAME_COMPONENT(fname "${vds_h5_file}" NAME) + get_filename_component(fname "${vds_h5_file}" NAME) set (dest "${PROJECT_BINARY_DIR}/testfiles/vds/${fname}") #message (STATUS " Copying ${vds_h5_file}") add_custom_command ( @@ -78,7 +78,7 @@ foreach (ddl_vds ${HDF5_REFERENCE_VDS}) - GET_FILENAME_COMPONENT(fname "${ddl_vds}" NAME) + get_filename_component(fname "${ddl_vds}" NAME) set (ddldest "${PROJECT_BINARY_DIR}/testfiles/vds/${fname}") #message (STATUS " Copying ${ddl_vds}") add_custom_command ( @@ -90,7 +90,7 @@ endforeach (ddl_vds ${HDF5_REFERENCE_VDS}) foreach (ddl_vds ${HDF5_ERROR_REFERENCE_VDS}) - GET_FILENAME_COMPONENT(fname "${ddl_vds}" NAME) + get_filename_component(fname "${ddl_vds}" NAME) set (ddldest "${PROJECT_BINARY_DIR}/testfiles/vds/${fname}") #message (STATUS " Copying ${ddl_vds}") add_custom_command ( @@ -111,7 +111,7 @@ # If using memchecker add tests without using scripts if (HDF5_ENABLE_USING_MEMCHECKER) add_test (NAME H5DUMP-${resultfile} COMMAND $<TARGET_FILE:h5dump> ${ARGN}) - set_tests_properties (H5DUMP-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfilesvds") + set_tests_properties (H5DUMP-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/vds") if (NOT ${resultcode} STREQUAL "0") set_tests_properties (H5DUMP-${resultfile} PROPERTIES WILL_FAIL "true") endif (NOT ${resultcode} STREQUAL "0") @@ -144,7 +144,7 @@ # If using memchecker add tests without using scripts if (HDF5_ENABLE_USING_MEMCHECKER) add_test (NAME H5DUMP-${resultfile} COMMAND $<TARGET_FILE:h5dump> -p ${ARGN}) - set_tests_properties (H5DUMP-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfilesvds") + set_tests_properties (H5DUMP-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/vds") if (NOT ${resultcode} STREQUAL "0") set_tests_properties (H5DUMP-${resultfile} PROPERTIES WILL_FAIL "true") endif (NOT ${resultcode} STREQUAL "0") diff --git a/tools/h5dump/CMakeTestsXML.cmake b/tools/h5dump/CMakeTestsXML.cmake index 0a667c4..fc6cae7 100644 --- a/tools/h5dump/CMakeTestsXML.cmake +++ b/tools/h5dump/CMakeTestsXML.cmake @@ -128,7 +128,7 @@ ) foreach (tst_xml_h5_file ${HDF5_XML_REFERENCE_TEST_FILES}) - GET_FILENAME_COMPONENT(fname "${tst_xml_h5_file}" NAME) + get_filename_component(fname "${tst_xml_h5_file}" NAME) set (dest "${PROJECT_BINARY_DIR}/testfiles/xml/${fname}") #message (STATUS " Copying ${tst_xml_h5_file}") add_custom_command ( @@ -140,7 +140,7 @@ endforeach (tst_xml_h5_file ${HDF5_XML_REFERENCE_TEST_FILES}) foreach (tst_xml_other_file ${HDF5_XML_REFERENCE_FILES}) - GET_FILENAME_COMPONENT(fname "${tst_xml_other_file}" NAME) + get_filename_component(fname "${tst_xml_other_file}" NAME) set (dest "${PROJECT_BINARY_DIR}/testfiles/xml/${fname}") #message (STATUS " Copying ${tst_xml_other_file}") add_custom_command ( diff --git a/tools/h5ls/CMakeTests.cmake b/tools/h5ls/CMakeTests.cmake index a42fa17..c5aff21 100644 --- a/tools/h5ls/CMakeTests.cmake +++ b/tools/h5ls/CMakeTests.cmake @@ -96,7 +96,7 @@ # copy the list of test 
files foreach (listfiles ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES}) - GET_FILENAME_COMPONENT(fname "${listfiles}" NAME) + get_filename_component(fname "${listfiles}" NAME) set (dest "${PROJECT_BINARY_DIR}/testfiles/${fname}") #message (STATUS " Copying ${listfiles} to ${dest}") add_custom_command ( diff --git a/tools/h5ls/CMakeTestsVDS.cmake b/tools/h5ls/CMakeTestsVDS.cmake index 1ef3f20..3e85803 100644 --- a/tools/h5ls/CMakeTestsVDS.cmake +++ b/tools/h5ls/CMakeTestsVDS.cmake @@ -47,7 +47,7 @@ # copy the list of test files foreach (listfiles ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES}) - GET_FILENAME_COMPONENT(fname "${listfiles}" NAME) + get_filename_component(fname "${listfiles}" NAME) set (dest "${PROJECT_BINARY_DIR}/testfiles/vds/${fname}") #message (STATUS " Copying ${listfiles} to ${dest}") add_custom_command ( diff --git a/tools/h5repack/CMakeTests.cmake b/tools/h5repack/CMakeTests.cmake index 8aee98f..ba59bd2 100644 --- a/tools/h5repack/CMakeTests.cmake +++ b/tools/h5repack/CMakeTests.cmake @@ -118,7 +118,7 @@ ) foreach (h5_file ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES}) - GET_FILENAME_COMPONENT(fname "${h5_file}" NAME) + get_filename_component(fname "${h5_file}" NAME) set (dest "${PROJECT_BINARY_DIR}/testfiles/${fname}") #message (STATUS " Copying ${h5_file}") add_custom_command ( @@ -345,6 +345,41 @@ endif ("${testtype}" STREQUAL "SKIP") ENDMACRO (ADD_H5_VERIFY_TEST) + MACRO (ADD_H5_VERIFY_VDS testname testtype resultcode testfile testdset testfilter) + if ("${testtype}" STREQUAL "SKIP") + if (NOT HDF5_ENABLE_USING_MEMCHECKER) + add_test ( + NAME H5REPACK_VERIFY_LAYOUT-${testname}-SKIPPED + COMMAND ${CMAKE_COMMAND} -E echo "SKIP -d ${testdset} -pH ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${resultfile}" + ) + endif (NOT HDF5_ENABLE_USING_MEMCHECKER) + else ("${testtype}" STREQUAL "SKIP") + if (NOT HDF5_ENABLE_USING_MEMCHECKER) + add_test ( + NAME H5REPACK_VERIFY_LAYOUT-${testname} + COMMAND $<TARGET_FILE:h5repack> ${ARGN} ${PROJECT_BINARY_DIR}/testfiles/${testfile} ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${testfile} + ) + set_tests_properties (H5REPACK_VERIFY_LAYOUT-${testname} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles") + if (NOT "${last_test}" STREQUAL "") + set_tests_properties (H5REPACK_VERIFY_LAYOUT-${testname} PROPERTIES DEPENDS ${last_test}) + endif (NOT "${last_test}" STREQUAL "") + add_test ( + NAME H5REPACK_VERIFY_LAYOUT-${testname}_DMP + COMMAND "${CMAKE_COMMAND}" + -D "TEST_PROGRAM=$<TARGET_FILE:h5dump>" + -D "TEST_ARGS:STRING=-d;${testdset};-p;out-${testname}.${testfile}" + -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles" + -D "TEST_OUTPUT=${testfile}-${testname}-v.out" + -D "TEST_EXPECT=${resultcode}" + -D "TEST_REFERENCE=${testfile}-${testname}-v.ddl" + -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake" + ) + set_tests_properties (H5REPACK_VERIFY_LAYOUT-${testname}_DMP PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles") + set_tests_properties (H5REPACK_VERIFY_LAYOUT-${testname}_DMP PROPERTIES DEPENDS H5REPACK_VERIFY_LAYOUT-${testname}) + endif (NOT HDF5_ENABLE_USING_MEMCHECKER) + endif ("${testtype}" STREQUAL "SKIP") + ENDMACRO (ADD_H5_VERIFY_VDS) + MACRO (ADD_H5_TEST_META testname testfile) add_test ( NAME H5REPACK_META-${testname}_N @@ -1053,20 +1088,22 @@ ######################################################### # layout options ######################################################### - ADD_H5_VERIFY_TEST (vds_dset_conti "TEST" 0 ${FILEV1} vds_dset CONTIGUOUS -l vds_dset:CONTI) - ADD_H5_VERIFY_TEST 
(vds_null_conti "TEST" 1 ${FILEV2} null CONTIGUOUS -l CONTI) - ADD_H5_VERIFY_TEST (vds_dset_compa "TEST" 0 ${FILEV1} vds_dset COMPACT -l vds_dset:COMPA) - ADD_H5_VERIFY_TEST (vds_null_compa "TEST" 1 ${FILEV2} null COMPACT -l COMPA) +# skip tests because of HDFFV-9756 + ADD_H5_VERIFY_VDS (vds_dset_conti "SKIP" 0 ${FILEV1} vds_dset CONTIGUOUS -l vds_dset:CONTI) + ADD_H5_VERIFY_VDS (vds_null_conti "SKIP" 1 ${FILEV2} null CONTIGUOUS -l CONTI) + ADD_H5_VERIFY_VDS (vds_dset_compa "SKIP" 0 ${FILEV1} vds_dset COMPACT -l vds_dset:COMPA) + ADD_H5_VERIFY_VDS (vds_null_compa "SKIP" 1 ${FILEV2} null COMPACT -l COMPA) ################################################################ # layout conversions ############################################################### - ADD_H5_VERIFY_TEST (vds_compa_conti "TEST" 0 ${FILEV4} vds_dset CONTIGUOUS -l vds_dset:CONTI) - ADD_H5_VERIFY_TEST (vds_compa_compa "TEST" 0 ${FILEV4} vds_dset COMPACT -l vds_dset:COMPA) - ADD_H5_VERIFY_TEST (vds_conti_compa "TEST" 0 ${FILEV4} vds_dset COMPACT -l vds_dset:COMPA) - ADD_H5_VERIFY_TEST (vds_conti_conti "TEST" 0 ${FILEV4} vds_dset CONTIGUOUS -l vds_dset:CONTI) - ADD_H5_VERIFY_TEST (vds_compa "TEST" 0 ${FILEV4} vds_dset COMPACT -l vds_dset:COMPA) - ADD_H5_VERIFY_TEST (vds_conti "TEST" 0 ${FILEV4} vds_dset CONTIGUOUS -l vds_dset:CONTI) +# skip tests because of HDFFV-9756 + ADD_H5_VERIFY_VDS (vds_compa_conti "SKIP" 0 ${FILEV4} vds_dset CONTIGUOUS -l vds_dset:CONTI) + ADD_H5_VERIFY_VDS (vds_compa_compa "SKIP" 0 ${FILEV4} vds_dset COMPACT -l vds_dset:COMPA) + ADD_H5_VERIFY_VDS (vds_conti_compa "SKIP" 0 ${FILEV4} vds_dset COMPACT -l vds_dset:COMPA) + ADD_H5_VERIFY_VDS (vds_conti_conti "SKIP" 0 ${FILEV4} vds_dset CONTIGUOUS -l vds_dset:CONTI) + ADD_H5_VERIFY_VDS (vds_compa "SKIP" 0 ${FILEV4} vds_dset COMPACT -l vds_dset:COMPA) + ADD_H5_VERIFY_VDS (vds_conti "SKIP" 0 ${FILEV4} vds_dset CONTIGUOUS -l vds_dset:CONTI) ############################################################################## ### P L U G I N T E S T S diff --git a/tools/h5repack/h5repack.sh.in b/tools/h5repack/h5repack.sh.in index 24298d0..74da4c0 100644 --- a/tools/h5repack/h5repack.sh.in +++ b/tools/h5repack/h5repack.sh.in @@ -39,6 +39,8 @@ H5DUMP=../h5dump/h5dump # The h5dump tool name H5DUMP_BIN=`pwd`/$H5DUMP # The path of the h5dump tool binary RM='rm -rf' +CMP='cmp' +DIFF='diff -c' GREP='grep' CP='cp' DIRNAME='dirname' @@ -171,7 +173,7 @@ COPY_TESTFILES_TO_TESTDIR() INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'` INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'` if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then - $CP -f $tstfile $TESTDIR + $CP -f $tstfile $TESTDIR if [ $? -ne 0 ]; then echo "Error: FAILED to copy $tstfile ." 
@@ -188,7 +190,7 @@ CLEAN_TESTFILES_AND_TESTDIR() # skip rm if srcdir is same as destdir # this occurs when build/test performed in source dir and # make cp fail - SDIR=`$DIRNAME $tstfile` + SDIR=`$DIRNAME $SRC_H5REPACK_TESTFILES/h5repack-help.txt` INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'` INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'` if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then @@ -321,6 +323,70 @@ VERIFY_LAYOUT_DSET() rm -f $layoutfile } +#------------------------------------------ +# Verifying layouts of a dataset +VERIFY_LAYOUT_VDS() +{ + layoutfile=layout-$1.$2 + dset=$3 + expectlayout=$4 + infile=$2 + outfile=out-$1.$2 + + expect="$TESTDIR/$2-$1-v.ddl" + actual="$TESTDIR/$2-$1-v.out" + actual_err="$TESTDIR/$2-$1-v.err" + + shift + shift + shift + shift + + TESTING $H5REPACK $@ + ( + cd $TESTDIR + $RUNSERIAL $H5REPACK_BIN "$@" $infile $outfile + ) + RET=$? + if [ $RET != 0 ] ; then + echo "*FAILED*" + nerrors="`expr $nerrors + 1`" + else + echo " PASSED" + fi + + #--------------------------------- + # check the layout from a dataset + VERIFY "a dataset layout" + ( + cd $TESTDIR + $RUNSERIAL $H5DUMP_BIN -d $dset -p $outfile + ) >$actual 2>$actual_err + + cat $actual_err >> $actual + + if [ ! -f $expect ]; then + # Create the expect file if it doesn't yet exist. + echo " CREATED" + cp $actual $expect + elif $CMP $expect $actual > /dev/null 2>&1 ; then + echo " PASSED" + else + echo "*FAILED*" + echo " Expected result (*.ddl) differs from actual result (*.out)" + nerrors="`expr $nerrors + 1`" + test yes = "$verbose" && $DIFF $caseless $expect $actual |sed 's/^/ /' + fi + + # Clean up output file + if test -z "$HDF5_NOCLEANUP"; then + rm -f $actual $actual_err + fi + + # clean up tmp files + rm -f $outfile +} + #---------------------------------------- # Verifying layouts from entire file VERIFY_LAYOUT_ALL() @@ -469,11 +535,11 @@ TOOLTEST1() ) RET=$? if [ $RET != 0 ] ; then - echo "*FAILED*" - nerrors="`expr $nerrors + 1`" + echo "*FAILED*" + nerrors="`expr $nerrors + 1`" else - echo " PASSED" - DIFFTEST $infile $outfile + echo " PASSED" + DIFFTEST $infile $outfile fi rm -f $outfile } @@ -500,11 +566,11 @@ TOOLTESTV() ) >$actual 2>$actual_err RET=$? if [ $RET != 0 ] ; then - echo "*FAILED*" - nerrors="`expr $nerrors + 1`" + echo "*FAILED*" + nerrors="`expr $nerrors + 1`" else - echo " PASSED" - DIFFTEST $infile $outfile + echo " PASSED" + DIFFTEST $infile $outfile fi # display output compare @@ -634,11 +700,11 @@ TOOLTEST_META() # verify sizes. MESSAGE "Verify the sizes of both output files ($size1 vs $size2)" if [ $size1 -lt $size2 ]; then - # pass - echo " PASSED" + # pass + echo " PASSED" else - #fail - echo "*FAILED*" + #fail + echo "*FAILED*" nerrors="`expr $nerrors + 1`" fi @@ -711,7 +777,7 @@ USE_FILTER_SZIP_ENCODER=`$RUNSERIAL $H5DETECTSZIP_BIN` fi ############################################################################## -### T H E T E S T S +### T H E T E S T S ############################################################################## # prepare for test COPY_TESTFILES_TO_TESTDIR @@ -1057,7 +1123,7 @@ TOOLTEST add_alignment $arg # Check repacking file with old version of layout message (should get upgraded # to new version and be readable, etc.) 
-TOOLTEST pgrade_layout h5repack_layouto.h5 +TOOLTEST upgrade_layout h5repack_layouto.h5 # test for datum size > H5TOOLS_MALLOCSIZE TOOLTEST gt_mallocsize h5repack_objs.h5 -f GZIP=1 @@ -1091,20 +1157,33 @@ TOOLTEST_META meta_long h5repack_layout.h5 --metadata_block_size=8192 ######################################################### # layout options ######################################################### -VERIFY_LAYOUT_DSET vds_dset_conti 1_vds.h5 vds_dset CONTIGUOUS -l vds_dset:CONTI -VERIFY_LAYOUT_ALL vds_null_conti 2_vds.h5 CONTIGUOUS -l CONTI -VERIFY_LAYOUT_DSET vds_dset_compa 1_vds.h5 vds_dset COMPACT -l vds_dset:COMPA -VERIFY_LAYOUT_ALL vds_null_compa 2_vds.h5 COMPACT -l COMPA +# skip tests because of HDFFV-9756 +#VERIFY_LAYOUT_VDS vds_dset_conti 1_vds.h5 vds_dset CONTIGUOUS -l vds_dset:CONTI +#VERIFY_LAYOUT_ALL vds_null_conti 2_vds.h5 CONTIGUOUS -l CONTI +# skip test because of HDFFV-9756 +#VERIFY_LAYOUT_VDS vds_dset_compa 1_vds.h5 vds_dset COMPACT -l vds_dset:COMPA +#VERIFY_LAYOUT_ALL vds_null_compa 2_vds.h5 COMPACT -l COMPA +SKIP vds_dset_conti 1_vds.h5 vds_dset CONTIGUOUS -l vds_dset:CONTI +SKIP vds_null_conti 2_vds.h5 CONTIGUOUS -l CONTI +SKIP vds_dset_compa 1_vds.h5 vds_dset COMPACT -l vds_dset:COMPA +SKIP vds_null_compa 2_vds.h5 COMPACT -l COMPA ################################################################ # layout conversions ############################################################### -VERIFY_LAYOUT_DSET vds_compa_conti 4_vds.h5 vds_dset CONTIGUOUS -l vds_dset:CONTI -VERIFY_LAYOUT_DSET vds_compa_compa 4_vds.h5 vds_dset COMPACT -l vds_dset:COMPA -VERIFY_LAYOUT_DSET vds_conti_compa 4_vds.h5 vds_dset COMPACT -l vds_dset:COMPA -VERIFY_LAYOUT_DSET vds_conti_conti 4_vds.h5 vds_dset CONTIGUOUS -l vds_dset:CONTI -VERIFY_LAYOUT_DSET vds_compa 4_vds.h5 vds_dset COMPACT -l vds_dset:COMPA -VERIFY_LAYOUT_DSET vds_conti 4_vds.h5 vds_dset CONTIGUOUS -l vds_dset:CONTI +# skip tests because of HDFFV-9756 +#VERIFY_LAYOUT_VDS vds_compa_conti 4_vds.h5 vds_dset CONTIGUOUS -l vds_dset:CONTI +#VERIFY_LAYOUT_VDS vds_compa_compa 4_vds.h5 vds_dset COMPACT -l vds_dset:COMPA +#VERIFY_LAYOUT_VDS vds_conti_compa 4_vds.h5 vds_dset COMPACT -l vds_dset:COMPA +#VERIFY_LAYOUT_VDS vds_conti_conti 4_vds.h5 vds_dset CONTIGUOUS -l vds_dset:CONTI +#VERIFY_LAYOUT_VDS vds_compa 4_vds.h5 vds_dset COMPACT -l vds_dset:COMPA +#VERIFY_LAYOUT_VDS vds_conti 4_vds.h5 vds_dset CONTIGUOUS -l vds_dset:CONTI +SKIP vds_compa_conti 4_vds.h5 vds_dset CONTIGUOUS -l vds_dset:CONTI +SKIP vds_compa_compa 4_vds.h5 vds_dset COMPACT -l vds_dset:COMPA +SKIP vds_conti_compa 4_vds.h5 vds_dset COMPACT -l vds_dset:COMPA +SKIP vds_conti_conti 4_vds.h5 vds_dset CONTIGUOUS -l vds_dset:CONTI +SKIP vds_compa 4_vds.h5 vds_dset COMPACT -l vds_dset:COMPA +SKIP vds_conti 4_vds.h5 vds_dset CONTIGUOUS -l vds_dset:CONTI # Clean up temporary files/directories CLEAN_TESTFILES_AND_TESTDIR diff --git a/tools/lib/h5diff_dset.c b/tools/lib/h5diff_dset.c index b8dd0e8..63f1483 100644 --- a/tools/lib/h5diff_dset.c +++ b/tools/lib/h5diff_dset.c @@ -71,9 +71,7 @@ hsize_t diff_dataset( hid_t file1_id, if((dcpl1 = H5Dget_create_plist(did1)) < 0) goto error; if((dcpl2 = H5Dget_create_plist(did2)) < 0) - { goto error; - } /*------------------------------------------------------------------------- * check if the dataset creation property list has filters that @@ -191,6 +189,10 @@ hsize_t diff_datasetid( hid_t did1, hid_t f_tid2=-1; hid_t m_tid1=-1; hid_t m_tid2=-1; + hid_t dcpl1 = -1; + hid_t dcpl2 = -1; + H5D_layout_t stl1 = -1; + H5D_layout_t stl2 = -1; size_t 
m_size1; size_t m_size2; H5T_sign_t sign1; @@ -260,6 +262,21 @@ hsize_t diff_datasetid( hid_t did1, goto error; } + + /*------------------------------------------------------------------------- + * get the storage layout type + *------------------------------------------------------------------------- + */ + if((dcpl1 = H5Dget_create_plist(did1)) < 0) + goto error; + if((dcpl2 = H5Dget_create_plist(did2)) < 0) + goto error; + + if((stl1 = H5Pget_layout(dcpl1)) < 0) + goto error; + if((stl2 = H5Pget_layout(dcpl2)) < 0) + goto error; + /*------------------------------------------------------------------------- * check for empty datasets *------------------------------------------------------------------------- @@ -271,10 +288,18 @@ hsize_t diff_datasetid( hid_t did1, if (storage_size1==0 || storage_size2==0) { - if ( (options->m_verbose||options->m_list_not_cmp) && obj1_name && obj2_name) - parallel_print("Not comparable: <%s> or <%s> is an empty dataset\n", obj1_name, obj2_name); - can_compare=0; - options->not_cmp=1; + if (stl1==H5D_VIRTUAL || stl2==H5D_VIRTUAL) + { + if ( (options->m_verbose||options->m_list_not_cmp) && obj1_name && obj2_name) + parallel_print("Warning: <%s> or <%s> is a virtual dataset\n", obj1_name, obj2_name); + } + else + { + if ( (options->m_verbose||options->m_list_not_cmp) && obj1_name && obj2_name) + parallel_print("Not comparable: <%s> or <%s> is an empty dataset\n", obj1_name, obj2_name); + can_compare=0; + options->not_cmp=1; + } } /*------------------------------------------------------------------------- @@ -329,7 +354,7 @@ hsize_t diff_datasetid( hid_t did1, parallel_print("Not comparable: <%s> has sign %s ", obj1_name, get_sign(sign1)); parallel_print("and <%s> has sign %s\n", obj2_name, get_sign(sign2)); } - + can_compare=0; options->not_cmp=1; } @@ -369,7 +394,7 @@ hsize_t diff_datasetid( hid_t did1, h5difftrace("upgrade the smaller memory size?\n"); if (FAIL == match_up_memsize (f_tid1, f_tid2, - &m_tid1, &m_tid2, + &m_tid1, &m_tid2, &m_size1, &m_size2)) goto error; diff --git a/tools/misc/h5debug.c b/tools/misc/h5debug.c index cd1fdca..4df501a 100644 --- a/tools/misc/h5debug.c +++ b/tools/misc/h5debug.c @@ -121,6 +121,14 @@ get_H5B2_class(const uint8_t *sig) cls = H5A_BT2_CORDER; break; + case H5B2_CDSET_ID: + cls = H5D_BT2; + break; + + case H5B2_CDSET_FILT_ID: + cls = H5D_BT2_FILT; + break; + case H5B2_TEST2_ID: cls = H5B2_TEST2; break; @@ -161,6 +169,14 @@ get_H5EA_class(const uint8_t *sig) cls = H5EA_CLS_TEST; break; + case H5EA_CLS_CHUNK_ID: + cls = H5EA_CLS_CHUNK; + break; + + case H5EA_CLS_FILT_CHUNK_ID: + cls = H5EA_CLS_FILT_CHUNK; + break; + case H5EA_NUM_CLS_ID: default: HDfprintf(stderr, "Unknown extensible array class %u\n", (unsigned)(clsid)); @@ -197,6 +213,14 @@ get_H5FA_class(const uint8_t *sig) cls = H5FA_CLS_TEST; break; + case H5FA_CLS_CHUNK_ID: + cls = H5FA_CLS_CHUNK; + break; + + case H5FA_CLS_FILT_CHUNK_ID: + cls = H5FA_CLS_FILT_CHUNK; + break; + case H5FA_NUM_CLS_ID: default: HDfprintf(stderr, "Unknown fixed array class %u\n", (unsigned)(clsid)); @@ -402,6 +426,13 @@ main(int argc, char *argv[]) const H5B2_class_t *cls = get_H5B2_class(sig); HDassert(cls); + if((cls == H5D_BT2 || cls == H5D_BT2_FILT) && extra == 0) { + HDfprintf(stderr, "ERROR: Need v2 B-tree header address and object header address containing the layout message in order to dump header\n"); + HDfprintf(stderr, "v2 B-tree hdr usage:\n"); + HDfprintf(stderr, "\th5debug <filename> <v2 B-tree header address> <object header address>\n"); + HDexit(4); + } /* end if */ + 
status = H5B2__hdr_debug(f, H5AC_ind_read_dxpl_id, addr, stdout, 0, VCOL, cls, (haddr_t)extra); } else if(!HDmemcmp(sig, H5B2_INT_MAGIC, (size_t)H5_SIZEOF_MAGIC)) { @@ -412,7 +443,16 @@ main(int argc, char *argv[]) HDassert(cls); /* Check for enough valid parameters */ - if(extra == 0 || extra2 == 0 || extra3 == 0) { + if((cls == H5D_BT2 || cls == H5D_BT2_FILT) && + (extra == 0 || extra2 == 0 || extra3 == 0 || extra4 == 0)) { + + fprintf(stderr, "ERROR: Need v2 B-tree header address, the node's number of records, depth, and object header address containing the layout message in order to dump internal node\n"); + fprintf(stderr, "NOTE: Leaf nodes are depth 0, the internal nodes above them are depth 1, etc.\n"); + fprintf(stderr, "v2 B-tree internal node usage:\n"); + fprintf(stderr, "\th5debug <filename> <internal node address> <v2 B-tree header address> <number of records> <depth> <object header address>\n"); + HDexit(4); + + } else if(extra == 0 || extra2 == 0 || extra3 == 0) { HDfprintf(stderr, "ERROR: Need v2 B-tree header address and the node's number of records and depth in order to dump internal node\n"); HDfprintf(stderr, "NOTE: Leaf nodes are depth 0, the internal nodes above them are depth 1, etc.\n"); HDfprintf(stderr, "v2 B-tree internal node usage:\n"); @@ -430,7 +470,15 @@ main(int argc, char *argv[]) HDassert(cls); /* Check for enough valid parameters */ - if(extra == 0 || extra2 == 0) { + if((cls == H5D_BT2 || cls == H5D_BT2_FILT) && + (extra == 0 || extra2 == 0 || extra3 == 0 )) { + + fprintf(stderr, "ERROR: Need v2 B-tree header address, number of records, and object header address containing the layout message in order to dump leaf node\n"); + fprintf(stderr, "v2 B-tree leaf node usage:\n"); + fprintf(stderr, "\th5debug <filename> <leaf node address> <v2 B-tree header address> <number of records> <object header address>\n"); + HDexit(4); + + } else if(extra == 0 || extra2 == 0) { HDfprintf(stderr, "ERROR: Need v2 B-tree header address and number of records in order to dump leaf node\n"); HDfprintf(stderr, "v2 B-tree leaf node usage:\n"); HDfprintf(stderr, "\th5debug <filename> <leaf node address> <v2 B-tree header address> <number of records>\n");
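The h5diff_dset.c hunks above retrieve each dataset's creation property list and storage layout so that a virtual dataset with zero local storage is reported with a warning rather than treated as an empty, non-comparable dataset, matching the updated h5diff_v1.txt and h5diff_v3.txt expected output. A standalone sketch of that check follows; the identifiers are placeholders and error handling is omitted.

/* Sketch: distinguish a virtual dataset from a genuinely empty one before
 * declaring it non-comparable (placeholder names, no error handling). */
#include "hdf5.h"
#include <stdio.h>

static void report_zero_storage(hid_t did, const char *name)
{
    hid_t        dcpl   = H5Dget_create_plist(did);
    H5D_layout_t layout = H5Pget_layout(dcpl);

    if (H5Dget_storage_size(did) == 0) {
        if (layout == H5D_VIRTUAL)
            /* Virtual datasets hold no local storage, so only warn and
             * let the comparison continue. */
            printf("Warning: <%s> is a virtual dataset\n", name);
        else
            printf("Not comparable: <%s> is an empty dataset\n", name);
    }

    H5Pclose(dcpl);
}

The accompanying h5debug change is a usage check rather than new behavior: for chunked-dataset v2 B-tree signatures (H5D_BT2 and H5D_BT2_FILT) it now requires the extra object header address argument spelled out in the added usage strings before dumping a header, internal node, or leaf node.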