summary | refs | log | tree | commit | diff | stats
path: root/src/H5Dchunk.c
diff options
context:
space:
mode:
authorJordan Henderson <jhenderson@hdfgroup.org>2018-07-16 14:37:54 (GMT)
committerJordan Henderson <jhenderson@hdfgroup.org>2018-07-16 14:37:54 (GMT)
commitf649be9fdc9add7a12aa5c8290b9bf8a45d49a56 (patch)
tree15953e85d2e4e79203c80ae12e264f1702c2ab32 /src/H5Dchunk.c
parent518f4af90058d44fa5557d1a3509afc947ec80d7 (diff)
parenta8d6f100cdddbfc42d0c4abfeb4ceb8788b1b087 (diff)
downloadhdf5-f649be9fdc9add7a12aa5c8290b9bf8a45d49a56.zip
hdf5-f649be9fdc9add7a12aa5c8290b9bf8a45d49a56.tar.gz
hdf5-f649be9fdc9add7a12aa5c8290b9bf8a45d49a56.tar.bz2
Merge pull request #1127 in HDFFV/hdf5 from ~JHENDERSON/hdf5:develop to develop
* commit 'a8d6f100cdddbfc42d0c4abfeb4ceb8788b1b087':
  - Add note about single chunk caching and serial library
  - Add check for actually using the MPI file driver when caching one chunk
  - Fix error message mentioning wrong MPI function used
  - Fix for HDFFV-10509
  - Revise H5D__mpio_array_gatherv() to not allocate memory needlessly
  - Add test to continually grow and shrink chunks
  - Changes to test with checksum filter as well as deflate filter
  - Eliminate warning about signed to unsigned conversion
  - Remove unused local variable
  - Fix bug in parallel reads of compressed data
  - Add data verification to parallel filtered compound write tests
  - Add seven of fourteen parallel filtered data partial read tests
Diffstat (limited to 'src/H5Dchunk.c')
-rw-r--r--src/H5Dchunk.c37
1 file changed, 35 insertions, 2 deletions
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index e3f6410..e64a60f 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -2933,8 +2933,41 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled,
H5F_set_coll_md_read(idx_info.f, temp_cmr);
#endif /* H5_HAVE_PARALLEL */
- /* Cache the information retrieved */
- H5D__chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata);
+ /*
+ * Cache the information retrieved.
+ *
+ * Note that if we are writing to the dataset in parallel and filters
+ * are involved, we skip caching this information as it is highly likely
+ * that the chunk information will be invalidated as a result of the
+ * filter operation (e.g. the chunk gets re-allocated to a different
+ * address in the file and/or gets re-allocated with a different size).
+ * If we were to cache this information, subsequent reads/writes would
+ * retrieve the invalid information and cause a variety of issues.
+ *
+ * It has been verified that in the serial library, when writing to chunks
+ * with the real chunk cache disabled and with filters involved, the
+ * functions within this file are correctly called in such a manner that
+ * this single chunk cache is always updated correctly. Therefore, this
+ * check is not needed for the serial library.
+ *
+ * This is an ugly and potentially frail check, but the
+ * H5D__chunk_cinfo_cache_reset() function is not currently available
+ * to functions outside of this file, so outside functions can not
+ * invalidate this single chunk cache. Even if the function were available,
+ * this check prevents us from doing the work of going through and caching
+ * each chunk in the write operation, when we're only going to invalidate
+ * the cache at the end of a parallel write anyway.
+ *
+ * - JTH (7/13/2018)
+ */
+#ifdef H5_HAVE_PARALLEL
+ if ( !( (H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI))
+ && (H5F_INTENT(dset->oloc.file) & H5F_ACC_RDWR)
+ && dset->shared->dcpl_cache.pline.nused
+ )
+ )
+#endif
+ H5D__chunk_cinfo_cache_update(&dset->shared->cache.chunk.last, udata);
} /* end if */
} /* end else */