path: root/testpar/t_mdset.c
author     Sean McBride <sean@rogue-research.com>  2022-06-22 18:58:18 (GMT)
committer  GitHub <noreply@github.com>  2022-06-22 18:58:18 (GMT)
commit     c064d3481b582653c1e0d0043a17527fd73e8c4d (patch)
tree       396a5e13954c19b803aa80465938cde15140b176 /testpar/t_mdset.c
parent     d6f05069c1a3642bbebf7ec27e7df809f0675f13 (diff)
sprintf to snprintf (#1815)
* Straightforward conversion of sprintf to the safer snprintf
* Trickier conversion of sprintf to safer snprintf. This involved minor changes to private function signatures to take the size of the buffer.
* Committing clang-format changes

Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com>
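For context, here is a minimal standalone sketch of the two conversion patterns the commit message describes. This is not HDF5 code; the helper name and buffer size are made up for illustration. The first pattern is the straightforward sprintf-to-snprintf swap with sizeof of the destination; the second is the trickier case where a private function's signature gains a buffer-size parameter so it can bound its own snprintf call.

#include <stdio.h>

/* Hypothetical helper: before such a change it would take only `name`
 * and call sprintf internally; passing the buffer size lets it call
 * snprintf so a too-small buffer is truncated rather than overrun. */
static void format_dataset_name(char *name, size_t name_len, int index)
{
    /* snprintf writes at most name_len bytes, including the NUL. */
    snprintf(name, name_len, "dataset%d", index);
}

int main(void)
{
    char dname[32];

    /* Straightforward conversion: sprintf(dname, "dataset%d", 7)
     * becomes snprintf with the destination buffer's size. */
    snprintf(dname, sizeof(dname), "dataset%d", 7);
    printf("%s\n", dname);

    /* Trickier conversion: the caller forwards sizeof(dname) so the
     * callee can bound its own snprintf. */
    format_dataset_name(dname, sizeof(dname), 8);
    printf("%s\n", dname);

    return 0;
}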
Diffstat (limited to 'testpar/t_mdset.c')
-rw-r--r--  testpar/t_mdset.c  30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 339ca33..54d75e6 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -183,7 +183,7 @@ multiple_dset_write(void)
VRFY((ret >= 0), "set fill-value succeeded");
for (n = 0; n < ndatasets; n++) {
- HDsprintf(dname, "dataset %d", n);
+ HDsnprintf(dname, sizeof(dname), "dataset %d", n);
dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset > 0), dname);
@@ -878,11 +878,11 @@ collective_group_write(void)
/* creates ngroups groups under the root group, writes chunked
* datasets in parallel. */
for (m = 0; m < ngroups; m++) {
- HDsprintf(gname, "group%d", m);
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((gid > 0), gname);
- HDsprintf(dname, "dataset%d", m);
+ HDsnprintf(dname, sizeof(dname), "dataset%d", m);
did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -979,12 +979,12 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
/* open every group under root group. */
- HDsprintf(gname, "group%d", m);
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
gid = H5Gopen2(fid, gname, H5P_DEFAULT);
VRFY((gid > 0), gname);
/* check the data. */
- HDsprintf(dname, "dataset%d", m);
+ HDsnprintf(dname, sizeof(dname), "dataset%d", m);
did = H5Dopen2(gid, dname, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -1080,7 +1080,7 @@ multiple_group_write(void)
/* creates ngroups groups under the root group, writes datasets in
* parallel. */
for (m = 0; m < ngroups; m++) {
- HDsprintf(gname, "group%d", m);
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((gid > 0), gname);
@@ -1136,7 +1136,7 @@ write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
VRFY((outme != NULL), "HDmalloc succeeded for outme");
for (n = 0; n < NDATASET; n++) {
- HDsprintf(dname, "dataset%d", n);
+ HDsnprintf(dname, sizeof(dname), "dataset%d", n);
did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -1174,7 +1174,7 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
}
#endif /* BARRIER_CHECKS */
- HDsprintf(gname, "%dth_child_group", counter + 1);
+ HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((child_gid > 0), gname);
@@ -1228,7 +1228,7 @@ multiple_group_read(void)
/* open every group under root group. */
for (m = 0; m < ngroups; m++) {
- HDsprintf(gname, "group%d", m);
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
gid = H5Gopen2(fid, gname, H5P_DEFAULT);
VRFY((gid > 0), gname);
@@ -1285,7 +1285,7 @@ read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
for (n = 0; n < NDATASET; n++) {
- HDsprintf(dname, "dataset%d", n);
+ HDsnprintf(dname, sizeof(dname), "dataset%d", n);
did = H5Dopen2(gid, dname, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -1336,7 +1336,7 @@ recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter)
nerrors += err_num;
if (counter < GROUP_DEPTH) {
- HDsprintf(gname, "%dth_child_group", counter + 1);
+ HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
child_gid = H5Gopen2(gid, gname, H5P_DEFAULT);
VRFY((child_gid > 0), gname);
recursive_read_group(memspace, filespace, child_gid, counter + 1);
@@ -1358,7 +1358,7 @@ write_attribute(hid_t obj_id, int this_type, int num)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if (this_type == is_group) {
- HDsprintf(attr_name, "Group Attribute %d", num);
+ HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
sid = H5Screate(H5S_SCALAR);
aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
H5Awrite(aid, H5T_NATIVE_INT, &num);
@@ -1366,7 +1366,7 @@ write_attribute(hid_t obj_id, int this_type, int num)
H5Sclose(sid);
} /* end if */
else if (this_type == is_dset) {
- HDsprintf(attr_name, "Dataset Attribute %d", num);
+ HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
for (i = 0; i < 8; i++)
attr_data[i] = i;
sid = H5Screate_simple(dspace_rank, dspace_dims, NULL);
@@ -1389,14 +1389,14 @@ read_attribute(hid_t obj_id, int this_type, int num)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if (this_type == is_group) {
- HDsprintf(attr_name, "Group Attribute %d", num);
+ HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
H5Aread(aid, H5T_NATIVE_INT, &in_num);
vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
H5Aclose(aid);
}
else if (this_type == is_dset) {
- HDsprintf(attr_name, "Dataset Attribute %d", num);
+ HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
for (i = 0; i < 8; i++)
out_data[i] = i;
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);