author    | Mohamad Chaarawi <chaarawi@hdfgroup.org> | 2014-04-18 21:53:20 (GMT)
committer | Mohamad Chaarawi <chaarawi@hdfgroup.org> | 2014-04-18 21:53:20 (GMT)
commit    | 9e4ba4e7d726f6c894a6dffb87fa6ba23366b0dd
tree      | aa454b73d355123ec5cac9b8b12c155cacad9864
parent    | f5fb140a11328e4528b64b91cf68ad5184055669
[svn-r25069] - fix some analysis-shipping bugs
- fix a bug in H5Dset_extent when used asynchronously
- fix a bug in the example where H5Dset_extent is used (see the sketches below)
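
The dataset example previously had every rank call H5RCacquire for container version 1 and every rank release it. The corrected pattern has only rank 0 acquire the version, broadcasts it, and lets the other ranks wrap the broadcast version in a local handle with H5RCcreate; only rank 0 releases it afterwards. A condensed sketch of that pattern, using the FastForward (H5FF) prototype calls exactly as they appear in h5ff_client_dset.c (file_id, my_rank, and e_stack are assumed to be set up by the surrounding program):

    uint64_t version;
    hid_t    rid1;
    herr_t   ret;

    /* Rank 0 performs the (synchronous) acquire, then shares the version. */
    if(0 == my_rank) {
        version = 1;
        rid1 = H5RCacquire(file_id, &version, H5P_DEFAULT, H5_EVENT_STACK_NULL);
    }
    MPI_Bcast(&version, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD);
    assert(1 == version);

    /* The other ranks build a local read-context handle from the broadcast version. */
    if(my_rank != 0)
        rid1 = H5RCcreate(file_id, version);

    /* ... start transactions / read against rid1 ... */

    /* Only the rank that acquired the version releases it (asynchronously). */
    if(0 == my_rank) {
        ret = H5RCrelease(rid1, e_stack);
        assert(0 == ret);
    }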
-rw-r--r-- | examples/h5ff_client_attr.c |   4
-rw-r--r-- | examples/h5ff_client_dset.c | 117
-rw-r--r-- | src/H5VLiod.c               |  30
-rw-r--r-- | src/H5VLiod_analysis.c      | 155
4 files changed, 158 insertions, 148 deletions
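
In the corrected dset example, the previously disabled (#if 0) second half of the program is enabled and its two follow-up transactions are folded into a single transaction 3 in which every rank is a leader: each rank extends did1, rewrites it, finishes the transaction, and then reads the extended dataset back at container version 3 to verify it. A hedged, condensed sketch of that sequence (did1, dtid, rid2, rid3, e_stack, ex_wdata and ex_rdata are declared and set up in the example; most error handling and the MPI barriers are trimmed):

    hsize_t extent = nelem + 10;

    /* All ranks are transaction leaders (num_peers == my_size), so all of
       them issue the extent change, the write, and the finish. */
    tid2 = H5TRcreate(file_id, rid2, (uint64_t)3);
    trspl_id = H5Pcreate(H5P_TR_START);
    ret = H5Pset_trspl_num_peers(trspl_id, my_size);
    assert(0 == ret);
    ret = H5TRstart(tid2, trspl_id, e_stack);
    assert(0 == ret);
    H5Pclose(trspl_id);

    ret = H5Dset_extent_ff(did1, &extent, tid2, e_stack);   /* asynchronous */
    assert(0 == ret);

    /* As in the example, drain the event stack before querying the
       dataspace used for the write. */
    H5ESwait_all(e_stack, &status);
    H5ESclear(e_stack);

    esid = H5Dget_space(did1);
    for(i = 0; i < nelem + 10; ++i) {
        ex_wdata[i] = i;
        ex_rdata[i] = 0;
    }
    ret = H5Dwrite_ff(did1, dtid, esid, esid, H5P_DEFAULT, ex_wdata, tid2, e_stack);
    assert(0 == ret);

    ret = H5TRfinish(tid2, H5P_DEFAULT, NULL, H5_EVENT_STACK_NULL);
    assert(0 == ret);

    /* Acquire container version 3 (rank 0 acquires and broadcasts, the other
       ranks call H5RCcreate, as sketched above), then read back and verify. */
    ret = H5Dread_ff(did1, dtid, H5S_ALL, H5S_ALL, H5P_DEFAULT, ex_rdata,
                     rid3, H5_EVENT_STACK_NULL);
    assert(0 == ret);
    for(i = 0; i < nelem + 10; ++i)
        assert(ex_rdata[i] == ex_wdata[i]);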
diff --git a/examples/h5ff_client_attr.c b/examples/h5ff_client_attr.c index 572c721..dd2ca7c 100644 --- a/examples/h5ff_client_attr.c +++ b/examples/h5ff_client_attr.c @@ -190,6 +190,7 @@ int main(int argc, char **argv) { H5ESwait_all(e_stack, &status); H5ESclear(e_stack); printf("%d events in event stack. Completion status = %d\n", num_events, status); + assert(status == H5ES_STATUS_SUCCEED); if(0 == my_rank) { /* create transaction object */ @@ -238,6 +239,7 @@ int main(int argc, char **argv) { H5ESwait_all(e_stack, &status); H5ESclear(e_stack); printf("%d events in event stack. Completion status = %d\n", num_events, status); + assert(status == H5ES_STATUS_SUCCEED); /* Tell other procs that container version 3 is acquired */ MPI_Bcast(&version, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD); @@ -282,6 +284,7 @@ int main(int argc, char **argv) { H5ESwait_all(e_stack, &status); H5ESclear(e_stack); printf("%d events in event stack. Completion status = %d\n", num_events, status); + assert(status == H5ES_STATUS_SUCCEED); /* barrier so root knows all are done reading */ MPI_Barrier(MPI_COMM_WORLD); @@ -307,6 +310,7 @@ int main(int argc, char **argv) { H5ESwait_all(e_stack, &status); H5ESclear(e_stack); printf("%d events in event stack. Completion status = %d\n", num_events, status); + assert(status == H5ES_STATUS_SUCCEED); ret = H5RCclose(rid1); assert(0 == ret); diff --git a/examples/h5ff_client_dset.c b/examples/h5ff_client_dset.c index 5b36c79..01bfca2 100644 --- a/examples/h5ff_client_dset.c +++ b/examples/h5ff_client_dset.c @@ -45,7 +45,7 @@ int main(int argc, char **argv) { hid_t gid1, gid2, gid3; hid_t sid, scalar, dtid; hid_t did1, did2, did3; - hid_t tid1, tid2, tid3, rid1, rid2, rid3, rid4; + hid_t tid1, tid2, tid3, rid1, rid2, rid3; hid_t fapl_id, trspl_id, dxpl_id; hid_t e_stack; hid_t esid; @@ -134,15 +134,20 @@ int main(int argc, char **argv) { /* acquire container version 1 - EXACT. This can be asynchronous, but here we need the acquired ID right after the call to start the transaction so we make synchronous. */ - version = 1; - rid1 = H5RCacquire(file_id, &version, H5P_DEFAULT, H5_EVENT_STACK_NULL); + if(0 == my_rank) { + version = 1; + rid1 = H5RCacquire(file_id, &version, H5P_DEFAULT, H5_EVENT_STACK_NULL); + } + MPI_Bcast( &version, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD ); assert(1 == version); + if (my_rank != 0) + rid1 = H5RCcreate(file_id, version); /* create transaction object */ tid1 = H5TRcreate(file_id, rid1, (uint64_t)2); assert(tid1); - /* start transaction 1 with default Leader/Delegate model. Leader + /* start transaction 2 with default Leader/Delegate model. Leader which is rank 0 here starts the transaction. It can be asynchronous, but we make it synchronous here so that the Leader can tell its delegates that the transaction is @@ -319,8 +324,10 @@ int main(int argc, char **argv) { assert(0 == ret); /* release container version 1. This is async. */ - ret = H5RCrelease(rid1, e_stack); - assert(0 == ret); + if(0 == my_rank) { + ret = H5RCrelease(rid1, e_stack); + assert(0 == ret); + } H5ESget_count(e_stack, &num_events); H5ESwait_all(e_stack, &status); @@ -441,11 +448,10 @@ int main(int argc, char **argv) { printf("Rank %d read value %d\n", my_rank, element); assert(element == 450); -#if 0 - /* create & start transaction 2 with num_peers = my_size. This + /* create & start transaction 3 with num_peers = my_size. This means all processes are transaction leaders, and all have to call start and finish on the transaction. 
*/ - tid2 = H5TRcreate(file_id, rid2, (uint64_t)2); + tid2 = H5TRcreate(file_id, rid2, (uint64_t)3); assert(tid2); trspl_id = H5Pcreate (H5P_TR_START); ret = H5Pset_trspl_num_peers(trspl_id, my_size); @@ -457,69 +463,34 @@ int main(int argc, char **argv) { /* Do more updates on transaction 2 */ - if(0 == my_rank) { + { extent = nelem+10; ret = H5Dset_extent_ff(did1, &extent, tid2, e_stack); assert(ret == 0); } - if((my_size > 1 && 1 == my_rank) || - (my_size == 1 && 0 == my_rank)) { - extent = 30; - ret = H5Dset_extent_ff(did2, &extent, tid2, e_stack); - assert(ret == 0); - } - - if((my_size > 2 && 2 == my_rank) || - (my_size == 1 && 0 == my_rank)) { - extent = 60; - ret = H5Dset_extent_ff(did3, &extent, tid2, e_stack); - assert(ret == 0); - } - - /* finish transaction 2 - all have to call */ - ret = H5TRfinish(tid2, H5P_DEFAULT, NULL, H5_EVENT_STACK_NULL); - assert(0 == ret); - H5ESget_count(e_stack, &num_events); H5ESwait_all(e_stack, &status); printf("%d events in event stack. H5ESwait_all Completion status = %d\n", num_events, status); H5ESclear(e_stack); MPI_Barrier(MPI_COMM_WORLD); - version = 2; - rid3 = H5RCacquire(file_id, &version, H5P_DEFAULT, H5_EVENT_STACK_NULL); - assert(2 == version); - - tid3 = H5TRcreate(file_id, rid3, (uint64_t)3); - assert(tid3); - trspl_id = H5Pcreate (H5P_TR_START); - ret = H5Pset_trspl_num_peers(trspl_id, my_size); - assert(0 == ret); - ret = H5TRstart(tid3, trspl_id, e_stack); - assert(0 == ret); - ret = H5Pclose(trspl_id); - assert(0 == ret); - esid = H5Dget_space(did1); - ex_wdata = malloc (sizeof(int32_t)*(nelem+10)); ex_rdata = malloc (sizeof(int32_t)*(nelem+10)); - if(0 == my_rank) { - for(i=0;i<extent;++i) { - ex_wdata[i] = i; - ex_rdata[i] = 0; - } - - ret = H5Dwrite_ff(did1, dtid, esid, esid, H5P_DEFAULT, - ex_wdata, tid3, e_stack); - assert(ret == 0); + for(i=0;i<nelem+10;++i) { + ex_wdata[i] = i; + ex_rdata[i] = 0; } + ret = H5Dwrite_ff(did1, dtid, esid, esid, H5P_DEFAULT, + ex_wdata, tid2, e_stack); + assert(ret == 0); + /* finish transaction 3 */ - ret = H5TRfinish(tid3, H5P_DEFAULT, NULL, H5_EVENT_STACK_NULL); + ret = H5TRfinish(tid2, H5P_DEFAULT, NULL, H5_EVENT_STACK_NULL); assert(0 == ret); H5ESget_count(e_stack, &num_events); @@ -529,36 +500,32 @@ int main(int argc, char **argv) { H5ESclear(e_stack); MPI_Barrier(MPI_COMM_WORLD); - version = 3; - rid4 = H5RCacquire(file_id, &version, H5P_DEFAULT, H5_EVENT_STACK_NULL); + if(0 == my_rank) { + version = 3; + rid3 = H5RCacquire(file_id, &version, H5P_DEFAULT, H5_EVENT_STACK_NULL); + } + MPI_Bcast( &version, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD ); assert(3 == version); + if (my_rank != 0) + rid3 = H5RCcreate(file_id, version); - { - esid = H5Dget_space(did1); - - ret = H5Dread_ff(did1, dtid, esid, esid, H5P_DEFAULT, ex_rdata, - rid4, H5_EVENT_STACK_NULL); - assert(ret == 0); - - printf("Printing all Extended Dataset values: "); - for(i=0 ; i<nelem+10 ; ++i) - printf("%d ", ex_rdata[i]); - printf("\n"); + ret = H5Dread_ff(did1, dtid, H5S_ALL, H5S_ALL, H5P_DEFAULT, ex_rdata, + rid3, H5_EVENT_STACK_NULL); + assert(ret == 0); - H5Sclose(esid); + printf("Printing all Extended Dataset values: "); + for(i=0 ; i<nelem+10 ; ++i) { + printf("%d ", ex_rdata[i]); + assert(ex_rdata[i] == ex_wdata[i]); } + printf("\n"); - /* release container version 3. This is async. */ - ret = H5RCrelease(rid3, e_stack); - assert(0 == ret); - ret = H5RCrelease(rid4, e_stack); - assert(0 == ret); - -#endif + H5Sclose(esid); MPI_Barrier(MPI_COMM_WORLD); if(my_rank == 0) { - /* release container version 2. This is async. 
*/ + ret = H5RCrelease(rid3, e_stack); + assert(0 == ret); ret = H5RCrelease(rid2, e_stack); assert(0 == ret); } diff --git a/src/H5VLiod.c b/src/H5VLiod.c index a4bc45f..145e0a2 100644 --- a/src/H5VLiod.c +++ b/src/H5VLiod.c @@ -3702,48 +3702,50 @@ H5VL_iod_dataset_set_extent(void *_dset, const hsize_t size[], FUNC_ENTER_NOAPI_NOINIT /* If there is information needed about the dataset that is not present locally, wait */ - if(-1 == dset->remote_dset.space_id) { + if(-1 == dset->remote_dset.dcpl_id || + -1 == dset->remote_dset.type_id || + -1 == dset->remote_dset.space_id) { /* Synchronously wait on the request attached to the dataset */ if(H5VL_iod_request_wait(dset->common.file, dset->common.request) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't wait on HG request"); dset->common.request = NULL; } + input.dims.rank = H5Sget_simple_extent_ndims(dset->remote_dset.space_id); + if(0 == input.dims.rank) + HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't extend dataset with scalar dataspace"); + /* get the transaction ID */ if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id))) HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID"); if(H5P_get(plist, H5VL_TRANS_ID, &trans_id) < 0) HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get property value for trans_id"); - /* get the TR object */ if(NULL == (tr = (H5TR_t *)H5I_object_verify(trans_id, H5I_TR))) HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a Transaction ID") - if(H5VL_iod_get_obj_requests((H5VL_iod_object_t *)dset, &num_parents, NULL) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "can't get num requests"); + if(NULL == (parent_reqs = (H5VL_iod_request_t **) + H5MM_malloc(sizeof(H5VL_iod_request_t *) * 2))) + HGOTO_ERROR(H5E_SYM, H5E_NOSPACE, FAIL, "can't allocate parent req element"); - if(num_parents) { - if(NULL == (parent_reqs = (H5VL_iod_request_t **)H5MM_malloc - (sizeof(H5VL_iod_request_t *) * num_parents))) - HGOTO_ERROR(H5E_SYM, H5E_NOSPACE, FAIL, "can't allocate array of parent reqs"); - if(H5VL_iod_get_obj_requests((H5VL_iod_object_t *)dset, &num_parents, - parent_reqs) < 0) - HGOTO_ERROR(H5E_SYM, H5E_CANTGET, FAIL, "can't get parent requests"); - } + /* retrieve parent requests */ + if(H5VL_iod_get_parent_requests((H5VL_iod_object_t *)dset, (H5VL_iod_req_info_t *)tr, + parent_reqs, &num_parents) < 0) + HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "Failed to retrieve parent requests"); /* Fill input structure */ input.coh = dset->common.file->remote_file.coh; input.iod_oh = dset->remote_dset.iod_oh; input.iod_id = dset->remote_dset.iod_id; input.mdkv_id = dset->remote_dset.mdkv_id; - input.dims.rank = H5Sget_simple_extent_ndims(dset->remote_dset.space_id); input.dims.size = size; input.trans_num = tr->trans_num; input.rcxt_num = tr->c_version; input.cs_scope = dset->common.file->md_integrity_scope; #if H5_EFF_DEBUG - printf("Dataset Set Extent, axe id %"PRIu64"\n", g_axe_id); + printf("Dataset Set Extent (rank %d size %zu), axe id %"PRIu64"\n", + input.dims.rank, input.dims.size[0], g_axe_id); #endif status = (int *)malloc(sizeof(int)); diff --git a/src/H5VLiod_analysis.c b/src/H5VLiod_analysis.c index 0db2590..3487b6e 100644 --- a/src/H5VLiod_analysis.c +++ b/src/H5VLiod_analysis.c @@ -431,25 +431,35 @@ H5VL__iod_combine(const char *combine_script, void **split_data, size_t *split_n PyObject *po_func = NULL, *po_numpy_arrays = NULL, *po_args_tup = NULL; PyObject *po_numpy_array_combine = NULL; - size_t i; + size_t count = 0; + size_t i, k = 0; if(!numpy_initialized) H5VL__iod_numpy_init(); + 
for(i = 0; i < num_targets; i++) { + if(0 != split_num_elmts[i]) + count ++; + } + /* Create numpy arrays */ - if(NULL == (po_numpy_arrays = PyList_New((Py_ssize_t) num_targets))) + if(NULL == (po_numpy_arrays = PyList_New((Py_ssize_t) count))) HGOTO_ERROR_FF(FAIL, "can't create list of arrays") + for(i = 0; i < num_targets; i++) { PyObject *po_numpy_array = NULL; int py_ret = 0; - if(NULL == (po_numpy_array = - H5VL__iod_create_numpy_array(split_num_elmts[i], - data_type_id, - split_data[i]))) - HGOTO_ERROR_FF(FAIL, "can't create numpy array from data") + if(0 != split_num_elmts[i]) { + if(NULL == (po_numpy_array = + H5VL__iod_create_numpy_array(split_num_elmts[i], + data_type_id, + split_data[i]))) + HGOTO_ERROR_FF(FAIL, "can't create numpy array from data") - if(0 != (py_ret = PyList_SetItem(po_numpy_arrays, (Py_ssize_t) i, po_numpy_array))) - HGOTO_ERROR_FF(FAIL, "can't set item to array list") + if(0 != (py_ret = PyList_SetItem(po_numpy_arrays, (Py_ssize_t) k, po_numpy_array))) + HGOTO_ERROR_FF(FAIL, "can't set item to array list") + k ++; + } } /* Load script */ @@ -477,7 +487,7 @@ done: /* Cleanup */ Py_XDECREF(po_func); Py_XDECREF(po_args_tup); - for(i = 0; i < num_targets; i++) { + for(i = 0; i < count; i++) { Py_XDECREF(po_numpy_arrays + i); } Py_XDECREF(po_numpy_array_combine); @@ -520,7 +530,9 @@ H5VL__iod_request_container_open(const char *file_name, iod_handle_t **cohs) } /* open the container */ - printf("(%d) Calling iod_container_open on %s\n", my_rank_g, file_name); +#if H5_EFF_DEBUG + fprintf(stderr, "(%d) Calling iod_container_open on %s\n", my_rank_g, file_name); +#endif ret = iod_container_open(file_name, NULL, IOD_CONT_R, &temp_cohs[0], NULL); if(ret < 0) HGOTO_ERROR_FF(ret, "can't open file"); @@ -606,7 +618,7 @@ H5VL__iod_farm_work(iod_obj_map_t *obj_map, iod_handle_t *cohs, const char *split_script, const char *combine_script) { herr_t ret_value = SUCCEED; /* Return value */ - void **split_data; + void **split_data = NULL; size_t *split_num_elmts; void *combine_data; size_t combine_num_elmts; @@ -621,7 +633,8 @@ H5VL__iod_farm_work(iod_obj_map_t *obj_map, iod_handle_t *cohs, if(NULL == (hg_reqs = (hg_request_t *) malloc(sizeof(hg_request_t) * num_targets))) HGOTO_ERROR_FF(FAIL, "can't allocate HG requests"); - if(NULL == (farm_output = (analysis_farm_out_t *) malloc(sizeof(analysis_farm_out_t) * num_targets))) + if(NULL == (farm_output = (analysis_farm_out_t *) malloc + (sizeof(analysis_farm_out_t) * num_targets))) HGOTO_ERROR_FF(FAIL, "can't allocate HG requests"); if(NULL == (split_data = (void **) malloc(sizeof(void *) * num_targets))) @@ -649,7 +662,9 @@ H5VL__iod_farm_work(iod_obj_map_t *obj_map, iod_handle_t *cohs, if (0 == strcmp(obj_map->u_map.array_map.array_range[i].loc, server_loc_g[j])) { server_idx = j; - printf("(%d) Server %d owns this object\n", my_rank_g, server_idx); +#if H5_EFF_DEBUG + fprintf(stderr, "(%d) Server %d owns this object\n", my_rank_g, server_idx); +#endif break; } } @@ -676,7 +691,8 @@ H5VL__iod_farm_work(iod_obj_map_t *obj_map, iod_handle_t *cohs, for (i = 0; i < num_targets; i++) { if (hg_reqs[i] == HG_REQUEST_NULL) { /* No request / was local */ - } else { + } + else { analysis_transfer_in_t transfer_input; analysis_transfer_out_t transfer_output; hg_bulk_t bulk_handle; @@ -687,35 +703,41 @@ H5VL__iod_farm_work(iod_obj_map_t *obj_map, iod_handle_t *cohs, if(HG_Wait(hg_reqs[i], HG_MAX_IDLE_TIME, HG_STATUS_IGNORE) < 0) HGOTO_ERROR_FF(FAIL, "HG_Wait Failed"); - /* Get split type ID and num_elemts (all the arrays should have the 
same native type id) */ - server_idx = farm_output[i].server_idx; - split_type_id = farm_output[i].type_id; - split_num_elmts[i] = farm_output[i].num_elmts; - split_data_size = split_num_elmts[i] * H5Tget_size(split_type_id); -// printf("Getting %d elements of size %zu from server %zu\n", -// split_num_elmts[i], H5Tget_size(split_type_id), server_idx); - - if(NULL == (split_data[i] = malloc(split_data_size))) - HGOTO_ERROR_FF(FAIL, "can't allocate farm buffer"); + if(0 != farm_output[i].num_elmts) { + /* Get split type ID and num_elemts + (all the arrays should have the same native type id) */ + server_idx = farm_output[i].server_idx; + split_type_id = farm_output[i].type_id; + split_num_elmts[i] = farm_output[i].num_elmts; + split_data_size = split_num_elmts[i] * H5Tget_size(split_type_id); + // fprintf(stderr, "Getting %d elements of size %zu from server %zu\n", + // split_num_elmts[i], H5Tget_size(split_type_id), server_idx); - HG_Bulk_handle_create(split_data[i], split_data_size, - HG_BULK_READWRITE, &bulk_handle); + if(NULL == (split_data[i] = malloc(split_data_size))) + HGOTO_ERROR_FF(FAIL, "can't allocate farm buffer"); - transfer_input.axe_id = farm_output[i].axe_id; - transfer_input.bulk_handle = bulk_handle; + HG_Bulk_handle_create(split_data[i], split_data_size, + HG_BULK_READWRITE, &bulk_handle); - /* forward a free call to the target server */ - if(HG_Forward(server_addr_g[server_idx], H5VL_EFF_ANALYSIS_FARM_TRANSFER, - &transfer_input, &transfer_output, &hg_reqs[i]) < 0) - HGOTO_ERROR_FF(FAIL, "failed to ship operation"); + transfer_input.axe_id = farm_output[i].axe_id; + transfer_input.bulk_handle = bulk_handle; - /* Wait for the farmed work to complete */ - if(HG_Wait(hg_reqs[i], HG_MAX_IDLE_TIME, HG_STATUS_IGNORE) < 0) - HGOTO_ERROR_FF(FAIL, "HG_Wait Failed"); + /* forward a free call to the target server */ + if(HG_Forward(server_addr_g[server_idx], H5VL_EFF_ANALYSIS_FARM_TRANSFER, + &transfer_input, &transfer_output, &hg_reqs[i]) < 0) + HGOTO_ERROR_FF(FAIL, "failed to ship operation"); - /* Free bulk handle */ - HG_Bulk_handle_free(bulk_handle); + /* Wait for the farmed work to complete */ + if(HG_Wait(hg_reqs[i], HG_MAX_IDLE_TIME, HG_STATUS_IGNORE) < 0) + HGOTO_ERROR_FF(FAIL, "HG_Wait Failed"); + /* Free bulk handle */ + HG_Bulk_handle_free(bulk_handle); + } + else { + split_num_elmts[i] = 0; + split_data[i] = NULL; + } /* Free Mercury request */ if(HG_Request_free(hg_reqs[i]) != HG_SUCCESS) HGOTO_ERROR_FF(FAIL, "Can't Free Mercury Request"); @@ -726,8 +748,9 @@ H5VL__iod_farm_work(iod_obj_map_t *obj_map, iod_handle_t *cohs, free(hg_reqs); if(farm_output) free(farm_output); - - printf("(%d) Applying combine on data\n", my_rank_g); +#if H5_EFF_DEBUG + fprintf(stderr, "(%d) Applying combine on data\n", my_rank_g); +#endif #ifdef H5_HAVE_PYTHON if (H5VL__iod_combine(combine_script, split_data, split_num_elmts, num_targets, split_type_id, &combine_data, @@ -738,12 +761,18 @@ H5VL__iod_farm_work(iod_obj_map_t *obj_map, iod_handle_t *cohs, /* free farm data */ if (split_data) { for (i = 0; i < num_targets; i++) { - free(split_data[i]); + if(split_data[i]) { + free(split_data[i]); + split_data[i] = NULL; + } } free(split_data); + split_data = NULL; + } + if(split_num_elmts) { + free(split_num_elmts); + split_num_elmts = NULL; } - free(split_num_elmts); - done: return ret_value; } /* end H5VL__iod_farm_work */ @@ -874,7 +903,7 @@ H5VL_iod_server_analysis_execute_cb(AXE_engine_t UNUSED axe_engine, query_id, split_script, combine_script); if(SUCCEED != ret) HGOTO_ERROR_FF(ret, 
"can't farm work"); - + fprintf(stderr, "DONE with combine phase\n"); /********************************************/ ret = iod_obj_free_map(obj_oh.rd_oh, obj_map); @@ -941,7 +970,7 @@ H5VL__iod_farm_split(iod_handle_t coh, iod_obj_id_t obj_id, iod_trans_id_t rtid, size_t *split_num_elmts, hid_t *split_type_id) { void *data = NULL; - size_t num_elmts; + size_t num_elmts = 0; herr_t ret_value = SUCCEED; hid_t space_layout; @@ -958,21 +987,24 @@ H5VL__iod_farm_split(iod_handle_t coh, iod_obj_id_t obj_id, iod_trans_id_t rtid, /* Apply split python script on data from query */ #ifdef H5_HAVE_PYTHON - if(FAIL == H5VL__iod_split(split_script, data, num_elmts, type_id, - split_data, split_num_elmts, split_type_id)) - HGOTO_ERROR_FF(FAIL, "can't apply split script to data"); + if(num_elmts) { + if(FAIL == H5VL__iod_split(split_script, data, num_elmts, type_id, + split_data, split_num_elmts, split_type_id)) + HGOTO_ERROR_FF(FAIL, "can't apply split script to data"); + } #endif /* Free the data after split operation */ - H5MM_free(data); - data = NULL; - + if(data) { + H5MM_free(data); + data = NULL; + } done: if(space_layout) H5Sclose(space_layout); return ret_value; -} /* end H5VL__iod_farm_work */ +} /* end H5VL__iod_farm_split */ /*------------------------------------------------------------------------- * Function: H5VL_iod_server_analysis_farm_cb @@ -1003,8 +1035,8 @@ H5VL_iod_server_analysis_farm_cb(AXE_engine_t UNUSED axe_engine, const char *split_script = input->split_script; iod_size_t num_cells = input->num_cells; void *split_data = NULL; - size_t split_num_elmts; - hid_t split_type_id; + size_t split_num_elmts = 0; + hid_t split_type_id = FAIL; herr_t ret_value = SUCCEED; if(H5VL__iod_farm_split(coh, obj_id, rtid, space_id, coords, num_cells, @@ -1028,6 +1060,10 @@ H5VL_iod_server_analysis_farm_cb(AXE_engine_t UNUSED axe_engine, HG_Handler_start_output(op_data->hg_handle, &output->farm_out); done: + if(0 == split_num_elmts) { + output = (H5VLiod_farm_data_t *)H5MM_xfree(output); + op_data = (op_data_t *)H5MM_xfree(op_data); + } input = (analysis_farm_in_t *)H5MM_xfree(input); } /* end H5VL_iod_server_analysis_farm_cb() */ @@ -1066,8 +1102,9 @@ H5VL_iod_server_analysis_transfer_cb(AXE_engine_t axe_engine, farm_output = (H5VLiod_farm_data_t *)farm_op_data->output; data_size = HG_Bulk_handle_get_size(input->bulk_handle); - printf("(%d) Transferring split data back to master\n", my_rank_g); - +#if H5_EFF_DEBUG + fprintf(stderr, "(%d) Transferring split data back to master\n", my_rank_g); +#endif HG_Bulk_handle_create(farm_output->data, data_size, HG_BULK_READ_ONLY, &bulk_block_handle); @@ -1093,7 +1130,7 @@ done: input = (analysis_transfer_in_t *)H5MM_xfree(input); op_data = (op_data_t *)H5MM_xfree(op_data); -} /* end H5VL_iod_server_analysis_farm_cb() */ +} /* end H5VL_iod_server_analysis_transfer_cb() */ /*------------------------------------------------------------------------- * Function: H5VL__iod_get_space_layout @@ -1120,8 +1157,8 @@ H5VL__iod_get_space_layout(coords_t coords, iod_size_t num_cells, hid_t space_id ndims = H5Sget_simple_extent_ndims(space_id); /* copy the original dataspace and reset selection to NONE */ - if(FAIL == (space_layout = H5Scopy(space_id))) - HGOTO_ERROR_FF(FAIL, "unable to copy dataspace"); + if((space_layout = H5Scopy(space_id)) < 0) + HGOTO_ERROR_FF(FAIL, "unable to copy dataspace") for(i=0 ; i<ndims ; i++) { start[i] = coords.start_cell[i]; |