Diffstat (limited to 'java/src/hdf/hdf5lib')
-rw-r--r--   java/src/hdf/hdf5lib/H5.java                                     66
-rw-r--r--   java/src/hdf/hdf5lib/HDF5Constants.java                          10
-rw-r--r--   java/src/hdf/hdf5lib/HDF5GroupInfo.java                          24
-rw-r--r--   java/src/hdf/hdf5lib/HDFArray.java                               62
-rw-r--r--   java/src/hdf/hdf5lib/HDFNativeData.java                          30
-rw-r--r--   java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java         2
-rw-r--r--   java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java           243
-rw-r--r--   java/src/hdf/hdf5lib/structs/H5E_error2_t.java                   22
-rw-r--r--   java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java               33
-rw-r--r--   java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java               20
-rw-r--r--   java/src/hdf/hdf5lib/structs/H5F_info2_t.java                    39
-rw-r--r--   java/src/hdf/hdf5lib/structs/H5G_info_t.java                     12
-rw-r--r--   java/src/hdf/hdf5lib/structs/H5L_info_t.java                     10
-rw-r--r--   java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java                 30
-rw-r--r--   java/src/hdf/hdf5lib/structs/H5O_info_t.java                     39
-rw-r--r--   java/src/hdf/hdf5lib/structs/H5O_native_info_t.java              10
-rw-r--r--   java/src/hdf/hdf5lib/structs/H5O_token_t.java                    10
-rw-r--r--   java/src/hdf/hdf5lib/structs/H5_ih_info_t.java                    6
18 files changed, 526 insertions, 142 deletions
diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java
index 960c872..63547df 100644
--- a/java/src/hdf/hdf5lib/H5.java
+++ b/java/src/hdf/hdf5lib/H5.java
@@ -512,8 +512,8 @@ public class H5 implements java.io.Serializable {
*
* @param file_export_name
* The file name to export data into.
- * @param file_name
- * The name of the HDF5 file containing the dataset.
+ * @param file_id
+ * The identifier of the HDF5 file containing the dataset.
* @param object_path
* The full path of the dataset to be exported.
* @param binary_order
@@ -525,10 +525,31 @@ public class H5 implements java.io.Serializable {
* @exception HDF5LibraryException
* - Error from the HDF-5 Library.
**/
- public synchronized static native void H5export_dataset(String file_export_name, String file_name,
+ public synchronized static native void H5export_dataset(String file_export_name, long file_id,
String object_path, int binary_order) throws HDF5LibraryException;
/**
+ * H5export_attribute is a utility function to save attribute data in a file.
+ *
+ * @param file_export_name
+ * The file name to export data into.
+ * @param dataset_id
+ * The identifier of the dataset containing the attribute.
+ * @param attribute_name
+ * The attribute to be exported.
+ * @param binary_order
+ * 99 - export data as text.
+ * 1 - export data as binary Native Order.
+ * 2 - export data as binary Little Endian.
+ * 3 - export data as binary Big Endian.
+ *
+ * @exception HDF5LibraryException
+ * - Error from the HDF-5 Library.
+ **/
+ public synchronized static native void H5export_attribute(String file_export_name, long dataset_id,
+ String attribute_name, int binary_order) throws HDF5LibraryException;
+
+ /**
* H5is_library_threadsafe Checks to see if the library was built with thread-safety enabled.
*
* @return true if hdf5 library implements threadsafe
@@ -10705,7 +10726,18 @@ public class H5 implements java.io.Serializable {
* @exception IllegalArgumentException
* - an input array is invalid.
**/
- public synchronized static native long H5Ropen_object(byte[] ref_ptr, long rapl_id, long oapl_id)
+ public static long H5Ropen_object(byte[] ref_ptr, long rapl_id, long oapl_id)
+ throws HDF5LibraryException, NullPointerException, IllegalArgumentException {
+ long id = _H5Ropen_object(ref_ptr, rapl_id, oapl_id);
+ if (id > 0) {
+ log.trace("OPEN_IDS: H5Ropen_object add {}", id);
+ OPEN_IDS.add(id);
+ log.trace("OPEN_IDS: {}", OPEN_IDS.size());
+ }
+ return id;
+ }
+
+ private synchronized static native long _H5Ropen_object(byte[] ref_ptr, long rapl_id, long oapl_id)
throws HDF5LibraryException, NullPointerException, IllegalArgumentException;
/**
@@ -10733,7 +10765,18 @@ public class H5 implements java.io.Serializable {
* @exception IllegalArgumentException
* - an input array is invalid.
**/
- public synchronized static native long H5Ropen_region(byte[] ref_ptr, long rapl_id, long oapl_id)
+ public static long H5Ropen_region(byte[] ref_ptr, long rapl_id, long oapl_id)
+ throws HDF5LibraryException, NullPointerException, IllegalArgumentException {
+ long id = _H5Ropen_region(ref_ptr, rapl_id, oapl_id);
+ if (id > 0) {
+ log.trace("OPEN_IDS: H5Ropen_region add {}", id);
+ OPEN_IDS.add(id);
+ log.trace("OPEN_IDS: {}", OPEN_IDS.size());
+ }
+ return id;
+ }
+
+ private synchronized static native long _H5Ropen_region(byte[] ref_ptr, long rapl_id, long oapl_id)
throws HDF5LibraryException, NullPointerException, IllegalArgumentException;
/**
@@ -10761,7 +10804,18 @@ public class H5 implements java.io.Serializable {
* @exception IllegalArgumentException
* - an input array is invalid.
**/
- public synchronized static native long H5Ropen_attr(byte[] ref_ptr, long rapl_id, long aapl_id)
+ public static long H5Ropen_attr(byte[] ref_ptr, long rapl_id, long aapl_id)
+ throws HDF5LibraryException, NullPointerException, IllegalArgumentException {
+ long id = _H5Ropen_attr(ref_ptr, rapl_id, aapl_id);
+ if (id > 0) {
+ log.trace("OPEN_IDS: H5Ropen_attr add {}", id);
+ OPEN_IDS.add(id);
+ log.trace("OPEN_IDS: {}", OPEN_IDS.size());
+ }
+ return id;
+ }
+
+ private synchronized static native long _H5Ropen_attr(byte[] ref_ptr, long rapl_id, long aapl_id)
throws HDF5LibraryException, NullPointerException, IllegalArgumentException;
// Get type //
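
With H5export_dataset now keyed on the open file identifier and H5export_attribute added
alongside it, a minimal usage sketch looks like this (file name, dataset path, and attribute
name are hypothetical; H5Fopen/H5Dopen are the existing bindings in this class):

    import hdf.hdf5lib.H5;
    import hdf.hdf5lib.HDF5Constants;

    public class ExportExample {
        public static void main(String[] args) throws Exception {
            // Open the (hypothetical) HDF5 file read-only.
            long file_id = H5.H5Fopen("example.h5", HDF5Constants.H5F_ACC_RDONLY,
                                      HDF5Constants.H5P_DEFAULT);
            // H5export_dataset now takes the open file id instead of the file name.
            H5.H5export_dataset("dset.txt", file_id, "/g1/dset", 99 /* text */);

            // New companion routine: export a named attribute of an open dataset.
            long dset_id = H5.H5Dopen(file_id, "/g1/dset", HDF5Constants.H5P_DEFAULT);
            H5.H5export_attribute("attr.txt", dset_id, "attr1", 99 /* text */);

            H5.H5Dclose(dset_id);
            H5.H5Fclose(file_id);
        }
    }
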
diff --git a/java/src/hdf/hdf5lib/HDF5Constants.java b/java/src/hdf/hdf5lib/HDF5Constants.java
index 334abee..4125907 100644
--- a/java/src/hdf/hdf5lib/HDF5Constants.java
+++ b/java/src/hdf/hdf5lib/HDF5Constants.java
@@ -55,15 +55,15 @@ public class HDF5Constants {
public static final int H5_INDEX_CRT_ORDER = H5_INDEX_CRT_ORDER();
/** indices on links, number of indices defined */
public static final int H5_INDEX_N = H5_INDEX_N();
- /** */
+ /** Common iteration orders, Unknown order */
public static final int H5_ITER_UNKNOWN = H5_ITER_UNKNOWN();
- /** */
+ /** Common iteration orders, Increasing order */
public static final int H5_ITER_INC = H5_ITER_INC();
- /** */
+ /** Common iteration orders, Decreasing order */
public static final int H5_ITER_DEC = H5_ITER_DEC();
- /** */
+ /** Common iteration orders, No particular order, whatever is fastest */
public static final int H5_ITER_NATIVE = H5_ITER_NATIVE();
- /** */
+ /** Common iteration orders, Number of iteration orders */
public static final int H5_ITER_N = H5_ITER_N();
/** */
public static final int H5AC_CURR_CACHE_CONFIG_VERSION = H5AC_CURR_CACHE_CONFIG_VERSION();
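
The newly documented iteration-order constants are the values handed to the order argument of
the link and attribute iteration routines. A short sketch listing a group's links by name in
increasing order (group path hypothetical; assumes the existing H5Gget_info_by_name and
H5Lget_name_by_idx bindings):

    import hdf.hdf5lib.H5;
    import hdf.hdf5lib.HDF5Constants;
    import hdf.hdf5lib.structs.H5G_info_t;

    public class IterOrderExample {
        public static void main(String[] args) throws Exception {
            long file_id = H5.H5Fopen("example.h5", HDF5Constants.H5F_ACC_RDONLY,
                                      HDF5Constants.H5P_DEFAULT);
            // Count the links in the (hypothetical) group, then visit them by name, ascending.
            H5G_info_t ginfo = H5.H5Gget_info_by_name(file_id, "/g1", HDF5Constants.H5P_DEFAULT);
            for (long i = 0; i < ginfo.nlinks; i++) {
                String name = H5.H5Lget_name_by_idx(file_id, "/g1",
                        HDF5Constants.H5_INDEX_NAME, HDF5Constants.H5_ITER_INC,
                        i, HDF5Constants.H5P_DEFAULT);
                System.out.println(name);
            }
            H5.H5Fclose(file_id);
        }
    }
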
diff --git a/java/src/hdf/hdf5lib/HDF5GroupInfo.java b/java/src/hdf/hdf5lib/HDF5GroupInfo.java
index e08f991..4c31af7 100644
--- a/java/src/hdf/hdf5lib/HDF5GroupInfo.java
+++ b/java/src/hdf/hdf5lib/HDF5GroupInfo.java
@@ -90,32 +90,44 @@ public class HDF5GroupInfo {
linklen = 0;
}
- /** fileno accessors */
+ /** fileno accessors
+ * @return the file number if successful
+ */
public long[] getFileno() {
return fileno;
}
- /** accessors */
+ /** accessors
+ * @return the object number if successful
+ */
public long[] getObjno() {
return objno;
}
- /** accessors */
+ /** accessors
+ * @return type of group if successful
+ */
public int getType() {
return type;
}
- /** accessors */
+ /** accessors
+ * @return the number of links in the group if successful
+ */
public int getNlink() {
return nlink;
}
- /** accessors */
+ /** accessors
+ * @return the modified time value if successful
+ */
public long getMtime() {
return mtime;
}
- /** accessors */
+ /** accessors
+ * @return the length of the link name if successful
+ */
public int getLinklen() {
return linklen;
}
diff --git a/java/src/hdf/hdf5lib/HDFArray.java b/java/src/hdf/hdf5lib/HDFArray.java
index 385d71b..21e2b02 100644
--- a/java/src/hdf/hdf5lib/HDFArray.java
+++ b/java/src/hdf/hdf5lib/HDFArray.java
@@ -46,10 +46,10 @@ public class HDFArray {
*
* @param anArray
* The array object.
- * @exception hdf.hdf5lib.exceptions.HDF5Exception
- * object is not an array.
+ * @exception hdf.hdf5lib.exceptions.HDF5JavaException
+ * object is not an array.
*/
- public HDFArray(Object anArray) throws HDF5Exception
+ public HDFArray(Object anArray) throws HDF5JavaException
{
if (anArray == null) {
HDF5JavaException ex = new HDF5JavaException("HDFArray: array is null?: ");
@@ -76,16 +76,14 @@ public class HDFArray {
* @return A one-D array of bytes, filled with zeroes. The bytes are sufficient to hold the data of the Array passed
* to the constructor.
* @exception hdf.hdf5lib.exceptions.HDF5JavaException
- * Allocation failed.
+ * Allocation failed.
*/
- public byte[] emptyBytes()
- throws HDF5JavaException
+ public byte[] emptyBytes() throws HDF5JavaException
{
byte[] b = null;
- if ((ArrayDescriptor.dims == 1)
- && (ArrayDescriptor.NT == 'B')) {
+ if ((ArrayDescriptor.dims == 1) && (ArrayDescriptor.NT == 'B')) {
b = (byte[]) _theArray;
}
else {
@@ -103,10 +101,9 @@ public class HDFArray {
*
* @return A one-D array of bytes, constructed from the Array passed to the constructor.
* @exception hdf.hdf5lib.exceptions.HDF5JavaException
- * the object not an array or other internal error.
+ * the object not an array or other internal error.
*/
- public byte[] byteify()
- throws HDF5JavaException
+ public byte[] byteify() throws HDF5JavaException
{
if (_barray != null) {
return _barray;
@@ -224,8 +221,6 @@ public class HDFArray {
if (ArrayDescriptor.NT == 'J') {
arow = HDFNativeData.longToByte(0, ArrayDescriptor.dimlen[ArrayDescriptor.dims],
(long[]) ArrayDescriptor.objs[ArrayDescriptor.dims - 1]);
- arow = HDFNativeData.longToByte(0, ArrayDescriptor.dimlen[ArrayDescriptor.dims],
- (long[]) ArrayDescriptor.objs[ArrayDescriptor.dims - 1]);
}
else if (ArrayDescriptor.NT == 'I') {
arow = HDFNativeData.intToByte(0, ArrayDescriptor.dimlen[ArrayDescriptor.dims],
@@ -552,8 +547,7 @@ public class HDFArray {
+ "?"));
}
}
- if (ArrayDescriptor.currentindex[ArrayDescriptor.dims - 1] != ArrayDescriptor.dimlen[ArrayDescriptor.dims
- - 1]) {
+ if (ArrayDescriptor.currentindex[ArrayDescriptor.dims - 1] != ArrayDescriptor.dimlen[ArrayDescriptor.dims - 1]) {
throw new java.lang.InternalError(
new String("HDFArray::arrayify Panic didn't complete all data: currentindex[" + i + "] = "
+ ArrayDescriptor.currentindex[i] + " (should be " + (ArrayDescriptor.dimlen[i]) + "?"));
@@ -580,7 +574,7 @@ public class HDFArray {
Integer[] out = new Integer[nelems];
for (int i = 0; i < nelems; i++) {
- out[i] = new Integer(in[i]);
+ out[i] = Integer.valueOf(in[i]);
}
return out;
}
@@ -592,7 +586,7 @@ public class HDFArray {
Integer[] out = new Integer[nelems];
for (int i = 0; i < nelems; i++) {
- out[i] = new Integer(in[i]);
+ out[i] = Integer.valueOf(in[i]);
}
return out;
}
@@ -615,7 +609,7 @@ public class HDFArray {
Short[] out = new Short[nelems];
for (int i = 0; i < nelems; i++) {
- out[i] = new Short(in[i]);
+ out[i] = Short.valueOf(in[i]);
}
return out;
}
@@ -627,7 +621,7 @@ public class HDFArray {
Short[] out = new Short[nelems];
for (int i = 0; i < nelems; i++) {
- out[i] = new Short(in[i]);
+ out[i] = Short.valueOf(in[i]);
}
return out;
}
@@ -649,7 +643,7 @@ public class HDFArray {
Byte[] out = new Byte[nelems];
for (int i = 0; i < nelems; i++) {
- out[i] = new Byte(bin[i]);
+ out[i] = Byte.valueOf(bin[i]);
}
return out;
}
@@ -659,7 +653,7 @@ public class HDFArray {
Byte[] out = new Byte[len];
for (int i = 0; i < len; i++) {
- out[i] = new Byte(bin[i]);
+ out[i] = Byte.valueOf(bin[i]);
}
return out;
}
@@ -682,7 +676,7 @@ public class HDFArray {
Float[] out = new Float[nelems];
for (int i = 0; i < nelems; i++) {
- out[i] = new Float(in[i]);
+ out[i] = Float.valueOf(in[i]);
}
return out;
}
@@ -694,7 +688,7 @@ public class HDFArray {
Float[] out = new Float[nelems];
for (int i = 0; i < nelems; i++) {
- out[i] = new Float(in[i]);
+ out[i] = Float.valueOf(in[i]);
}
return out;
}
@@ -717,7 +711,7 @@ public class HDFArray {
Double[] out = new Double[nelems];
for (int i = 0; i < nelems; i++) {
- out[i] = new Double(in[i]);
+ out[i] = Double.valueOf(in[i]);
}
return out;
}
@@ -729,7 +723,7 @@ public class HDFArray {
Double[] out = new Double[nelems];
for (int i = 0; i < nelems; i++) {
- out[i] = new Double(in[i]);
+ out[i] = Double.valueOf(in[i]);
}
return out;
}
@@ -752,7 +746,7 @@ public class HDFArray {
Long[] out = new Long[nelems];
for (int i = 0; i < nelems; i++) {
- out[i] = new Long(in[i]);
+ out[i] = Long.valueOf(in[i]);
}
return out;
}
@@ -764,7 +758,7 @@ public class HDFArray {
Long[] out = new Long[nelems];
for (int i = 0; i < nelems; i++) {
- out[i] = new Long(in[i]);
+ out[i] = Long.valueOf(in[i]);
}
return out;
}
@@ -790,12 +784,12 @@ class ArrayDescriptor {
static int dims = 0;
static String className;
- public ArrayDescriptor(Object anArray) throws HDF5Exception
+ public ArrayDescriptor(Object anArray) throws HDF5JavaException
{
Class tc = anArray.getClass();
if (tc.isArray() == false) {
/* exception: not an array */
- HDF5Exception ex = new HDF5JavaException("ArrayDescriptor: not an array?: ");
+ HDF5JavaException ex = new HDF5JavaException("ArrayDescriptor: not an array?: ");
throw (ex);
}
@@ -827,12 +821,10 @@ class ArrayDescriptor {
else if (NT == 'S') {
NTsize = 2;
}
- else if ((NT == 'I')
- || (NT == 'F')) {
+ else if ((NT == 'I') || (NT == 'F')) {
NTsize = 4;
}
- else if ((NT == 'J')
- || (NT == 'D')) {
+ else if ((NT == 'J') || (NT == 'D')) {
NTsize = 8;
}
else if (css.startsWith("Ljava.lang.Byte")) {
@@ -925,8 +917,8 @@ class ArrayDescriptor {
System.out.println("Type: " + theType);
System.out.println("Class: " + theClass);
System.out.println("NT: " + NT + " NTsize: " + NTsize);
- System.out
- .println("Array has " + dims + " dimensions (" + totalSize + " bytes, " + totalElements + " elements)");
+ System.out.println("Array has " + dims + " dimensions (" + totalSize
+ + " bytes, " + totalElements + " elements)");
int i;
for (i = 0; i <= dims; i++) {
Class tc = objs[i].getClass();
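
HDFArray now declares the narrower HDF5JavaException and boxes primitives with the valueOf
factories rather than the deprecated wrapper constructors. A byteify/arrayify round-trip
sketch with arbitrary sample data:

    import hdf.hdf5lib.HDFArray;

    public class HDFArrayExample {
        public static void main(String[] args) throws Exception {
            int[][] data = { {1, 2, 3}, {4, 5, 6} };
            // Flatten the Java array into a native-order byte array ...
            byte[] bytes = new HDFArray(data).byteify();
            // ... then rebuild an array of the same shape from those bytes.
            int[][] back = (int[][]) new HDFArray(new int[2][3]).arrayify(bytes);
            System.out.println(back[1][2]); // prints 6
        }
    }
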
diff --git a/java/src/hdf/hdf5lib/HDFNativeData.java b/java/src/hdf/hdf5lib/HDFNativeData.java
index 5b29050..85378db 100644
--- a/java/src/hdf/hdf5lib/HDFNativeData.java
+++ b/java/src/hdf/hdf5lib/HDFNativeData.java
@@ -153,8 +153,7 @@ public class HDFNativeData {
* The input array of bytes
* @return an array of 'len' float
*/
- public synchronized static native float[] byteToFloat(int start, int len,
- byte[] data);
+ public synchronized static native float[] byteToFloat(int start, int len, byte[] data);
/**
* Convert 4 bytes from an array of bytes into a single float
@@ -437,41 +436,38 @@ public class HDFNativeData {
* - Error unsupported type.
*/
public synchronized static Object byteToNumber(byte[] barray, Object obj)
- throws HDF5Exception {
+ throws HDF5Exception
+ {
Class theClass = obj.getClass();
String type = theClass.getName();
Object retobj = null;
if (type.equals("java.lang.Integer")) {
int[] i = hdf.hdf5lib.HDFNativeData.byteToInt(0, 1, barray);
- retobj = new Integer(i[0]);
+ retobj = Integer.valueOf(i[0]);
}
else if (type.equals("java.lang.Byte")) {
- retobj = new Byte(barray[0]);
+ retobj = Byte.valueOf(barray[0]);
}
else if (type.equals("java.lang.Short")) {
- short[] f = hdf.hdf5lib.HDFNativeData
- .byteToShort(0, 1, barray);
- retobj = new Short(f[0]);
+ short[] f = hdf.hdf5lib.HDFNativeData.byteToShort(0, 1, barray);
+ retobj = Short.valueOf(f[0]);
}
else if (type.equals("java.lang.Float")) {
- float[] f = hdf.hdf5lib.HDFNativeData
- .byteToFloat(0, 1, barray);
- retobj = new Float(f[0]);
+ float[] f = hdf.hdf5lib.HDFNativeData.byteToFloat(0, 1, barray);
+ retobj = Float.valueOf(f[0]);
}
else if (type.equals("java.lang.Long")) {
long[] f = hdf.hdf5lib.HDFNativeData.byteToLong(0, 1, barray);
- retobj = new Long(f[0]);
+ retobj = Long.valueOf(f[0]);
}
else if (type.equals("java.lang.Double")) {
- double[] f = hdf.hdf5lib.HDFNativeData.byteToDouble(0, 1,
- barray);
- retobj = new Double(f[0]);
+ double[] f = hdf.hdf5lib.HDFNativeData.byteToDouble(0, 1, barray);
+ retobj = Double.valueOf(f[0]);
}
else {
/* exception: unsupported type */
- HDF5Exception ex = new HDF5JavaException(
- "byteToNumber: setfield bad type: " + obj + " " + type);
+ HDF5Exception ex = new HDF5JavaException("byteToNumber: setfield bad type: " + obj + " " + type);
throw (ex);
}
return (retobj);
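
byteToNumber likewise returns values boxed through the valueOf factories; the conversion is
chosen from the class of the template object passed in. A small sketch (sample value arbitrary):

    import hdf.hdf5lib.HDFNativeData;

    public class NativeDataExample {
        public static void main(String[] args) throws Exception {
            // Bytes for one int in native order, produced by the matching intToByte helper.
            byte[] raw = HDFNativeData.intToByte(0, 1, new int[] { 42 });
            // The Integer template selects byteToInt internally; the result is boxed
            // with Integer.valueOf().
            Integer n = (Integer) HDFNativeData.byteToNumber(raw, Integer.valueOf(0));
            System.out.println(n); // prints 42
        }
    }
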
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java b/java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java
index e3e774c..532355e 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java
@@ -30,7 +30,9 @@ import hdf.hdf5lib.HDF5Constants;
@SuppressWarnings("serial")
public class HDF5LibraryException extends HDF5Exception {
+ /** major error number of the first error on the HDF5 library error stack. */
private final long majorErrorNumber;
+ /** minor error number of the first error on the HDF5 library error stack. */
private final long minorErrorNumber;
/**
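
The two fields now carry javadoc; they back the exception's error-number accessors. A sketch of
reading them after a deliberately failing open (assumes the existing getMajorErrorNumber() and
getMinorErrorNumber() methods):

    import hdf.hdf5lib.H5;
    import hdf.hdf5lib.HDF5Constants;
    import hdf.hdf5lib.exceptions.HDF5LibraryException;

    public class ErrorExample {
        public static void main(String[] args) {
            try {
                // Opening a file that does not exist raises HDF5LibraryException.
                H5.H5Fopen("does-not-exist.h5", HDF5Constants.H5F_ACC_RDONLY,
                           HDF5Constants.H5P_DEFAULT);
            }
            catch (HDF5LibraryException e) {
                System.err.println("major: " + e.getMajorErrorNumber()
                                 + " minor: " + e.getMinorErrorNumber());
            }
        }
    }
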
diff --git a/java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java b/java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java
index 9f04211..cf84532 100644
--- a/java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java
@@ -20,41 +20,270 @@ import java.io.Serializable;
*/
public class H5AC_cache_config_t implements Serializable{
private static final long serialVersionUID = -6748085696476149972L;
- // general configuration fields:
+ // general configuration fields
+ /**
+ * version: Integer field containing the version number of this version
+ * of the H5AC_cache_config_t structure. Any instance of
+ * H5AC_cache_config_t passed to the cache must have a known
+ * version number, or an error will be flagged.
+ */
public int version;
+ /**
+ * rpt_fcn_enabled: Boolean field used to enable and disable the default
+ * reporting function. This function is invoked every time the
+ * automatic cache resize code is run, and reports on its activities.
+ *
+ * This is a debugging function, and should normally be turned off.
+ */
public boolean rpt_fcn_enabled;
+ /**
+ * open_trace_file: Boolean field indicating whether the trace_file_name
+ * field should be used to open a trace file for the cache.
+ *
+ * *** DEPRECATED *** Use H5Fstart/stop logging functions instead
+ */
public boolean open_trace_file;
+ /**
+ * close_trace_file: Boolean field indicating whether the current trace
+ * file (if any) should be closed.
+ *
+ * *** DEPRECATED *** Use H5Fstart/stop logging functions instead
+ */
public boolean close_trace_file;
+ /**
+ * trace_file_name: Full path of the trace file to be opened if the
+ * open_trace_file field is TRUE.
+ *
+ * *** DEPRECATED *** Use H5Fstart/stop logging functions instead
+ */
public String trace_file_name;
+ /**
+ * evictions_enabled: Boolean field used to either report the current
+ * evictions enabled status of the cache, or to set the cache's
+ * evictions enabled status.
+ */
public boolean evictions_enabled;
+ /**
+ * set_initial_size: Boolean flag indicating whether the initial size of the
+ * cache is to be set to the value given in the initial_size field. If
+ * set_initial_size is FALSE, the initial_size field is ignored.
+ */
public boolean set_initial_size;
+ /**
+ * initial_size: If enabled, this field contains the size the cache is
+ * to be set to upon receipt of this structure. Needless to say,
+ * initial_size must lie in the closed interval [min_size, max_size].
+ */
public long initial_size;
+ /**
+ * min_clean_fraction: double in the range 0 to 1 indicating the fraction
+ * of the cache that is to be kept clean. This field is only used
+ * in parallel mode. Typical values are 0.1 to 0.5.
+ */
public double min_clean_fraction;
+ /**
+ * max_size: Maximum size to which the cache can be adjusted. The
+ * supplied value must fall in the closed interval
+ * [MIN_MAX_CACHE_SIZE, MAX_MAX_CACHE_SIZE]. Also, max_size must
+ * be greater than or equal to min_size.
+ */
public long max_size;
+ /**
+ * min_size: Minimum size to which the cache can be adjusted. The
+ * supplied value must fall in the closed interval
+ * [H5C__MIN_MAX_CACHE_SIZE, H5C__MAX_MAX_CACHE_SIZE]. Also, min_size
+ * must be less than or equal to max_size.
+ */
public long min_size;
+ /**
+ * epoch_length: Number of accesses on the cache over which to collect
+ * hit rate stats before running the automatic cache resize code,
+ * if it is enabled.
+ */
public long epoch_length;
- // size increase control fields:
- public int incr_mode; // H5C_cache_incr_mode
+ // size increase control fields
+ /**
+ * incr_mode: Instance of the H5C_cache_incr_mode enumerated type whose
+ * value indicates how we determine whether the cache size should be
+ * increased. At present there are two possible values.
+ */
+ public int incr_mode;
+ /**
+ * lower_hr_threshold: Lower hit rate threshold. If the increment mode
+ * (incr_mode) is H5C_incr__threshold and the hit rate drops below the
+ * value supplied in this field in an epoch, increment the cache size by
+ * size_increment. Note that cache size may not be incremented above
+ * max_size, and that the increment may be further restricted by the
+ * max_increment field if it is enabled.
+ */
public double lower_hr_threshold;
+ /**
+ * increment: Double containing the multiplier used to derive the new
+ * cache size from the old if a cache size increment is triggered.
+ * The increment must be greater than 1.0, and should not exceed 2.0.
+ */
public double increment;
+ /**
+ * apply_max_increment: Boolean flag indicating whether the max_increment
+ * field should be used to limit the maximum cache size increment.
+ */
public boolean apply_max_increment;
+ /**
+ * max_increment: If enabled by the apply_max_increment field described
+ * above, this field contains the maximum number of bytes by which the
+ * cache size can be increased in a single re-size.
+ */
public long max_increment;
- public int flash_incr_mode; // H5C_cache_flash_incr_mode
+ /**
+ * flash_incr_mode: Instance of the H5C_cache_flash_incr_mode enumerated
+ * type whose value indicates whether and by which algorithm we should
+ * make flash increases in the size of the cache to accommodate insertion
+ * of large entries and large increases in the size of a single entry.
+ */
+ public int flash_incr_mode;
+ /**
+ * flash_multiple: Double containing the multiple described above in the
+ * H5C_flash_incr__add_space section of the discussion of the
+ * flash_incr_mode section. This field is ignored unless flash_incr_mode
+ * is H5C_flash_incr__add_space.
+ */
public double flash_multiple;
+ /**
+ * flash_threshold: Double containing the factor by which current max cache
+ * size is multiplied to obtain the size threshold for the add_space flash
+ * increment algorithm. The field is ignored unless flash_incr_mode is
+ * H5C_flash_incr__add_space.
+ */
public double flash_threshold;
- // size decrease control fields:
- public int decr_mode; // H5C_cache_decr_mode
+ // size decrease control fields
+ /**
+ * decr_mode: Instance of the H5C_cache_decr_mode enumerated type whose
+ * value indicates how we determine whether the cache size should be
+ * decreased. At present there are four possibilities.
+ */
+ public int decr_mode;
+ /**
+ * upper_hr_threshold: Upper hit rate threshold. The use of this field
+ * varies according to the current decr_mode.
+ */
public double upper_hr_threshold;
+ /**
+ * decrement: This field is only used when the decr_mode is
+ * H5C_decr__threshold.
+ */
public double decrement;
+ /**
+ * apply_max_decrement: Boolean flag used to determine whether decrements
+ * in cache size are to be limited by the max_decrement field.
+ */
public boolean apply_max_decrement;
+ /**
+ * max_decrement: Maximum number of bytes by which the cache size can be
+ * decreased in a single re-size. Note that decrements may also be
+ * restricted by the min_size of the cache, and (in age out modes) by
+ * the empty_reserve field.
+ */
public long max_decrement;
+ /**
+ * epochs_before_eviction: Integer field used in H5C_decr__age_out and
+ * H5C_decr__age_out_with_threshold decrement modes.
+ */
public int epochs_before_eviction;
+ /**
+ * apply_empty_reserve: Boolean field controlling whether the empty_reserve
+ * field is to be used in computing the new cache size when the
+ * decr_mode is H5C_decr__age_out or H5C_decr__age_out_with_threshold.
+ */
public boolean apply_empty_reserve;
+ /**
+ * empty_reserve: To avoid a constant ratcheting down of cache size by small
+ * amounts in the H5C_decr__age_out and H5C_decr__age_out_with_threshold
+ * modes, this field allows one to require that any cache size
+ * reductions leave the specified fraction of unused space in the cache.
+ */
public double empty_reserve;
- // parallel configuration fields:
+ // parallel configuration fields
+ /**
+ * dirty_bytes_threshold: Threshold of dirty byte creation used to
+ * synchronize updates between caches.
+ */
public long dirty_bytes_threshold;
+ /**
+ * metadata_write_strategy: Integer field containing a code indicating the
+ * desired metadata write strategy.
+ */
public int metadata_write_strategy;
+ /** H5AC_cache_config_t is a public structure intended for use in public APIs.
+ * At least in its initial incarnation, it is basically a copy of struct
+ * H5C_auto_size_ctl_t, minus the report_fcn field, and plus the
+ * dirty_bytes_threshold field.
+ *
+ * @param version: Integer field containing the version number of this version
+ * @param rpt_fcn_enabled: Boolean field used to enable and disable the default reporting function.
+ * @param open_trace_file: Boolean field indicating whether the trace_file_name
+ * field should be used to open a trace file for the cache.
+ * @param close_trace_file: Boolean field indicating whether the current trace
+ * file (if any) should be closed.
+ * @param trace_file_name: Full path of the trace file to be opened if the
+ * open_trace_file field is TRUE.
+ * @param evictions_enabled: Boolean field used to either report or set the current
+ * evictions enabled status of the cache.
+ * @param set_initial_size: Boolean flag indicating whether the initial size
+ * of the cache is to be set to the value given in
+ * the initial_size field.
+ * @param initial_size: If enabled, this field contains the size the cache is
+ * to be set to upon receipt of this structure.
+ * @param min_clean_fraction: double in the range 0 to 1 indicating the fraction
+ * of the cache that is to be kept clean.
+ * @param max_size: Maximum size to which the cache can be adjusted.
+ * @param min_size: Minimum size to which the cache can be adjusted.
+ * @param epoch_length: Number of accesses on the cache over which to collect
+ * hit rate stats before running the automatic cache resize code.
+ * @param incr_mode: Instance of the H5C_cache_incr_mode enumerated type.
+ * @param lower_hr_threshold: Lower hit rate threshold.
+ * @param increment: Double containing the multiplier used to derive the new
+ * cache size from the old if a cache size increment is triggered.
+ * @param apply_max_increment: Boolean flag indicating whether the max_increment
+ * field should be used to limit the maximum cache size increment.
+ * @param max_increment: If enabled by the apply_max_increment field described
+ * above, this field contains the maximum number of bytes by which the
+ * cache size can be increased in a single re-size.
+ * @param flash_incr_mode: Instance of the H5C_cache_flash_incr_mode enumerated
+ * type whose value indicates whether and by which algorithm we should
+ * make flash increases in the size of the cache to accommodate insertion
+ * of large entries and large increases in the size of a single entry.
+ * @param flash_multiple: Double containing the multiple described above in the
+ * H5C_flash_incr__add_space section of the discussion of the
+ * flash_incr_mode section.
+ * @param flash_threshold: Double containing the factor by which current max cache
+ * size is multiplied to obtain the size threshold for the add_space flash
+ * increment algorithm.
+ * @param decr_mode: Instance of the H5C_cache_decr_mode enumerated type whose
+ * value indicates how we determine whether the cache size should be
+ * decreased.
+ * @param upper_hr_threshold: Upper hit rate threshold. The use of this field
+ * varies according to the current decr_mode.
+ * @param decrement: This field is only used when the decr_mode is
+ * H5C_decr__threshold.
+ * @param apply_max_decrement: Boolean flag used to determine whether decrements
+ * in cache size are to be limited by the max_decrement field.
+ * @param max_decrement: Maximum number of bytes by which the cache size can be
+ * decreased in a single re-size.
+ * @param epochs_before_eviction: Integer field used in H5C_decr__age_out and
+ * H5C_decr__age_out_with_threshold decrement modes.
+ * @param apply_empty_reserve: Boolean field controlling whether the empty_reserve
+ * field is to be used in computing the new cache size when the
+ * decr_mode is H5C_decr__age_out or H5C_decr__age_out_with_threshold.
+ * @param empty_reserve: To avoid a constant ratcheting down of cache size by small
+ * amounts in the H5C_decr__age_out and H5C_decr__age_out_with_threshold
+ * modes.
+ * @param dirty_bytes_threshold: Threshold of dirty byte creation used to
+ * synchronize updates between caches.
+ * @param metadata_write_strategy: Integer field containing a code indicating the
+ * desired metadata write strategy.
+ */
public H5AC_cache_config_t (int version, boolean rpt_fcn_enabled, boolean open_trace_file,
boolean close_trace_file, String trace_file_name, boolean evictions_enabled,
boolean set_initial_size, long initial_size, double min_clean_fraction, long max_size,
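
With every field of H5AC_cache_config_t now documented, a read-modify-write sketch of the
metadata cache configuration (assumes the existing H5Pget_mdc_config/H5Pset_mdc_config bindings;
the 8 MiB value is only an example and must stay within [min_size, max_size]):

    import hdf.hdf5lib.H5;
    import hdf.hdf5lib.HDF5Constants;
    import hdf.hdf5lib.structs.H5AC_cache_config_t;

    public class MdcConfigExample {
        public static void main(String[] args) throws Exception {
            long fapl = H5.H5Pcreate(HDF5Constants.H5P_FILE_ACCESS);
            // Fetch the current metadata cache configuration, tweak it, and set it back.
            H5AC_cache_config_t cfg = H5.H5Pget_mdc_config(fapl);
            cfg.set_initial_size = true;
            cfg.initial_size = 8 * 1024 * 1024;
            H5.H5Pset_mdc_config(fapl, cfg);
            H5.H5Pclose(fapl);
        }
    }
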
diff --git a/java/src/hdf/hdf5lib/structs/H5E_error2_t.java b/java/src/hdf/hdf5lib/structs/H5E_error2_t.java
index e074156..5981fc7 100644
--- a/java/src/hdf/hdf5lib/structs/H5E_error2_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5E_error2_t.java
@@ -20,14 +20,20 @@ import java.io.Serializable;
*/
public class H5E_error2_t implements Serializable{
private static final long serialVersionUID = 279144359041667613L;
-
- public long cls_id; //class ID
- public long maj_num; //major error ID
- public long min_num; //minor error number
- public int line; //line in file where error occurs
- public String func_name; //function in which error occurred
- public String file_name; //file in which error occurred
- public String desc; //optional supplied description
+ /** class ID */
+ public long cls_id;
+ /** major error ID */
+ public long maj_num;
+ /** minor error number */
+ public long min_num;
+ /** line in file where error occurs */
+ public int line;
+ /** function in which error occurred */
+ public String func_name;
+ /** file in which error occurred */
+ public String file_name;
+ /** optional supplied description */
+ public String desc;
H5E_error2_t(long cls_id, long maj_num, long min_num, int line, String func_name, String file_name, String desc) {
this.cls_id = cls_id;
diff --git a/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java b/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java
index 26690ec..95a9254 100644
--- a/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java
@@ -1,15 +1,14 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Read-Only HDFS Virtual File Driver (VFD) *
- * Copyright (c) 2018, The HDF Group. *
+ * Copyright by The HDF Group. *
* *
* All rights reserved. *
* *
- * NOTICE: *
- * All information contained herein is, and remains, the property of The HDF *
- * Group. The intellectual and technical concepts contained herein are *
- * proprietary to The HDF Group. Dissemination of this information or *
- * reproduction of this material is strictly forbidden unless prior written *
- * permission is obtained from The HDF Group. *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
@@ -26,15 +25,31 @@ import java.io.Serializable;
public class H5FD_hdfs_fapl_t implements Serializable {
private static final long serialVersionUID = 2072473407027648309L;
+ /** Version number of the H5FD_hdfs_fapl_t structure. */
private int version;
+ /** Name of "Name Node" to access as the HDFS server. */
private String namenode_name;
+ /** Port number to use to connect with Name Node. */
private int namenode_port;
+ /** Username to use when accessing file. */
private String user_name;
+ /** Path to the location of the Kerberos authentication cache. */
private String kerberos_ticket_cache;
+ /** Size (in bytes) of the file read stream buffer. */
private int stream_buffer_size;
- /*
+ /**
* Create a fapl_t structure with the specified components.
+ * @param namenode_name
+ * Name of "Name Node" to access as the HDFS server.
+ * @param namenode_port
+ * Port number to use to connect with Name Node.
+ * @param user_name
+ * Username to use when accessing file.
+ * @param kerberos_ticket_cache
+ * Path to the location of the Kerberos authentication cache.
+ * @param stream_buffer_size
+ * Size (in bytes) of the file read stream buffer.
*/
public H5FD_hdfs_fapl_t(
String namenode_name,
diff --git a/java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java b/java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java
index 735cc7e..ad02979 100644
--- a/java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java
@@ -1,15 +1,14 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Read-Only S3 Virtual File Driver (VFD) *
- * Copyright (c) 2017-2018, The HDF Group. *
+ * Copyright by The HDF Group. *
* *
* All rights reserved. *
* *
- * NOTICE: *
- * All information contained herein is, and remains, the property of The HDF *
- * Group. The intellectual and technical concepts contained herein are *
- * proprietary to The HDF Group. Dissemination of this information or *
- * reproduction of this material is strictly forbidden unless prior written *
- * permission is obtained from The HDF Group. *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
@@ -46,10 +45,15 @@ import java.io.Serializable;
public class H5FD_ros3_fapl_t implements Serializable {
private static final long serialVersionUID = 8985533001471224030L;
+ /** Version number of the H5FD_ros3_fapl_t structure */
private int version;
+ /** Flag indicating whether requests are to be authenticated with the AWS4 algorithm. */
private boolean authenticate;
+ /** region "aws region" for authenticating request */
private String aws_region;
+ /** id "secret id" or "access id" for authenticating request */
private String secret_id;
+ /** key "secret key" or "access key" for authenticating request */
private String secret_key;
/**
diff --git a/java/src/hdf/hdf5lib/structs/H5F_info2_t.java b/java/src/hdf/hdf5lib/structs/H5F_info2_t.java
index bb87201..f951bb4 100644
--- a/java/src/hdf/hdf5lib/structs/H5F_info2_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5F_info2_t.java
@@ -20,16 +20,37 @@ import java.io.Serializable;
*/
public class H5F_info2_t implements Serializable{
private static final long serialVersionUID = 4691681162544054518L;
- public int super_version; // Superblock version #
- public long super_size; // Superblock size
- public long super_ext_size; // Superblock extension size
- public int free_version; // Version # of file free space management
- public long free_meta_size; // Free space manager metadata size
- public long free_tot_space; // Amount of free space in the file
- public int sohm_version; // Version # of shared object header info
- public long sohm_hdr_size; // Shared object header message header size
- public H5_ih_info_t sohm_msgs_info; // Shared object header message index & heap size
+ /** Superblock version number */
+ public int super_version;
+ /** Superblock size */
+ public long super_size;
+ /** Superblock extension size */
+ public long super_ext_size;
+ /** Version number of file free space management */
+ public int free_version;
+ /** Free space manager metadata size */
+ public long free_meta_size;
+ /** Amount of free space in the file */
+ public long free_tot_space;
+ /** Version number of shared object header info */
+ public int sohm_version;
+ /** Shared object header message header size */
+ public long sohm_hdr_size;
+ /** Shared object header message index and heap size */
+ public H5_ih_info_t sohm_msgs_info;
+ /**
+ * Constructor for current "global" information about the file
+ * @param super_version: Superblock version number
+ * @param super_size: Superblock size
+ * @param super_ext_size: Superblock extension size
+ * @param free_version: Version number of file free space management
+ * @param free_meta_size: Free space manager metadata size
+ * @param free_tot_space: Amount of free space in the file
+ * @param sohm_version: Version number of shared object header info
+ * @param sohm_hdr_size: Shared object header message header size
+ * @param sohm_msgs_info: Shared object header message index and heap size
+ */
public H5F_info2_t (int super_version, long super_size, long super_ext_size,
int free_version, long free_meta_size, long free_tot_space,
int sohm_version, long sohm_hdr_size, H5_ih_info_t sohm_msgs_info)
diff --git a/java/src/hdf/hdf5lib/structs/H5G_info_t.java b/java/src/hdf/hdf5lib/structs/H5G_info_t.java
index 6d4f405..e79f859 100644
--- a/java/src/hdf/hdf5lib/structs/H5G_info_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5G_info_t.java
@@ -20,8 +20,12 @@ import java.io.Serializable;
*/
public class H5G_info_t implements Serializable{
private static final long serialVersionUID = -3746463015312132912L;
- public int storage_type; // Type of storage for links in group
- public long nlinks; // Number of links in group
- public long max_corder; // Current max. creation order value for group
- public boolean mounted; // Whether group has a file mounted on it
+ /** Type of storage for links in group */
+ public int storage_type;
+ /** Number of links in group */
+ public long nlinks;
+ /** Current max. creation order value for group */
+ public long max_corder;
+ /** Whether group has a file mounted on it */
+ public boolean mounted;
}
diff --git a/java/src/hdf/hdf5lib/structs/H5L_info_t.java b/java/src/hdf/hdf5lib/structs/H5L_info_t.java
index eaf0da5..a3011c0 100644
--- a/java/src/hdf/hdf5lib/structs/H5L_info_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5L_info_t.java
@@ -22,14 +22,20 @@ import hdf.hdf5lib.HDF5Constants;
*/
public class H5L_info_t implements Serializable {
private static final long serialVersionUID = -4754320605310155033L;
+ /** Type of link */
public int type;
+ /** Indicate if creation order is valid */
public boolean corder_valid;
+ /** Creation order */
public long corder;
+ /** Character set of link name */
public int cset;
+ /** Token of location that hard link points to */
public H5O_token_t token;
+ /** Size of a soft link or user-defined link value */
public long val_size;
- // Constructor for using object token portion of C union
+ /** Constructor for using object token portion of C union */
H5L_info_t (int type, boolean corder_valid, long corder,
int cset, H5O_token_t token)
{
@@ -41,7 +47,7 @@ public class H5L_info_t implements Serializable {
this.val_size = -1;
}
- // Constructor for using val_size portion of C union
+ /** Constructor for using val_size portion of C union */
H5L_info_t (int type, boolean corder_valid, long corder,
int cset, long val_size)
{
diff --git a/java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java b/java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java
index 9a1749d..2475dd9 100644
--- a/java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java
@@ -20,16 +20,26 @@ import java.io.Serializable;
*/
public class H5O_hdr_info_t implements Serializable {
private static final long serialVersionUID = 7883826382952577189L;
- public int version; /* Version number of header format in file */
- public int nmesgs; /* Number of object header messages */
- public int nchunks; /* Number of object header chunks */
- public int flags; /* Object header status flags */
- public long space_total; /* Total space for storing object header in file */
- public long space_meta; /* Space within header for object header metadata information */
- public long space_mesg; /* Space within header for actual message information */
- public long space_free; /* Free space within object header */
- public long mesg_present; /* Flags to indicate presence of message type in header */
- public long mesg_shared; /* Flags to indicate message type is shared in header */
+ /** Version number of header format in file */
+ public int version;
+ /** Number of object header messages */
+ public int nmesgs;
+ /** Number of object header chunks */
+ public int nchunks;
+ /** Object header status flags */
+ public int flags;
+ /** Total space for storing object header in file */
+ public long space_total;
+ /** Space within header for object header metadata information */
+ public long space_meta;
+ /** Space within header for actual message information */
+ public long space_mesg;
+ /** Free space within object header */
+ public long space_free;
+ /** Flags to indicate presence of message type in header */
+ public long mesg_present;
+ /** Flags to indicate message type is shared in header */
+ public long mesg_shared;
H5O_hdr_info_t (int version, int nmesgs, int nchunks, int flags,
long space_total, long space_meta, long space_mesg, long space_free,
diff --git a/java/src/hdf/hdf5lib/structs/H5O_info_t.java b/java/src/hdf/hdf5lib/structs/H5O_info_t.java
index cc94247..d2208d2 100644
--- a/java/src/hdf/hdf5lib/structs/H5O_info_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5O_info_t.java
@@ -20,16 +20,37 @@ import java.io.Serializable;
*/
public class H5O_info_t implements Serializable {
private static final long serialVersionUID = 4691681163544054518L;
- public long fileno; /* File number that object is located in */
- public H5O_token_t token; /* Object token in file */
- public int type; /* Basic object type (group, dataset, etc.) */
- public int rc; /* Reference count of object */
- public long atime; /* Access time */
- public long mtime; /* Modification time */
- public long ctime; /* Change time */
- public long btime; /* Birth time */
- public long num_attrs; /* # of attributes attached to object */
+ /** File number that object is located in */
+ public long fileno;
+ /** Object token in file */
+ public H5O_token_t token;
+ /** Basic object type (group, dataset, etc.) */
+ public int type;
+ /** Reference count of object */
+ public int rc;
+ /** Access time */
+ public long atime;
+ /** Modification time */
+ public long mtime;
+ /** Change time */
+ public long ctime;
+ /** Birth time */
+ public long btime;
+ /** Number of attributes attached to object */
+ public long num_attrs;
+ /** Constructor for data model information struct for objects
+ *
+ * @param fileno: File number that object is located in
+ * @param token: Object token in file
+ * @param type: Basic object type
+ * @param rc: Reference count of object
+ * @param atime: Access time
+ * @param mtime: Modification time
+ * @param ctime: Change time
+ * @param btime: Birth time
+ * @param num_attrs: Number of attributes attached to object
+ */
public H5O_info_t (long fileno, H5O_token_t token, int type,
int rc, long atime, long mtime, long ctime, long btime, long num_attrs)
{
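
The documented H5O_info_t fields are what the object-info queries populate; the token field in
turn exposes the H5O_token_t helper shown further below. A sketch, assuming the existing
H5Oget_info binding (file and dataset names hypothetical):

    import hdf.hdf5lib.H5;
    import hdf.hdf5lib.HDF5Constants;
    import hdf.hdf5lib.structs.H5O_info_t;

    public class ObjectInfoExample {
        public static void main(String[] args) throws Exception {
            long file_id = H5.H5Fopen("example.h5", HDF5Constants.H5F_ACC_RDONLY,
                                      HDF5Constants.H5P_DEFAULT);
            long dset_id = H5.H5Dopen(file_id, "/g1/dset", HDF5Constants.H5P_DEFAULT);
            // The struct carries the token-based identity plus times and attribute count.
            H5O_info_t info = H5.H5Oget_info(dset_id);
            System.out.println("type=" + info.type + " attrs=" + info.num_attrs
                             + " undefined token? " + info.token.isUndefined());
            H5.H5Dclose(dset_id);
            H5.H5Fclose(file_id);
        }
    }
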
diff --git a/java/src/hdf/hdf5lib/structs/H5O_native_info_t.java b/java/src/hdf/hdf5lib/structs/H5O_native_info_t.java
index ff801e8..70e5231 100644
--- a/java/src/hdf/hdf5lib/structs/H5O_native_info_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5O_native_info_t.java
@@ -20,12 +20,14 @@ import java.io.Serializable;
*/
public class H5O_native_info_t implements Serializable {
private static final long serialVersionUID = 7883826382952577189L;
-
- public H5O_hdr_info_t hdr_info; /* Object header information */
+ /** Object header information */
+ public H5O_hdr_info_t hdr_info;
/* Extra metadata storage for obj & attributes */
- public H5_ih_info_t obj_info; /* v1/v2 B-tree & local/fractal heap for groups, B-tree for chunked datasets */
- public H5_ih_info_t attr_info; /* v2 B-tree & heap for attributes */
+ /** v1/v2 B-tree and local/fractal heap for groups, B-tree for chunked datasets */
+ public H5_ih_info_t obj_info;
+ /** v2 B-tree and heap for attributes */
+ public H5_ih_info_t attr_info;
H5O_native_info_t (H5O_hdr_info_t oheader_info, H5_ih_info_t obj_info, H5_ih_info_t attr_info)
{
diff --git a/java/src/hdf/hdf5lib/structs/H5O_token_t.java b/java/src/hdf/hdf5lib/structs/H5O_token_t.java
index c7ac437..f0bb978 100644
--- a/java/src/hdf/hdf5lib/structs/H5O_token_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5O_token_t.java
@@ -23,12 +23,22 @@ import hdf.hdf5lib.HDF5Constants;
*/
public class H5O_token_t implements Serializable {
private static final long serialVersionUID = -4754320605310155032L;
+ /**
+ * Tokens are unique and permanent identifiers that are
+ * used to reference HDF5 objects in a container.
+ * Use a basic byte array to store the data.
+ */
public byte[] data;
H5O_token_t (byte[] data) {
this.data = data;
}
+ /**
+ * Check if token data is undefined
+ *
+ * @return true if token data is undefined
+ */
public boolean isUndefined() {
return this.equals(HDF5Constants.H5O_TOKEN_UNDEF);
}
diff --git a/java/src/hdf/hdf5lib/structs/H5_ih_info_t.java b/java/src/hdf/hdf5lib/structs/H5_ih_info_t.java
index eec50c2..0c6111b 100644
--- a/java/src/hdf/hdf5lib/structs/H5_ih_info_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5_ih_info_t.java
@@ -20,9 +20,9 @@ import java.io.Serializable;
*/
public class H5_ih_info_t implements Serializable {
private static final long serialVersionUID = -142238015615462707L;
- /** */
- public long index_size; /* btree and/or list */
- /** */
+ /** btree and/or list size of index */
+ public long index_size;
+ /** btree and/or list size of heap */
public long heap_size;
H5_ih_info_t (long index_size, long heap_size)