summaryrefslogtreecommitdiffstats
path: root/src/H5Fprivate.h
diff options
context:
space:
mode:
authorAlbert Cheng <acheng@hdfgroup.org>1999-03-09 23:06:07 (GMT)
committerAlbert Cheng <acheng@hdfgroup.org>1999-03-09 23:06:07 (GMT)
commit2c3234ef72e359439918da691dab5aa78baa0dbb (patch)
treefda04f4bdf76ad982a75e757fde5c629989137cc /src/H5Fprivate.h
parent362894fbb1b798dbbe81f193c6db1ee27eda5182 (diff)
downloadhdf5-2c3234ef72e359439918da691dab5aa78baa0dbb.zip
hdf5-2c3234ef72e359439918da691dab5aa78baa0dbb.tar.gz
hdf5-2c3234ef72e359439918da691dab5aa78baa0dbb.tar.bz2
[svn-r1128] Changes for T3E port.
H5Flow.c: Document previous changes (same changes committed by Robb first.) H5Fprivate.h: Change DECODE macros to do sign extension. (Sign extension can be skipped if machines use the exact sizes.) H5Sall.c: Fixed a typo in return value. H5private.h: Fixed a typo and also changed the strategy of the int16_t and uint16_t typedef. Now will use short as long as it is at least 2 bytes big.
Diffstat (limited to 'src/H5Fprivate.h')
-rw-r--r--src/H5Fprivate.h14
1 files changed, 12 insertions, 2 deletions
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index 71b6ca4..cd2b17c 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -117,9 +117,18 @@
(p) = (uint8_t*)(p)+8; \
}
+/* DECODE converts little-endian bytes pointed to by p to integer values and
+ * stores them in i. For signed values, sign extension is needed when
+ * converting the last byte, which carries the sign bit.
+ * The macros do not require i to be of a certain byte size. They just require
+ * i to be big enough to hold the intended value range. E.g. INT16DECODE works
+ * correctly even if i is actually a 64-bit int like on a Cray.
+ */
+
# define INT16DECODE(p, i) { \
(i) = (int16_t)((*(p) & 0xff)); (p)++; \
- (i) |= (int16_t)((*(p) & 0xff) << 8); (p)++; \
+ (i) |= (int16_t)(((*(p) & 0xff) << 8) | \
+ ((*(p) & 0x80) ? ~0xffff : 0x0)); (p)++; \
}
# define UINT16DECODE(p, i) { \
@@ -131,7 +140,8 @@
(i) = ( *(p) & 0xff); (p)++; \
(i) |= ((int32_t)(*(p) & 0xff) << 8); (p)++; \
(i) |= ((int32_t)(*(p) & 0xff) << 16); (p)++; \
- (i) |= ((int32_t)(*(p) & 0xff) << 24); (p)++; \
+ (i) |= ((int32_t)(((*(p) & 0xff) << 24) | \
+ ((*(p) & 0x80) ? ~0xffffffff : 0x0))); (p)++; \
}
# define UINT32DECODE(p, i) { \