diff options
author | Robb Matzke <matzke@llnl.gov> | 1998-06-16 19:38:26 (GMT) |
---|---|---|
committer | Robb Matzke <matzke@llnl.gov> | 1998-06-16 19:38:26 (GMT) |
commit | 53916f4e5935ae7c36d7dd6e04d1c5e51b7e78ea (patch) | |
tree | 76c6163d98ac715ddec1dfe4fa69f8f2753636a0 /src/H5Tbit.c | |
parent | a639a5998c7bc5d9f00f95e0ee4157950b5e49eb (diff) | |
download | hdf5-53916f4e5935ae7c36d7dd6e04d1c5e51b7e78ea.zip hdf5-53916f4e5935ae7c36d7dd6e04d1c5e51b7e78ea.tar.gz hdf5-53916f4e5935ae7c36d7dd6e04d1c5e51b7e78ea.tar.bz2 |
[svn-r428] Changes since 19980612
----------------------
./src/H5Tbit.c
./MANIFEST
./test/Makefile.in
./test/bittests.c NEW
Finished the bit vector operations and added test cases.
./src/H5Tconv.c
./test/dtypes.c
Finished integer->integer general conversion and added test
cases. Overflows and underflows are handled by substituting
the closest possible value. Examples:
(unsigned)0xffff -> (unsigned) 0xff
( signed)0xffff -> (unsigned)0x0000
(unsigned)0xffff -> ( signed)0x7fff
( signed)0x7fff -> ( signed) 0x7f
( signed)0xbfff -> ( signed) 0xbf
( signed)0x8000 -> ( signed) 0x80
./src/H5private.h
Added definitions for MIN and MAX that take 3 or 4 arguments:
MIN3(), MIN4(), MAX3(), MAX4(). Also added MIN2() and MAX2()
as aliases for MIN() and MAX().
./test/tattr.c
Removed some redundant `&' operators.
./configure.in
./src/H5config.h.in [regenerated]
./src/H5.c
Fixed warnings on DEC where long double is the same as
double.
Diffstat (limited to 'src/H5Tbit.c')
-rw-r--r-- | src/H5Tbit.c | 192 |
1 file changed, 170 insertions, 22 deletions
diff --git a/src/H5Tbit.c b/src/H5Tbit.c index 0524dcc..b9a830d 100644 --- a/src/H5Tbit.c +++ b/src/H5Tbit.c @@ -32,25 +32,22 @@ void H5T_bit_copy (uint8 *dst, size_t dst_offset, const uint8 *src, size_t src_offset, size_t size) { - uintn shift; + intn shift; uintn mask_lo, mask_hi; intn s_idx, d_idx; - + /* - * Calculate shifts and masks. See diagrams below. MASK_LO in this - * example is 0x1f (the low five bits) and MASK_HI is 0xe0 (the high three - * bits). SHIFT is three since the source must be shifted right three bits - * to line up with the destination. + * Normalize the offset to be a byte number and a bit offset within that + * byte. */ - shift = (dst_offset%8)-(src_offset%8); - mask_lo = (1<<(8-shift))-1; - mask_hi = ((1<<shift)-1) << (8-shift); s_idx = src_offset / 8; d_idx = dst_offset / 8; + src_offset %= 8; + dst_offset %= 8; /* * Get things rolling. This means copying bits until we're aligned on a - * source byte. This the following example, four bits are copied to the + * source byte. This the following example, five bits are copied to the * destination. * * src[s_idx] @@ -64,11 +61,26 @@ H5T_bit_copy (uint8 *dst, size_t dst_offset, const uint8 *src, * ...+---------------+---------------+ * dst[d_idx+1] dst[d_idx] */ - if (src_offset%8 && size>0) { + while (src_offset && size>0) { + unsigned nbits = MIN3 (size, 8-dst_offset, 8-src_offset); + unsigned mask = (1<<nbits) - 1; + + dst[d_idx] &= ~(mask<<dst_offset); + dst[d_idx] |= ((src[s_idx]>>src_offset)&mask) << dst_offset; + + src_offset += nbits; + if (src_offset>=8) { + s_idx++; + src_offset %= 8; + } + dst_offset += nbits; + if (dst_offset>=8) { + d_idx++; + dst_offset %= 8; + } + size -= nbits; } - - /* * The middle bits. We are aligned on a source byte which needs to be * copied to two (or one in the degenerate case) destination bytes. 
@@ -84,24 +96,47 @@ H5T_bit_copy (uint8 *dst, size_t dst_offset, const uint8 *src, * +---------------+---------------+ * dst[d_idx+1] dst[d_idx] * + * + * Calculate shifts and masks. See diagrams below. MASK_LO in this + * example is 0x1f (the low five bits) and MASK_HI is 0xe0 (the high three + * bits). SHIFT is three since the source must be shifted right three bits + * to line up with the destination. */ + shift = dst_offset; + mask_lo = (1<<(8-shift))-1; + mask_hi = ~mask_lo; + for (/*void*/; size>8; size-=8, d_idx++, s_idx++) { if (shift) { - dst[d_idx+0] &= mask_lo; - dst[d_idx+0] |= (src[s_idx] << shift) & mask_hi; - dst[d_idx+1] &= mask_hi; - dst[d_idx+1] |= (src[s_idx] >> (8-shift)) & mask_lo; + dst[d_idx+0] &= ~(mask_lo<<shift); + dst[d_idx+0] |= (src[s_idx] & mask_lo) << shift; + dst[d_idx+1] &= ~(mask_hi>>(8-shift)); + dst[d_idx+1] |= (src[s_idx] & mask_hi) >> (8-shift); } else { dst[d_idx] = src[s_idx]; } } - - - /* Finish up */ - - + while (size>0) { + unsigned nbits = MIN3 (size, 8-dst_offset, 8-src_offset); + unsigned mask = (1<<nbits) - 1; + + dst[d_idx] &= ~(mask<<dst_offset); + dst[d_idx] |= ((src[s_idx]>>src_offset)&mask) << dst_offset; + + src_offset += nbits; + if (src_offset>=8) { + s_idx++; + src_offset %= 8; + } + dst_offset += nbits; + if (dst_offset>=8) { + d_idx++; + dst_offset %= 8; + } + size -= nbits; + } } @@ -123,6 +158,38 @@ H5T_bit_copy (uint8 *dst, size_t dst_offset, const uint8 *src, void H5T_bit_set (uint8 *buf, size_t offset, size_t size, hbool_t value) { + intn idx; + + /* Normalize */ + idx = offset / 8; + offset %= 8; + + /* The first partial byte */ + if (size && offset%8) { + size_t nbits = MIN (size, 8-offset); + unsigned mask = (1<<nbits)-1; + if (value) { + buf[idx++] |= mask << offset; + } else { + buf[idx++] &= ~(mask << offset); + } + size -= nbits; + } + + /* The middle bytes */ + while (size>=8) { + buf[idx++] = value ? 
0xff : 0x00; + size -= 8; + } + + /* The last partial byte */ + if (size) { + if (value) { + buf[idx] |= (1<<size)-1; + } else { + buf[idx] &= ~((1<<size)-1); + } + } } @@ -150,5 +217,86 @@ ssize_t H5T_bit_find (uint8 *buf, size_t offset, size_t size, H5T_sdir_t direction, hbool_t value) { + size_t base=offset; + ssize_t idx, i; + + /* Some functions call this with value=TRUE */ + assert (TRUE==1); + + + switch (direction) { + case H5T_BIT_LSB: + /* Calculate index */ + idx = offset / 8; + offset %= 8; + + /* Beginning */ + if (offset) { + for (i=offset; i<8 && size>0; i++, size--) { + if (value==((buf[idx]>>i) & 0x01)) { + return 8*idx+i - base; + } + } + offset = 0; + idx++; + } + /* Middle */ + while (size>=8) { + if ((value?0x00:0xff)!=buf[idx]) { + for (i=0; i<8; i++) { + if (value==((buf[idx]>>i) & 0x01)) { + return 8*idx+i - base; + } + } + } + size -= 8; + idx++; + } + /* End */ + for (i=0; i<(ssize_t)size; i++) { + if (value==((buf[idx]>>i) & 0x01)) { + return 8*idx+i - base; + } + } + break; + + case H5T_BIT_MSB: + /* Calculate index */ + idx = (offset+size-1) / 8; + offset %= 8; + + /* Beginning */ + if (size>8-offset && (offset+size)%8) { + for (i=(offset+size)%8-1; i>=0; --i, --size) { + if (value==((buf[idx]>>i) & 0x01)) { + return 8*idx+i - base; + } + } + --idx; + } + /* Middle */ + while (size>=8) { + if ((value?0x00:0xff)!=buf[idx]) { + for (i=7; i>=0; --i) { + if (value==((buf[idx]>>i) & 0x01)) { + return 8*idx+i - base; + } + } + } + size -= 8; + --idx; + } + /* End */ + if (size>0) { + for (i=offset+size-1; i>=(ssize_t)offset; --i) { + if (value==((buf[idx]>>i) & 0x01)) { + return 8*idx+i - base; + } + } + } + break; + } + + return -1; } |