summary refs log tree commit diff stats
path: root/programs/util.h
diff options
context:
space:
mode:
author	Yann Collet <cyan@fb.com>	2018-02-20 21:05:22 (GMT)
committer	Yann Collet <cyan@fb.com>	2018-02-20 21:09:13 (GMT)
commitae3dededed1c6c1a9b4c631d9edba690c4abb59c (patch)
tree247aa24692ecbc82e1979269350e663a7a570c81 /programs/util.h
parent1a233c5f0fb665fa6b65c856b150bdef92654b42 (diff)
downloadlz4-ae3dededed1c6c1a9b4c631d9edba690c4abb59c.zip
lz4-ae3dededed1c6c1a9b4c631d9edba690c4abb59c.tar.gz
lz4-ae3dededed1c6c1a9b4c631d9edba690c4abb59c.tar.bz2
ensure bench speed measurement is more accurate for small inputs
Previous method would produce too many time() invocations, becoming a significant fraction of workload measured. The new strategy is to use time() only once per batch, and dynamically resize batch size so that each round lasts approximately 1 second. This only matters for small inputs. Measurement for large files (such as silesia.tar) are much less impacted (though decoding speed is so fast that even medium-size files will notice an improvement).
Diffstat (limited to 'programs/util.h')
-rw-r--r--	programs/util.h	8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/programs/util.h b/programs/util.h
index a3576d7..ff25106 100644
--- a/programs/util.h
+++ b/programs/util.h
@@ -180,7 +180,7 @@ extern "C" {
mach_timebase_info(&rate);
init = 1;
}
- return (((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom))/1000ULL;
+ return (((clockEnd - clockStart) * (U64)rate.numer) / ((U64)rate.denom)) / 1000ULL;
}
UTIL_STATIC U64 UTIL_getSpanTimeNano(UTIL_time_t clockStart, UTIL_time_t clockEnd)
{
@@ -249,6 +249,12 @@ UTIL_STATIC U64 UTIL_clockSpanMicro(UTIL_time_t clockStart)
return UTIL_getSpanTimeMicro(clockStart, clockEnd);
}
+/* returns time span in nanoseconds */
+UTIL_STATIC U64 UTIL_clockSpanNano(UTIL_time_t clockStart)
+{
+ UTIL_time_t const clockEnd = UTIL_getTime();
+ return UTIL_getSpanTimeNano(clockStart, clockEnd);
+}
UTIL_STATIC void UTIL_waitForNextTick(void)
{