Diffstat (limited to 'win/tclWinTime.c')
-rw-r--r--   win/tclWinTime.c   441
1 file changed, 274 insertions(+), 167 deletions(-)
diff --git a/win/tclWinTime.c b/win/tclWinTime.c
index 64890c4..a137df4 100644
--- a/win/tclWinTime.c
+++ b/win/tclWinTime.c
@@ -53,7 +53,8 @@ typedef struct {
* initialized. */
int perfCounterAvailable; /* Flag == 1 if the hardware has a performance
* counter. */
- DWORD calibrationInterv; /* Calibration interval in seconds (start 1 sec) */
+ DWORD calibrationInterv; /* Calibration interval in seconds (start 1
+ * sec) */
HANDLE calibrationThread; /* Handle to the thread that keeps the virtual
* clock calibrated. */
HANDLE readyEvent; /* System event used to trigger the requesting
@@ -122,7 +123,8 @@ static TimeInfo timeInfo = {
*/
static struct {
int initialized; /* 1 if initialized, 0 otherwise */
- int perfCounter; /* 1 if performance counter usable for wide clicks */
+ int perfCounter; /* 1 if performance counter usable for wide
+ * clicks */
double microsecsScale; /* Denominator scale between clock / microsecs */
} wideClick = {0, 0, 0.0};
@@ -156,6 +158,23 @@ Tcl_ScaleTimeProc *tclScaleTimeProcPtr = NativeScaleTime;
ClientData tclTimeClientData = NULL;
/*
+ * Inlined version of Tcl_GetTime.
+ */
+
+static inline void
+GetTime(
+ Tcl_Time *timePtr)
+{
+ tclGetTimeProcPtr(timePtr, tclTimeClientData);
+}
+
+static inline int
+IsTimeNative(void)
+{
+ return tclGetTimeProcPtr == NativeGetTime;
+}
+
+/*
*----------------------------------------------------------------------
*
* TclpGetSeconds --
@@ -177,15 +196,16 @@ TclpGetSeconds(void)
{
long long usecSincePosixEpoch;
- /* Try to use high resolution timer */
- if ( tclGetTimeProcPtr == NativeGetTime
- && (usecSincePosixEpoch = NativeGetMicroseconds())
- ) {
+ /*
+ * Try to use high resolution timer
+ */
+
+ if (IsTimeNative() && (usecSincePosixEpoch = NativeGetMicroseconds())) {
return usecSincePosixEpoch / 1000000;
} else {
Tcl_Time t;
- tclGetTimeProcPtr(&t, tclTimeClientData); /* Tcl_GetTime inlined. */
+ GetTime(&t);
return t.sec;
}
}
@@ -214,11 +234,12 @@ TclpGetClicks(void)
{
long long usecSincePosixEpoch;
- /* Try to use high resolution timer */
- if ( tclGetTimeProcPtr == NativeGetTime
- && (usecSincePosixEpoch = NativeGetMicroseconds())
- ) {
- return (unsigned long)usecSincePosixEpoch;
+ /*
+ * Try to use high resolution timer.
+ */
+
+ if (IsTimeNative() && (usecSincePosixEpoch = NativeGetMicroseconds())) {
+ return (unsigned long) usecSincePosixEpoch;
} else {
/*
* Use the Tcl_GetTime abstraction to get the time in microseconds, as
@@ -227,8 +248,8 @@ TclpGetClicks(void)
Tcl_Time now; /* Current Tcl time */
- tclGetTimeProcPtr(&now, tclTimeClientData); /* Tcl_GetTime inlined */
- return (unsigned long)(now.sec * 1000000) + now.usec;
+ GetTime(&now);
+ return (unsigned long) (now.sec * 1000000) + now.usec;
}
}
@@ -264,6 +285,7 @@ TclpGetWideClicks(void)
* is consistent across all processors. Therefore, the frequency need
* only be queried upon application initialization.
*/
+
if (QueryPerformanceFrequency(&perfCounterFreq)) {
wideClick.perfCounter = 1;
wideClick.microsecsScale = 1000000.0 / perfCounterFreq.QuadPart;
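As a sanity check on the scale factor above: microsecsScale is simply microseconds per performance-counter tick, so a counter reported at 10 MHz yields 0.1 microsecond per wide click. A minimal standalone sketch (the 10 MHz figure is purely an illustrative assumption, not something the patch relies on):

    #include <stdio.h>

    int
    main(void)
    {
        /* Illustrative only: assume QueryPerformanceFrequency reported 10 MHz. */
        long long counterFreq = 10000000;
        double microsecsScale = 1000000.0 / counterFreq;    /* 0.1 us per click */
        long long clicks = 12345678;

        printf("scale = %g us/click\n", microsecsScale);
        printf("%lld clicks = %.1f us\n", clicks, clicks * microsecsScale);
        return 0;
    }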
@@ -310,7 +332,7 @@ double
TclpWideClickInMicrosec(void)
{
if (!wideClick.initialized) {
- (void)TclpGetWideClicks(); /* initialize */
+ (void) TclpGetWideClicks(); /* initialize */
}
return wideClick.microsecsScale;
}
@@ -337,21 +359,22 @@ TclpGetMicroseconds(void)
{
long long usecSincePosixEpoch;
- /* Try to use high resolution timer */
- if ( tclGetTimeProcPtr == NativeGetTime
- && (usecSincePosixEpoch = NativeGetMicroseconds())
- ) {
+ /*
+ * Try to use high resolution timer.
+ */
+
+ if (IsTimeNative() && (usecSincePosixEpoch = NativeGetMicroseconds())) {
return usecSincePosixEpoch;
} else {
/*
- * Use the Tcl_GetTime abstraction to get the time in microseconds, as
- * nearly as we can, and return it.
- */
+ * Use the Tcl_GetTime abstraction to get the time in microseconds, as
+ * nearly as we can, and return it.
+ */
Tcl_Time now;
- tclGetTimeProcPtr(&now, tclTimeClientData); /* Tcl_GetTime inlined */
- return (((long long)now.sec) * 1000000) + now.usec;
+ GetTime(&now);
+ return (((long long) now.sec) * 1000000) + now.usec;
}
}
@@ -383,14 +406,15 @@ Tcl_GetTime(
{
long long usecSincePosixEpoch;
- /* Try to use high resolution timer */
- if ( tclGetTimeProcPtr == NativeGetTime
- && (usecSincePosixEpoch = NativeGetMicroseconds())
- ) {
+ /*
+ * Try to use high resolution timer.
+ */
+
+ if (IsTimeNative() && (usecSincePosixEpoch = NativeGetMicroseconds())) {
timePtr->sec = (long) (usecSincePosixEpoch / 1000000);
timePtr->usec = (unsigned long) (usecSincePosixEpoch % 1000000);
} else {
- tclGetTimeProcPtr(timePtr, tclTimeClientData);
+ GetTime(timePtr);
}
}
@@ -424,6 +448,96 @@ NativeScaleTime(
/*
*----------------------------------------------------------------------
*
+ * IsPerfCounterAvailable --
+ *
+ * Tests whether the performance counter is available, which is a gnarly
+ * problem on 32-bit systems. Also retrieves the nominal frequency of the
+ * performance counter.
+ *
+ * Results:
+ * 1 if the counter is available, 0 if not.
+ *
+ * Side effects:
+ * Updates fields of the timeInfo global. Make sure you hold the lock
+ * before calling this.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline int
+IsPerfCounterAvailable(void)
+{
+ timeInfo.perfCounterAvailable =
+ QueryPerformanceFrequency(&timeInfo.nominalFreq);
+
+ /*
+ * Some hardware abstraction layers use the CPU clock in place of the
+ * real-time clock as a performance counter reference. This results in:
+ * - inconsistent results among the processors on multi-processor
+ * systems.
+ * - unpredictable changes in performance counter frequency on
+ * "gearshift" processors such as Transmeta and SpeedStep.
+ *
+ * There seems to be no way to test whether the performance counter is
+ * reliable, but a useful heuristic is that if its frequency is 1.193182
+ * MHz or 3.579545 MHz, it's derived from a colorburst crystal and is
+ * therefore the RTC rather than the TSC.
+ *
+ * A sloppier but serviceable heuristic is that the RTC crystal is
+ * normally less than 15 MHz while the TSC crystal is virtually assured to
+ * be greater than 100 MHz. Since Win98SE appears to fiddle with the
+ * definition of the perf counter frequency (perhaps in an attempt to
+ * calibrate the clock?), we use the latter rule rather than an exact
+ * match.
+ *
+ * We also assume (perhaps questionably) that the vendors have gotten
+ * their act together on Win64, so bypass all this rubbish on that
+ * platform.
+ */
+
+#if !defined(_WIN64)
+ if (timeInfo.perfCounterAvailable &&
+ /*
+ * The following lines would do an exact match on crystal
+ * frequency:
+ *
+ * timeInfo.nominalFreq.QuadPart != (long long) 1193182 &&
+ * timeInfo.nominalFreq.QuadPart != (long long) 3579545 &&
+ */
+ timeInfo.nominalFreq.QuadPart > (long long) 15000000) {
+ /*
+ * As an exception, if every logical processor on the system is on the
+ * same chip, we use the performance counter anyway, presuming that
+ * everyone's TSC is locked to the same oscillator.
+ */
+
+ SYSTEM_INFO systemInfo;
+ int regs[4];
+
+ GetSystemInfo(&systemInfo);
+ if (TclWinCPUID(0, regs) == TCL_OK
+ && regs[1] == 0x756E6547 /* "Genu" */
+ && regs[3] == 0x49656E69 /* "ineI" */
+ && regs[2] == 0x6C65746E /* "ntel" */
+ && TclWinCPUID(1, regs) == TCL_OK
+ && ((regs[0]&0x00000F00) == 0x00000F00 /* Pentium 4 */
+ || ((regs[0] & 0x00F00000) /* Extended family */
+ && (regs[3] & 0x10000000))) /* Hyperthread */
+ && (((regs[1]&0x00FF0000) >> 16)/* CPU count */
+ == (int)systemInfo.dwNumberOfProcessors)) {
+ timeInfo.perfCounterAvailable = TRUE;
+ } else {
+ timeInfo.perfCounterAvailable = FALSE;
+ }
+ }
+#endif /* above code is Win32 only */
+
+ return timeInfo.perfCounterAvailable;
+}
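The magic numbers in the vendor check above are just the bytes of the "GenuineIntel" CPUID vendor string as leaf 0 returns them in EBX, EDX and ECX: four ASCII characters packed little-endian into each 32-bit register. A standalone sketch of that packing (PackVendorChunk is a hypothetical helper for illustration, not part of this patch):

    #include <stdio.h>

    /*
     * Pack four ASCII characters the way CPUID leaf 0 returns them in a
     * 32-bit register: first character in the least significant byte.
     */
    static unsigned int
    PackVendorChunk(const char *s)
    {
        return (unsigned int) s[0]
                | ((unsigned int) s[1] << 8)
                | ((unsigned int) s[2] << 16)
                | ((unsigned int) s[3] << 24);
    }

    int
    main(void)
    {
        /* "GenuineIntel" split as EBX="Genu", EDX="ineI", ECX="ntel". */
        printf("EBX = 0x%08X\n", PackVendorChunk("Genu"));  /* 0x756E6547 */
        printf("EDX = 0x%08X\n", PackVendorChunk("ineI"));  /* 0x49656E69 */
        printf("ECX = 0x%08X\n", PackVendorChunk("ntel"));  /* 0x6C65746E */
        return 0;
    }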
+
+/*
+ *----------------------------------------------------------------------
+ *
* NativeGetMicroseconds --
*
* Gets the current system time in microseconds since the beginning
@@ -449,10 +563,10 @@ NativeCalc100NsTicks(
ULONGLONG fileTimeLastCall,
LONGLONG perfCounterLastCall,
LONGLONG curCounterFreq,
- LONGLONG curCounter
-) {
+ LONGLONG curCounter)
+{
return fileTimeLastCall +
- ((curCounter - perfCounterLastCall) * 10000000 / curCounterFreq);
+ ((curCounter - perfCounterLastCall) * 10000000 / curCounterFreq);
}
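NativeCalc100NsTicks() is a plain linear extrapolation: the calibrated file time (already in 100-ns units) plus the counter ticks elapsed since the last calibration, rescaled by the calibrated counter frequency. A self-contained copy of the same arithmetic with made-up inputs (all values below are hypothetical):

    #include <stdio.h>

    static unsigned long long
    Calc100NsTicks(
        unsigned long long fileTimeLastCall,    /* calibrated time, 100-ns units */
        long long perfCounterLastCall,          /* counter value at calibration */
        long long curCounterFreq,               /* calibrated frequency, Hz */
        long long curCounter)                   /* counter value now */
    {
        return fileTimeLastCall +
                ((curCounter - perfCounterLastCall) * 10000000 / curCounterFreq);
    }

    int
    main(void)
    {
        /*
         * Hypothetical: a 10 MHz counter, 2500000 ticks (0.25 s) since the
         * calibration that pinned the clock at 1000000000 * 100 ns.
         */
        unsigned long long t =
                Calc100NsTicks(1000000000ULL, 50000000, 10000000, 52500000);

        printf("%llu\n", t);    /* 1000000000 + 2500000 * 10000000 / 10000000
                                 * = 1002500000, i.e. 0.25 s later */
        return 0;
    }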
static long long
@@ -468,83 +582,15 @@ NativeGetMicroseconds(void)
if (!timeInfo.initialized) {
TclpInitLock();
if (!timeInfo.initialized) {
-
timeInfo.posixEpoch.LowPart = 0xD53E8000;
timeInfo.posixEpoch.HighPart = 0x019DB1DE;
- timeInfo.perfCounterAvailable =
- QueryPerformanceFrequency(&timeInfo.nominalFreq);
-
- /*
- * Some hardware abstraction layers use the CPU clock in place of
- * the real-time clock as a performance counter reference. This
- * results in:
- * - inconsistent results among the processors on
- * multi-processor systems.
- * - unpredictable changes in performance counter frequency on
- * "gearshift" processors such as Transmeta and SpeedStep.
- *
- * There seems to be no way to test whether the performance
- * counter is reliable, but a useful heuristic is that if its
- * frequency is 1.193182 MHz or 3.579545 MHz, it's derived from a
- * colorburst crystal and is therefore the RTC rather than the
- * TSC.
- *
- * A sloppier but serviceable heuristic is that the RTC crystal is
- * normally less than 15 MHz while the TSC crystal is virtually
- * assured to be greater than 100 MHz. Since Win98SE appears to
- * fiddle with the definition of the perf counter frequency
- * (perhaps in an attempt to calibrate the clock?), we use the
- * latter rule rather than an exact match.
- *
- * We also assume (perhaps questionably) that the vendors have
- * gotten their act together on Win64, so bypass all this rubbish
- * on that platform.
- */
-
-#if !defined(_WIN64)
- if (timeInfo.perfCounterAvailable
- /*
- * The following lines would do an exact match on crystal
- * frequency:
- * && timeInfo.nominalFreq.QuadPart != (long long)1193182
- * && timeInfo.nominalFreq.QuadPart != (long long)3579545
- */
- && timeInfo.nominalFreq.QuadPart > (long long) 15000000){
- /*
- * As an exception, if every logical processor on the system
- * is on the same chip, we use the performance counter anyway,
- * presuming that everyone's TSC is locked to the same
- * oscillator.
- */
-
- SYSTEM_INFO systemInfo;
- int regs[4];
-
- GetSystemInfo(&systemInfo);
- if (TclWinCPUID(0, regs) == TCL_OK
- && regs[1] == 0x756E6547 /* "Genu" */
- && regs[3] == 0x49656E69 /* "ineI" */
- && regs[2] == 0x6C65746E /* "ntel" */
- && TclWinCPUID(1, regs) == TCL_OK
- && ((regs[0]&0x00000F00) == 0x00000F00 /* Pentium 4 */
- || ((regs[0] & 0x00F00000) /* Extended family */
- && (regs[3] & 0x10000000))) /* Hyperthread */
- && (((regs[1]&0x00FF0000) >> 16)/* CPU count */
- == (int)systemInfo.dwNumberOfProcessors)) {
- timeInfo.perfCounterAvailable = TRUE;
- } else {
- timeInfo.perfCounterAvailable = FALSE;
- }
- }
-#endif /* above code is Win32 only */
-
/*
* If the performance counter is available, start a thread to
* calibrate it.
*/
- if (timeInfo.perfCounterAvailable) {
+ if (IsPerfCounterAvailable()) {
DWORD id;
InitializeCriticalSection(&timeInfo.cs);
@@ -578,7 +624,8 @@ NativeGetMicroseconds(void)
ULONGLONG fileTimeLastCall;
LONGLONG perfCounterLastCall, curCounterFreq;
- /* Copy with current data of calibration cycle */
+ /* Local copies of the current calibration
+ * cycle data. */
LARGE_INTEGER curCounter;
/* Current performance counter. */
@@ -588,6 +635,7 @@ NativeGetMicroseconds(void)
/*
* Hold time section locked as short as possible
*/
+
EnterCriticalSection(&timeInfo.cs);
fileTimeLastCall = timeInfo.fileTimeLastCall.QuadPart;
@@ -599,8 +647,12 @@ NativeGetMicroseconds(void)
/*
* If calibration cycle occurred after we get curCounter
*/
+
if (curCounter.QuadPart <= perfCounterLastCall) {
- /* Calibrated file-time is saved from posix in 100-ns ticks */
+ /*
+ * Calibrated file-time is stored as 100-ns ticks since the POSIX epoch
+ */
+
return fileTimeLastCall / 10;
}
@@ -615,11 +667,14 @@ NativeGetMicroseconds(void)
*/
if (curCounter.QuadPart - perfCounterLastCall <
- 11 * curCounterFreq * timeInfo.calibrationInterv / 10
- ) {
- /* Calibrated file-time is saved from posix in 100-ns ticks */
+ 11 * curCounterFreq * timeInfo.calibrationInterv / 10) {
+ /*
+ * Calibrated file-time is stored as 100-ns ticks since the POSIX epoch.
+ */
+
return NativeCalc100NsTicks(fileTimeLastCall,
- perfCounterLastCall, curCounterFreq, curCounter.QuadPart) / 10;
+ perfCounterLastCall, curCounterFreq,
+ curCounter.QuadPart) / 10;
}
}
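The guard above only trusts this extrapolation while the elapsed counter ticks stay within 110% of the calibration interval; beyond that the extrapolated value is not returned from this branch. For a hypothetical 10 MHz counter and a 1-second calibration interval the cutoff works out to 11 * 10000000 * 1 / 10 = 11000000 ticks, i.e. 1.1 seconds.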
@@ -656,18 +711,20 @@ NativeGetTime(
/*
* Try to use high resolution timer.
*/
- if ( (usecSincePosixEpoch = NativeGetMicroseconds()) ) {
+
+ usecSincePosixEpoch = NativeGetMicroseconds();
+ if (usecSincePosixEpoch) {
timePtr->sec = (long) (usecSincePosixEpoch / 1000000);
timePtr->usec = (unsigned long) (usecSincePosixEpoch % 1000000);
} else {
/*
- * High resolution timer is not available. Just use ftime.
- */
+ * High resolution timer is not available. Just use ftime.
+ */
struct _timeb t;
_ftime(&t);
- timePtr->sec = (long)t.time;
+ timePtr->sec = (long) t.time;
timePtr->usec = t.millitm * 1000;
}
}
@@ -735,9 +792,9 @@ TclpGetDate(
struct tm *tmPtr;
time_t time;
#if defined(_WIN64) || (defined(_USE_64BIT_TIME_T) || (defined(_MSC_VER) && _MSC_VER < 1400))
-# define t2 *t /* no need to cripple time to 32-bit */
+# define t2 *t /* no need to cripple time to 32-bit */
#else
- time_t t2 = *(__time32_t *)t;
+ time_t t2 = *(__time32_t *) t;
#endif
if (!useGMT) {
@@ -789,14 +846,14 @@ TclpGetDate(
time -= 60;
}
- time = tmPtr->tm_min + time/60;
+ time = tmPtr->tm_min + time / 60;
tmPtr->tm_min = (int)(time % 60);
if (tmPtr->tm_min < 0) {
tmPtr->tm_min += 60;
time -= 60;
}
- time = tmPtr->tm_hour + time/60;
+ time = tmPtr->tm_hour + time / 60;
tmPtr->tm_hour = (int)(time % 24);
if (tmPtr->tm_hour < 0) {
tmPtr->tm_hour += 24;
@@ -804,9 +861,9 @@ TclpGetDate(
}
time /= 24;
- tmPtr->tm_mday += (int)time;
- tmPtr->tm_yday += (int)time;
- tmPtr->tm_wday = (tmPtr->tm_wday + (int)time) % 7;
+ tmPtr->tm_mday += (int) time;
+ tmPtr->tm_yday += (int) time;
+ tmPtr->tm_wday = (tmPtr->tm_wday + (int) time) % 7;
}
} else {
tmPtr = ComputeGMT(&t2);
@@ -847,8 +904,8 @@ ComputeGMT(
* Compute the 4 year span containing the specified time.
*/
- tmp = (long)(*tp / SECSPER4YEAR);
- rem = (long)(*tp % SECSPER4YEAR);
+ tmp = (long) (*tp / SECSPER4YEAR);
+ rem = (long) (*tp % SECSPER4YEAR);
/*
* Correct for weird mod semantics so the remainder is always positive.
@@ -915,7 +972,7 @@ ComputeGMT(
* Compute day of week. Epoch started on a Thursday.
*/
- tmPtr->tm_wday = (long)(*tp / SECSPERDAY) + 4;
+ tmPtr->tm_wday = (long) (*tp / SECSPERDAY) + 4;
if ((*tp % SECSPERDAY) < 0) {
tmPtr->tm_wday--;
}
@@ -970,7 +1027,11 @@ CalibrationThread(
QueryPerformanceFrequency(&timeInfo.curCounterFreq);
timeInfo.fileTimeLastCall.LowPart = curFileTime.dwLowDateTime;
timeInfo.fileTimeLastCall.HighPart = curFileTime.dwHighDateTime;
- /* Calibrated file-time will be saved from posix in 100-ns ticks */
+
+ /*
+ * Calibrated file-time will be stored as 100-ns ticks since the POSIX epoch.
+ */
+
timeInfo.fileTimeLastCall.QuadPart -= timeInfo.posixEpoch.QuadPart;
ResetCounterSamples(timeInfo.fileTimeLastCall.QuadPart,
@@ -1030,10 +1091,11 @@ UpdateTimeEachSecond(void)
/* Current value returned from
* QueryPerformanceCounter. */
FILETIME curSysTime; /* Current system time. */
- static LARGE_INTEGER lastFileTime; /* File time of the previous calibration */
+ static LARGE_INTEGER lastFileTime;
+ /* File time of the previous calibration */
LARGE_INTEGER curFileTime; /* File time at the time this callback was
* scheduled. */
- long long estFreq; /* Estimated perf counter frequency. */
+ long long estFreq; /* Estimated perf counter frequency. */
long long vt0; /* Tcl time right now. */
long long vt1; /* Tcl time one second from now. */
long long tdiff; /* Difference between system clock and Tcl
@@ -1049,12 +1111,17 @@ UpdateTimeEachSecond(void)
curFileTime.LowPart = curSysTime.dwLowDateTime;
curFileTime.HighPart = curSysTime.dwHighDateTime;
curFileTime.QuadPart -= timeInfo.posixEpoch.QuadPart;
- /* If calibration still not needed (check for possible time switch) */
- if ( curFileTime.QuadPart > lastFileTime.QuadPart
- && curFileTime.QuadPart < lastFileTime.QuadPart +
- (timeInfo.calibrationInterv * 10000000)
- ) {
- /* again in next one second */
+
+ /*
+ * If calibration is still not needed (also check for a possible time switch)
+ */
+
+ if (curFileTime.QuadPart > lastFileTime.QuadPart && curFileTime.QuadPart <
+ lastFileTime.QuadPart + (timeInfo.calibrationInterv * 10000000)) {
+ /*
+ * Check again in the next second.
+ */
+
return;
}
QueryPerformanceCounter(&curPerfCounter);
@@ -1110,8 +1177,9 @@ UpdateTimeEachSecond(void)
*/
vt0 = NativeCalc100NsTicks(timeInfo.fileTimeLastCall.QuadPart,
- timeInfo.perfCounterLastCall.QuadPart, timeInfo.curCounterFreq.QuadPart,
- curPerfCounter.QuadPart);
+ timeInfo.perfCounterLastCall.QuadPart,
+ timeInfo.curCounterFreq.QuadPart, curPerfCounter.QuadPart);
+
/*
* If we've gotten more than a second away from system time, then drifting
* the clock is going to be pretty hopeless. Just let it jump. Otherwise,
@@ -1120,17 +1188,25 @@ UpdateTimeEachSecond(void)
tdiff = vt0 - curFileTime.QuadPart;
if (tdiff > 10000000 || tdiff < -10000000) {
- /* jump to current system time, use curent estimated frequency */
+ /*
+ * Jump to current system time, use the current estimated frequency.
+ */
+
vt0 = curFileTime.QuadPart;
} else {
- /* calculate new frequency and estimate drift to the next second */
+ /*
+ * Calculate new frequency and estimate drift to the next second.
+ */
+
vt1 = 20000000 + curFileTime.QuadPart;
driftFreq = (estFreq * 20000000 / (vt1 - vt0));
+
/*
- * Avoid too large drifts (only half of the current difference),
- * that allows also be more accurate (aspire to the smallest tdiff),
- * so then we can prolong calibration interval by tdiff < 100000
+ * Avoid too large drifts (take only half of the current difference);
+ * this also lets us be more accurate (aspire to the smallest tdiff),
+ * so we can then prolong the calibration interval while tdiff < 100000.
*/
+
driftFreq = timeInfo.curCounterFreq.QuadPart +
(driftFreq - timeInfo.curCounterFreq.QuadPart) / 2;
@@ -1138,50 +1214,78 @@ UpdateTimeEachSecond(void)
* Average between estimated, 2 current and 5 drifted frequencies,
* (do the soft drifting as possible)
*/
- estFreq = (estFreq + 2 * timeInfo.curCounterFreq.QuadPart + 5 * driftFreq) / 8;
+
+ estFreq = (estFreq + 2 * timeInfo.curCounterFreq.QuadPart +
+ 5 * driftFreq) / 8;
}
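A worked illustration of the two smoothing steps above, with entirely hypothetical numbers: the freshly computed drift frequency is first pulled halfway back toward the frequency currently in use, and the final estimate is then a weighted average of the estimated, current and drifted frequencies (weights 1, 2 and 5):

    #include <stdio.h>

    int
    main(void)
    {
        /* Hypothetical values around a nominal 10 MHz counter. */
        long long curCounterFreq = 10000000;    /* frequency used so far */
        long long estFreq = 10000200;           /* estimate computed earlier */
        long long driftFreq = 10000600;         /* raw drift-corrected frequency */

        /* Take only half of the computed drift... */
        driftFreq = curCounterFreq + (driftFreq - curCounterFreq) / 2;
        /* ...then average: (1*estimated + 2*current + 5*drifted) / 8. */
        estFreq = (estFreq + 2 * curCounterFreq + 5 * driftFreq) / 8;

        printf("driftFreq = %lld\n", driftFreq);    /* 10000300 */
        printf("estFreq   = %lld\n", estFreq);      /* 10000212 */
        return 0;
    }

With these inputs the final estimate moves only about a third of the raw 60 ppm drift away from the frequency in use, which is the gentle correction the surrounding comments aim for.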
- /* Avoid too large discrepancy from nominal frequency */
- if (estFreq > 1003*timeInfo.nominalFreq.QuadPart/1000) {
- estFreq = 1003*timeInfo.nominalFreq.QuadPart/1000;
+ /*
+ * Avoid too large discrepancy from nominal frequency.
+ */
+
+ if (estFreq > 1003 * timeInfo.nominalFreq.QuadPart / 1000) {
+ estFreq = 1003 * timeInfo.nominalFreq.QuadPart / 1000;
vt0 = curFileTime.QuadPart;
- } else if (estFreq < 997*timeInfo.nominalFreq.QuadPart/1000) {
- estFreq = 997*timeInfo.nominalFreq.QuadPart/1000;
+ } else if (estFreq < 997 * timeInfo.nominalFreq.QuadPart / 1000) {
+ estFreq = 997 * timeInfo.nominalFreq.QuadPart / 1000;
vt0 = curFileTime.QuadPart;
} else if (vt0 != curFileTime.QuadPart) {
/*
- * Be sure the clock ticks never backwards (avoid it by negative drifting)
- * just compare native time (in 100-ns) before and hereafter using
- * new calibrated values) and do a small adjustment (short time freeze)
+ * Be sure the clock never ticks backwards (avoid it by negative
+ * drifting): just compare the native time (in 100-ns) before and
+ * after applying the new calibrated values and do a small adjustment
+ * (a short time freeze) if needed.
*/
+
LARGE_INTEGER newPerfCounter;
long long nt0, nt1;
QueryPerformanceCounter(&newPerfCounter);
nt0 = NativeCalc100NsTicks(timeInfo.fileTimeLastCall.QuadPart,
- timeInfo.perfCounterLastCall.QuadPart, timeInfo.curCounterFreq.QuadPart,
- newPerfCounter.QuadPart);
+ timeInfo.perfCounterLastCall.QuadPart,
+ timeInfo.curCounterFreq.QuadPart, newPerfCounter.QuadPart);
nt1 = NativeCalc100NsTicks(vt0,
- curPerfCounter.QuadPart, estFreq,
- newPerfCounter.QuadPart);
- if (nt0 > nt1) { /* drifted backwards, try to compensate with new base */
- /* first adjust with a micro jump (short frozen time is acceptable) */
+ curPerfCounter.QuadPart, estFreq, newPerfCounter.QuadPart);
+ if (nt0 > nt1) {
+ /*
+ * Drifted backwards, try to compensate with new base.
+ *
+ * First adjust with a micro jump (short frozen time is
+ * acceptable).
+ */
vt0 += nt0 - nt1;
- /* if drift unavoidable (e. g. we had a time switch), then reset it */
+
+ /*
+ * If the drift is unavoidable (e.g. we had a time switch), then
+ * reset it.
+ */
+
vt1 = vt0 - curFileTime.QuadPart;
if (vt1 > 10000000 || vt1 < -10000000) {
- /* larger jump resp. shift relative new file-time */
+ /*
+ * Make a larger jump, i.e. shift to the new file-time.
+ */
+
vt0 = curFileTime.QuadPart;
}
}
}
- /* In lock commit new values to timeInfo (hold lock as short as possible) */
+ /*
+ * Commit the new values to timeInfo under the lock (hold it as briefly as possible)
+ */
+
EnterCriticalSection(&timeInfo.cs);
- /* grow calibration interval up to 10 seconds (if still precise enough) */
+ /*
+ * Grow calibration interval up to 10 seconds (if still precise enough)
+ */
+
if (tdiff < -100000 || tdiff > 100000) {
- /* too long drift - reset calibration interval to 1000 second */
+ /*
+ * Drift too large; reset the calibration interval to 1 second.
+ */
+
timeInfo.calibrationInterv = 1;
} else if (timeInfo.calibrationInterv < 10) {
timeInfo.calibrationInterv++;
@@ -1215,12 +1319,13 @@ UpdateTimeEachSecond(void)
static void
ResetCounterSamples(
- unsigned long long fileTime, /* Current file time */
+ unsigned long long fileTime,/* Current file time */
long long perfCounter, /* Current performance counter */
- long long perfFreq) /* Target performance frequency */
+ long long perfFreq) /* Target performance frequency */
{
int i;
- for (i=SAMPLES-1 ; i>=0 ; --i) {
+
+ for (i = SAMPLES - 1 ; i >= 0 ; --i) {
timeInfo.perfCounterSample[i] = perfCounter;
timeInfo.fileTimeSample[i] = fileTime;
perfCounter -= perfFreq;
@@ -1260,15 +1365,17 @@ AccumulateSample(
long long perfCounter,
unsigned long long fileTime)
{
- unsigned long long workFTSample; /* File time sample being removed from or
+ unsigned long long workFTSample;
+ /* File time sample being removed from or
* added to the circular buffer. */
long long workPCSample; /* Performance counter sample being removed
* from or added to the circular buffer. */
- unsigned long long lastFTSample; /* Last file time sample recorded */
+ unsigned long long lastFTSample;
+ /* Last file time sample recorded */
long long lastPCSample; /* Last performance counter sample recorded */
long long FTdiff; /* Difference between last FT and current */
long long PCdiff; /* Difference between last PC and current */
- long long estFreq; /* Estimated performance counter frequency */
+ long long estFreq; /* Estimated performance counter frequency */
/*
* Test for jumps and reset the samples if we have one.
@@ -1347,8 +1454,8 @@ TclpGmtime(
#if defined(_WIN64) || defined(_USE_64BIT_TIME_T) || (defined(_MSC_VER) && _MSC_VER < 1400)
return gmtime(timePtr);
#else
- return _gmtime32((const __time32_t *)timePtr);
-#endif
+ return _gmtime32((const __time32_t *) timePtr);
+#endif /* _WIN64 || _USE_64BIT_TIME_T || _MSC_VER < 1400 */
}
/*
@@ -1382,8 +1489,8 @@ TclpLocaltime(
#if defined(_WIN64) || defined(_USE_64BIT_TIME_T) || (defined(_MSC_VER) && _MSC_VER < 1400)
return localtime(timePtr);
#else
- return _localtime32((const __time32_t *)timePtr);
-#endif
+ return _localtime32((const __time32_t *) timePtr);
+#endif /* _WIN64 || _USE_64BIT_TIME_T || _MSC_VER < 1400 */
}
#endif /* TCL_NO_DEPRECATED */