author     culler <culler>    2020-07-18 16:26:14 (GMT)
committer  culler <culler>    2020-07-18 16:26:14 (GMT)
commit     fb48c78e68e6318476a8076f01661aa0626eb6ee (patch)
tree       1632e82df98c332bbeae38baa5f94a3d8a8637e3
parent     8ca73e50abb36b0005e07753db3412bcc7d8cd07 (diff)
parent     4bca76be3bbc5ddc9b2e49ed021b0f84fa09812b (diff)
Merge 8.6
-rw-r--r--    macosx/tclMacOSXNotify.c    127
1 file changed, 87 insertions(+), 40 deletions(-)
diff --git a/macosx/tclMacOSXNotify.c b/macosx/tclMacOSXNotify.c
index 8f1dbba..ee271e1 100644
--- a/macosx/tclMacOSXNotify.c
+++ b/macosx/tclMacOSXNotify.c
@@ -14,6 +14,18 @@
*/
#include "tclInt.h"
+
+/*
+ * In macOS 10.12 the os_unfair_lock was introduced as a replacement for the
+ * OSSpinLock, and the OSSpinLock was deprecated.
+ */
+
+#if MAC_OS_X_VERSION_MIN_REQUIRED >= 101200
+#define USE_OS_UNFAIR_LOCK
+#include <os/lock.h>
+#undef TCL_MAC_DEBUG_NOTIFIER
+#endif
+
#ifdef HAVE_COREFOUNDATION /* Traditional unix select-based notifier is
* in tclUnixNotfy.c */
#include <CoreFoundation/CoreFoundation.h>
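The guard added above selects os_unfair_lock whenever the deployment target is macOS 10.12 or later. For readers unfamiliar with that API, here is a minimal stand-alone sketch (not part of the patch; demoLock and DemoCriticalSection are illustrative names only) of the basic calls; the patch itself uses only the lock and unlock operations:

#include <os/lock.h>

static os_unfair_lock demoLock = OS_UNFAIR_LOCK_INIT;

static void
DemoCriticalSection(void)
{
    os_unfair_lock_lock(&demoLock);	/* blocks until the lock is acquired */
    /* ... touch shared state here ... */
    os_unfair_lock_unlock(&demoLock);

    if (os_unfair_lock_trylock(&demoLock)) {	/* non-blocking attempt */
	os_unfair_lock_unlock(&demoLock);
    }
}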
@@ -21,6 +33,8 @@
/* #define TCL_MAC_DEBUG_NOTIFIER 1 */
+#if !defined(USE_OS_UNFAIR_LOCK)
+
/*
* We use the Darwin-native spinlock API rather than pthread mutexes for
* notifier locking: this radically simplifies the implementation and lowers
@@ -172,26 +186,45 @@ SpinLockTry(
#pragma GCC diagnostic pop
#endif /* HAVE_LIBKERN_OSATOMIC_H && HAVE_OSSPINLOCKLOCK */
+#endif /* not using os_unfair_lock */
/*
- * These spinlocks lock access to the global notifier state.
+ * These locks control access to the global notifier state.
*/
+#if defined(USE_OS_UNFAIR_LOCK)
+static os_unfair_lock notifierInitLock = OS_UNFAIR_LOCK_INIT;
+static os_unfair_lock notifierLock = OS_UNFAIR_LOCK_INIT;
+#else
static OSSpinLock notifierInitLock = SPINLOCK_INIT;
static OSSpinLock notifierLock = SPINLOCK_INIT;
+#endif
/*
- * Macros abstracting notifier locking/unlocking
+ * Macros that abstract notifier locking/unlocking
*/
+#if defined(USE_OS_UNFAIR_LOCK)
+#define LOCK_NOTIFIER_INIT os_unfair_lock_lock(&notifierInitLock)
+#define UNLOCK_NOTIFIER_INIT os_unfair_lock_unlock(&notifierInitLock)
+#define LOCK_NOTIFIER os_unfair_lock_lock(&notifierLock)
+#define UNLOCK_NOTIFIER os_unfair_lock_unlock(&notifierLock)
+#define LOCK_NOTIFIER_TSD os_unfair_lock_lock(&tsdPtr->tsdLock)
+#define UNLOCK_NOTIFIER_TSD os_unfair_lock_unlock(&tsdPtr->tsdLock)
+#else
#define LOCK_NOTIFIER_INIT SpinLockLock(&notifierInitLock)
#define UNLOCK_NOTIFIER_INIT SpinLockUnlock(&notifierInitLock)
#define LOCK_NOTIFIER SpinLockLock(&notifierLock)
#define UNLOCK_NOTIFIER SpinLockUnlock(&notifierLock)
#define LOCK_NOTIFIER_TSD SpinLockLock(&tsdPtr->tsdLock)
#define UNLOCK_NOTIFIER_TSD SpinLockUnlock(&tsdPtr->tsdLock)
+#endif
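As an aside, a minimal sketch of the pattern these macros implement (not part of the patch; LOCK_DEMO, demoStateLock, demoState and DemoBumpState are illustrative, and the OSSpinLock branch assumes the SpinLockLock/SpinLockUnlock/SPINLOCK_INIT wrappers defined earlier in this file): callers stay identical whichever lock type was configured, because only the macro definitions change.

#if defined(USE_OS_UNFAIR_LOCK)
static os_unfair_lock demoStateLock = OS_UNFAIR_LOCK_INIT;
#define LOCK_DEMO()	os_unfair_lock_lock(&demoStateLock)
#define UNLOCK_DEMO()	os_unfair_lock_unlock(&demoStateLock)
#else
static OSSpinLock demoStateLock = SPINLOCK_INIT;
#define LOCK_DEMO()	SpinLockLock(&demoStateLock)
#define UNLOCK_DEMO()	SpinLockUnlock(&demoStateLock)
#endif

static int demoState = 0;

static void
DemoBumpState(void)
{
    LOCK_DEMO();
    demoState++;		/* guarded regardless of the lock flavor */
    UNLOCK_DEMO();
}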
-#ifdef TCL_MAC_DEBUG_NOTIFIER
+/*
+ * The debug version of the Notifier only works if using OSSpinLock.
+ */
+
+#if defined(TCL_MAC_DEBUG_NOTIFIER) && !defined(USE_OS_UNFAIR_LOCK)
#define TclMacOSXNotifierDbgMsg(m, ...) \
do { \
fprintf(notifierLog?notifierLog:stderr, "tclMacOSXNotify.c:%d: " \
@@ -218,7 +251,7 @@ static OSSpinLock notifierLock = SPINLOCK_INIT;
#undef LOCK_NOTIFIER
#define LOCK_NOTIFIER SpinLockLockDbg(&notifierLock)
#undef LOCK_NOTIFIER_TSD
-#define LOCK_NOTIFIER_TSD SpinLockLockDbg(&tsdPtr->tsdLock)
+#define LOCK_NOTIFIER_TSD SpinLockLockDbg(tsdPtr->tsdLock)
#include <asl.h>
static FILE *notifierLog = NULL;
#ifndef NOTIFIER_LOG
@@ -325,8 +358,6 @@ typedef struct ThreadSpecificData {
int runLoopRunning; /* True if this thread's Tcl runLoop is
* running. */
int runLoopNestingLevel; /* Level of nested runLoop invocations. */
- int runLoopServicingEvents; /* True if this thread's runLoop is servicing
- * Tcl events. */
/* Must hold the notifierLock before accessing the following fields: */
/* Start notifierLock section */
@@ -339,9 +370,14 @@ typedef struct ThreadSpecificData {
* from these pointers. */
/* End notifierLock section */
+#if defined(USE_OS_UNFAIR_LOCK)
+ os_unfair_lock tsdLock;
+#else
OSSpinLock tsdLock; /* Must hold this lock before accessing the
* following fields from more than one
* thread. */
+#endif
+
/* Start tsdLock section */
SelectMasks checkMasks; /* This structure is used to build up the
* masks to be used in the next call to
@@ -526,7 +562,6 @@ Tcl_InitNotifier(void)
/*
* Initialize support for weakly imported spinlock API.
*/
-
if (pthread_once(&spinLockLockInitControl, SpinLockLockInit)) {
Tcl_Panic("Tcl_InitNotifier: pthread_once failed");
}
@@ -563,7 +598,7 @@ Tcl_InitNotifier(void)
bzero(&runLoopObserverContext, sizeof(CFRunLoopObserverContext));
runLoopObserverContext.info = tsdPtr;
runLoopObserver = CFRunLoopObserverCreate(NULL,
- kCFRunLoopEntry|kCFRunLoopExit|kCFRunLoopBeforeWaiting, TRUE,
+ kCFRunLoopEntry|kCFRunLoopExit, TRUE,
LONG_MIN, UpdateWaitingListAndServiceEvents,
&runLoopObserverContext);
if (!runLoopObserver) {
@@ -581,7 +616,7 @@ Tcl_InitNotifier(void)
*/
runLoopObserverTcl = CFRunLoopObserverCreate(NULL,
- kCFRunLoopEntry|kCFRunLoopExit|kCFRunLoopBeforeWaiting, TRUE,
+ kCFRunLoopEntry|kCFRunLoopExit, TRUE,
LONG_MIN, UpdateWaitingListAndServiceEvents,
&runLoopObserverContext);
if (!runLoopObserverTcl) {
@@ -597,7 +632,11 @@ Tcl_InitNotifier(void)
tsdPtr->runLoopObserverTcl = runLoopObserverTcl;
tsdPtr->runLoopTimer = NULL;
tsdPtr->waitTime = CF_TIMEINTERVAL_FOREVER;
+#if defined(USE_OS_UNFAIR_LOCK)
+ tsdPtr->tsdLock = OS_UNFAIR_LOCK_INIT;
+#else
tsdPtr->tsdLock = SPINLOCK_INIT;
+#endif
}
LOCK_NOTIFIER_INIT;
@@ -655,7 +694,6 @@ Tcl_InitNotifier(void)
ENABLE_ASL;
notifierCount++;
UNLOCK_NOTIFIER_INIT;
-
return tsdPtr;
}
@@ -1291,6 +1329,10 @@ Tcl_WaitForEvent(
Tcl_Panic("Tcl_WaitForEvent: Notifier not initialized");
}
+ /*
+ * A NULL timePtr means wait forever.
+ */
+
if (timePtr) {
Tcl_Time vTime = *timePtr;
@@ -1304,14 +1346,23 @@ Tcl_WaitForEvent(
tclScaleTimeProcPtr(&vTime, tclTimeClientData);
waitTime = vTime.sec + 1.0e-6 * vTime.usec;
} else {
+
/*
- * Polling: pretend to wait for files and tell the notifier thread
- * what we are doing. The notifier thread makes sure it goes
- * through select with its select mask in the same state as ours
- * currently is. We block until that happens.
+ * The max block time was set to 0.
+ *
+ * If we set the waitTime to 0, then the call to CFRunLoopInMode
+ * may return without processing all of its sources. The Apple
+ * documentation says that if the waitTime is 0 "only one pass is
+ * made through the run loop before returning; if multiple sources
+ * or timers are ready to fire immediately, only one (possibly two
+ * if one is a version 0 source) will be fired, regardless of the
+ * value of returnAfterSourceHandled." This can cause some chanio
+ * tests to fail. So we use a small positive waitTime unless there
+ * is another RunLoop running.
*/
polling = 1;
+ waitTime = tsdPtr->runLoopRunning ? 0 : 0.0001;
}
}
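For clarity, here is a stand-alone sketch of the timeout choice the new comment describes (not from the patch; DemoPoll and its parameter are illustrative): a zero timeout makes CFRunLoopRunInMode perform only one pass and can leave ready sources unfired, so a tiny positive timeout is used unless a nested run loop is already active.

#include <CoreFoundation/CoreFoundation.h>

static SInt32
DemoPoll(
    int nestedRunLoopActive)	/* nonzero when another run loop is running */
{
    /* 0 => single pass through the loop; 0.0001 => let ready sources fire. */
    CFTimeInterval waitTime = nestedRunLoopActive ? 0 : 0.0001;

    return CFRunLoopRunInMode(kCFRunLoopDefaultMode, waitTime, TRUE);
}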
@@ -1324,18 +1375,18 @@ Tcl_WaitForEvent(
/*
* If the Tcl runloop is already running (e.g. if Tcl_WaitForEvent was
- * called recursively) or is servicing events via the runloop observer,
- * re-run it in a custom runloop mode containing only the source for the
- * notifier thread, otherwise wakeups from other sources added to the
- * common runloop modes might get lost or 3rd party event handlers might
- * get called when they do not expect to be.
+ * called recursively), start a new runloop in a custom runloop mode
+ * containing only the source for the notifier thread. Otherwise wakeups
+ * from other sources added to the common runloop mode might get lost or
+ * 3rd party event handlers might get called when they do not expect to
+ * be.
*/
runLoopRunning = tsdPtr->runLoopRunning;
tsdPtr->runLoopRunning = 1;
- runLoopStatus = CFRunLoopRunInMode(tsdPtr->runLoopServicingEvents ||
- runLoopRunning ? tclEventsOnlyRunLoopMode : kCFRunLoopDefaultMode,
- waitTime, TRUE);
+ runLoopStatus = CFRunLoopRunInMode(
+ runLoopRunning ? tclEventsOnlyRunLoopMode : kCFRunLoopDefaultMode,
+ waitTime, TRUE);
tsdPtr->runLoopRunning = runLoopRunning;
LOCK_NOTIFIER_TSD;
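To illustrate the idea in the rewritten comment, here is a stand-alone sketch (not from the patch; the mode name, source argument and function are hypothetical) of running a private run-loop mode that contains only a single source, so the default mode is never re-entered and unrelated handlers cannot fire:

#include <CoreFoundation/CoreFoundation.h>

static const CFStringRef demoPrivateMode = CFSTR("DemoPrivateRunLoopMode");

static SInt32
DemoRunNested(
    CFRunLoopSourceRef source,	/* the one source to service */
    int alreadyRunning,		/* default mode is active further up the stack */
    CFTimeInterval waitTime)
{
    /* Make the source available in the private mode as well. */
    CFRunLoopAddSource(CFRunLoopGetCurrent(), source, demoPrivateMode);

    return CFRunLoopRunInMode(
	    alreadyRunning ? demoPrivateMode : kCFRunLoopDefaultMode,
	    waitTime, TRUE);
}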
@@ -1453,7 +1504,6 @@ UpdateWaitingListAndServiceEvents(
void *info)
{
ThreadSpecificData *tsdPtr = (ThreadSpecificData *)info;
-
if (tsdPtr->sleeping) {
return;
}
@@ -1476,19 +1526,6 @@ UpdateWaitingListAndServiceEvents(
}
tsdPtr->runLoopNestingLevel--;
break;
- case kCFRunLoopBeforeWaiting:
- if (tsdPtr->runLoopTimer && !tsdPtr->runLoopServicingEvents &&
- (tsdPtr->runLoopNestingLevel > 1
- || !tsdPtr->runLoopRunning)) {
- tsdPtr->runLoopServicingEvents = 1;
- /*
- * This call seems to simply force event processing through and
- * prevents hangups that have long been observed with Tk-Cocoa.
- */
- Tcl_ServiceAll();
- tsdPtr->runLoopServicingEvents = 0;
- }
- break;
default:
break;
}
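For context, a stand-alone sketch (not from the patch; the callback and helper names are illustrative) of creating and installing a run-loop observer limited to the entry and exit activities, which is all the patched observers now watch for:

#include <CoreFoundation/CoreFoundation.h>

static void
DemoObserverProc(
    CFRunLoopObserverRef observer,
    CFRunLoopActivity activity,
    void *info)
{
    (void) observer;
    (void) info;

    switch (activity) {
    case kCFRunLoopEntry:
	/* A pass through the run loop is starting. */
	break;
    case kCFRunLoopExit:
	/* The run loop is about to return to its caller. */
	break;
    default:
	break;
    }
}

static void
DemoInstallObserver(void)
{
    CFRunLoopObserverRef observer = CFRunLoopObserverCreate(NULL,
	    kCFRunLoopEntry|kCFRunLoopExit, TRUE, 0,
	    DemoObserverProc, NULL);

    if (observer) {
	CFRunLoopAddObserver(CFRunLoopGetCurrent(), observer,
		kCFRunLoopCommonModes);
	CFRelease(observer);
    }
}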
@@ -1521,7 +1558,7 @@ OnOffWaitingList(
{
int changeWaitingList;
-#ifdef TCL_MAC_DEBUG_NOTIFIER
+#if defined(TCL_MAC_DEBUG_NOTIFIER) && !defined(USE_OS_UNFAIR_LOCK)
if (SpinLockTry(&notifierLock)) {
Tcl_Panic("OnOffWaitingList: notifierLock unlocked");
}
@@ -2052,9 +2089,19 @@ AtForkChild(void)
{
ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);
- UNLOCK_NOTIFIER_TSD;
- UNLOCK_NOTIFIER;
- UNLOCK_NOTIFIER_INIT;
+ /*
+ * If a child process unlocks an os_unfair_lock that was created in its parent,
+ * the child will exit with an illegal instruction error. So we reinitialize
+ * the lock in the child rather than attempt to unlock it.
+ */
+
+#if defined(USE_OS_UNFAIR_LOCK)
+ tsdPtr->tsdLock = OS_UNFAIR_LOCK_INIT;
+#else
+ UNLOCK_NOTIFIER_TSD;
+ UNLOCK_NOTIFIER;
+ UNLOCK_NOTIFIER_INIT;
+#endif
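To make the fork-safety idea above concrete, here is a stand-alone sketch (not from the patch; the lock and handler names are hypothetical) of registering atfork handlers that reinitialize an os_unfair_lock in the child instead of unlocking it:

#include <os/lock.h>
#include <pthread.h>

static os_unfair_lock demoForkLock = OS_UNFAIR_LOCK_INIT;

static void
DemoAtForkPrepare(void)
{
    os_unfair_lock_lock(&demoForkLock);		/* quiesce before fork() */
}

static void
DemoAtForkParent(void)
{
    os_unfair_lock_unlock(&demoForkLock);	/* the parent still owns it */
}

static void
DemoAtForkChild(void)
{
    /* Unlocking an inherited lock can abort the child; start fresh instead. */
    demoForkLock = OS_UNFAIR_LOCK_INIT;
}

static void
DemoInstallForkHandlers(void)
{
    (void) pthread_atfork(DemoAtForkPrepare, DemoAtForkParent,
	    DemoAtForkChild);
}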
if (tsdPtr->runLoop) {
tsdPtr->runLoop = NULL;
if (!noCFafterFork) {