-rw-r--r--  generic/tcl.h         2
-rw-r--r--  generic/tclIO.c      34
-rw-r--r--  generic/tclInt.h     27
-rw-r--r--  generic/tclNotify.c 486
-rw-r--r--  tests/chanio.test     7
5 files changed, 378 insertions, 178 deletions
diff --git a/generic/tcl.h b/generic/tcl.h
index 538821a..c080e93 100644
--- a/generic/tcl.h
+++ b/generic/tcl.h
@@ -1391,7 +1391,7 @@ struct Tcl_Event {
*/
typedef enum {
- TCL_QUEUE_TAIL, TCL_QUEUE_HEAD, TCL_QUEUE_MARK
+ TCL_QUEUE_TAIL, TCL_QUEUE_HEAD, TCL_QUEUE_MARK, TCL_QUEUE_RETARDED
} Tcl_QueuePosition;
/*
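
The new TCL_QUEUE_RETARDED position defers servicing of a queued event until at least the next event cycle. A minimal caller-side sketch, assuming only the enum addition above and the public Tcl_QueueEvent()/ckalloc() API (the handler and function names are hypothetical):

#include "tcl.h"

static int
ExampleRetardedProc(Tcl_Event *evPtr, int flags)
{
    /* ... handle the event ... */
    return 1;			/* non-zero: handled, the dispatcher frees it */
}

static void
QueueRetardedExample(void)
{
    Tcl_Event *evPtr = (Tcl_Event *) ckalloc(sizeof(Tcl_Event));

    evPtr->proc = ExampleRetardedProc;
    /* Held on a side list; spliced onto the queue tail at the next cycle. */
    Tcl_QueueEvent(evPtr, TCL_QUEUE_RETARDED);
}
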
diff --git a/generic/tclIO.c b/generic/tclIO.c
index b761e1d..e2c4f32 100644
--- a/generic/tclIO.c
+++ b/generic/tclIO.c
@@ -8399,6 +8399,21 @@ Tcl_NotifyChannel(
tsdPtr->nestedHandlerPtr = nh.nestedHandlerPtr;
}
+static inline Tcl_Event *
+CreateChannelScheduledEvent(
+ Channel *chanPtr)
+{
+#ifdef SYNTHETIC_EVENT_TIME
+ Tcl_Time blckTime;
+
+ blckTime.sec = SYNTHETIC_EVENT_TIME / 1000000;
+ blckTime.usec = SYNTHETIC_EVENT_TIME % 1000000;
+ Tcl_SetMaxBlockTime(&blckTime);
+#endif
+ return TclpQueueEventClientData(ChannelScheduledProc, chanPtr,
+ TCL_QUEUE_RETARDED);
+}
+
/*
*----------------------------------------------------------------------
*
@@ -8492,12 +8507,7 @@ UpdateInterest(
mask &= ~TCL_EXCEPTION;
if (!statePtr->schedEvent) {
- Tcl_Event *evPtr = (Tcl_Event *)ckalloc(
- sizeof(Tcl_Event) + sizeof(Channel*));
- *(Channel**)(evPtr+1) = chanPtr;
- evPtr->proc = ChannelScheduledProc;
- statePtr->schedEvent = evPtr;
- Tcl_QueueEvent(evPtr, TCL_QUEUE_TAIL);
+ statePtr->schedEvent = CreateChannelScheduledEvent(chanPtr);
}
}
}
@@ -8544,13 +8554,13 @@ ChannelScheduledProc(
* before UpdateInterest gets called by Tcl_NotifyChannel.
*/
- statePtr->schedEvent->proc = ChannelScheduledProc; /* reattach to tail */
-
+ statePtr->schedEvent = CreateChannelScheduledEvent(chanPtr);
+
Tcl_Preserve(statePtr);
Tcl_NotifyChannel((Tcl_Channel) chanPtr, TCL_READABLE);
Tcl_Release(statePtr);
- return 1;
+ return 1; /* next cycle */
}
statePtr->schedEvent = NULL; /* event done. */
@@ -9179,11 +9189,7 @@ TclCopyChannel(
*/
if ((nonBlocking == CHANNEL_NONBLOCKING) && (toRead == 0)) {
- Tcl_Event *evPtr = (Tcl_Event *)ckalloc(
- sizeof(Tcl_Event) + sizeof(ClientData*));
- *(ClientData*)(evPtr+1) = csPtr;
- evPtr->proc = ZeroTransferEventProc;
- Tcl_QueueEvent(evPtr, TCL_QUEUE_TAIL);
+ TclpQueueEventClientData(ZeroTransferEventProc, csPtr, TCL_QUEUE_TAIL);
return 0;
}
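
For reference, a sketch of how a handler queued through TclpQueueEventClientData() recovers its payload: the ClientData is stored directly behind the Tcl_Event header, so it is read back with a pointer cast. The handler below is hypothetical and assumes tclIO.c's internal Channel type:

static int
ExampleChannelEventProc(Tcl_Event *evPtr, int flags)
{
    /* The ClientData payload sits immediately after the event header. */
    Channel *chanPtr = *(Channel **) (evPtr + 1);

    /* ... notify or update interest for chanPtr ... */
    return 1;			/* done; the dispatcher frees the event */
}
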
diff --git a/generic/tclInt.h b/generic/tclInt.h
index 412a60d..da16cb3 100644
--- a/generic/tclInt.h
+++ b/generic/tclInt.h
@@ -3385,6 +3385,33 @@ MODULE_SCOPE void TclSetTimerEventMarker(int flags);
MODULE_SCOPE int TclServiceTimerEvents(void);
MODULE_SCOPE int TclServiceIdleEx(int flags, int count);
MODULE_SCOPE void TclpCancelEvent(Tcl_Event *evPtr);
+static inline Tcl_Event*
+TclpQueueEventEx(
+ Tcl_EventProc *proc, /* Event function to call when servicing it. */
+ ClientData extraData, /* Extra event data to be included and its */
+ size_t extraDataSize, /* size (allocated and copied behind the event). */
+ Tcl_QueuePosition position) /* One of TCL_QUEUE_TAIL, TCL_QUEUE_HEAD,
+ * TCL_QUEUE_MARK or TCL_QUEUE_RETARDED. */
+{
+ Tcl_Event *evPtr = ckalloc(sizeof(Tcl_Event) + extraDataSize);
+ evPtr->proc = proc;
+ memcpy((evPtr+1), extraData, extraDataSize);
+ Tcl_QueueEvent(evPtr, position);
+ return evPtr;
+}
+static inline Tcl_Event*
+TclpQueueEventClientData(
+ Tcl_EventProc *proc, /* Event function to call when servicing it. */
+ ClientData clientData, /* Event client data to be included. */
+ Tcl_QueuePosition position) /* One of TCL_QUEUE_TAIL, TCL_QUEUE_HEAD,
+ * TCL_QUEUE_MARK or TCL_QUEUE_RETARDED. */
+{
+ Tcl_Event *evPtr = ckalloc(sizeof(Tcl_Event) + sizeof(clientData));
+ evPtr->proc = proc;
+ *(ClientData*)(evPtr+1) = clientData;
+ Tcl_QueueEvent(evPtr, position);
+ return evPtr;
+}
MODULE_SCOPE TclTimerEvent* TclpCreateTimerEvent(Tcl_WideInt usec,
Tcl_TimerProc *proc, Tcl_TimerDeleteProc *delProc,
size_t extraDataSize, int flags);
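
A usage sketch for the new TclpQueueEventEx() helper: since the extra data is copied behind the Tcl_Event header, the caller may pass a stack-allocated payload. The payload struct and both functions below are hypothetical:

typedef struct {
    int fd;
    int mask;
} ExamplePayload;

static int
ExamplePayloadEventProc(Tcl_Event *evPtr, int flags)
{
    ExamplePayload *payloadPtr = (ExamplePayload *) (evPtr + 1);

    /* ... service payloadPtr->fd according to payloadPtr->mask ... */
    return 1;
}

static void
QueuePayloadExample(int fd, int mask)
{
    ExamplePayload payload;

    payload.fd = fd;
    payload.mask = mask;
    /* The payload is copied into the event, so a stack address is fine. */
    TclpQueueEventEx(ExamplePayloadEventProc, &payload, sizeof(payload),
	    TCL_QUEUE_TAIL);
}
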
diff --git a/generic/tclNotify.c b/generic/tclNotify.c
index 3df10ae..c5eaf1f 100644
--- a/generic/tclNotify.c
+++ b/generic/tclNotify.c
@@ -67,10 +67,16 @@ typedef struct ThreadSpecificData {
* if none. */
Tcl_Event *timerMarkerPtr; /* Weak pointer to last event in the queue,
* before timer event generation */
+ Tcl_Event *firstRetardEv; /* First retarded event, or NULL if none. */
+ Tcl_Event *lastRetardEv; /* Last retarded event, or NULL if none. */
Tcl_Mutex queueMutex; /* Mutex to protect access to the previous
* three fields. */
+ size_t queueEpoch; /* Epoch of the queue (incremented if changed
+ * using TCL_QUEUE_HEAD or TCL_QUEUE_MARK). */
int serviceMode; /* One of TCL_SERVICE_NONE or
* TCL_SERVICE_ALL. */
+ size_t serviceLevel; /* Current (nested) level of event cycle. */
+ size_t blockTimeServLev; /* Event-cycle level at which the block time was set. */
int blockTimeSet; /* 0 means there is no maximum block time:
* block forever. */
Tcl_Time blockTime; /* If blockTimeSet is 1, gives the maximum
@@ -146,7 +152,7 @@ TclInitNotifier(void)
/* Empty loop body. */
}
- if (NULL == tsdPtr) {
+ if (NULL == tsdPtr || !tsdPtr->initialized) {
/*
* Notifier not yet initialized in this thread.
*/
@@ -203,8 +209,17 @@ TclFinalizeNotifier(void)
evPtr = evPtr->nextPtr;
ckfree(hold);
}
+ for (evPtr = tsdPtr->firstRetardEv; evPtr != NULL; ) {
+ hold = evPtr;
+ evPtr = evPtr->nextPtr;
+ ckfree(hold);
+ }
tsdPtr->firstEventPtr = NULL;
tsdPtr->lastEventPtr = NULL;
+ tsdPtr->markerEventPtr = NULL;
+ tsdPtr->timerMarkerPtr = NULL;
+ tsdPtr->firstRetardEv = NULL;
+ tsdPtr->lastRetardEv = NULL;
Tcl_MutexUnlock(&(tsdPtr->queueMutex));
Tcl_MutexLock(&listLock);
@@ -379,7 +394,7 @@ Tcl_QueueEvent(
* property of the event queue. It will be
* freed after the event has been handled. */
Tcl_QueuePosition position) /* One of TCL_QUEUE_TAIL, TCL_QUEUE_HEAD,
- * TCL_QUEUE_MARK. */
+ * TCL_QUEUE_MARK or TCL_QUEUE_RETARDED. */
{
ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);
@@ -411,7 +426,7 @@ Tcl_ThreadQueueEvent(
* property of the event queue. It will be
* freed after the event has been handled. */
Tcl_QueuePosition position) /* One of TCL_QUEUE_TAIL, TCL_QUEUE_HEAD,
- * TCL_QUEUE_MARK. */
+ * TCL_QUEUE_MARK or TCL_QUEUE_RETARDED. */
{
ThreadSpecificData *tsdPtr;
@@ -437,6 +452,39 @@ Tcl_ThreadQueueEvent(
Tcl_MutexUnlock(&listLock);
}
+static inline void
+SpliceEventTail(
+ Tcl_Event *evPtr,
+ Tcl_Event **firstEvPtr,
+ Tcl_Event **lastEvPtr)
+{
+ evPtr->nextPtr = NULL;
+ if (*firstEvPtr == NULL) {
+ *firstEvPtr = evPtr;
+ } else {
+ (*lastEvPtr)->nextPtr = evPtr;
+ }
+ *lastEvPtr = evPtr;
+}
+
+static inline void
+LinkEvent(
+ ThreadSpecificData *tsdPtr,
+ Tcl_Event *evPtr,
+ Tcl_Event *prevPtr)
+{
+ if (prevPtr) {
+ evPtr->nextPtr = prevPtr->nextPtr;
+ prevPtr->nextPtr = evPtr;
+ } else {
+ evPtr->nextPtr = tsdPtr->firstEventPtr;
+ tsdPtr->firstEventPtr = evPtr;
+ }
+ if (evPtr->nextPtr == NULL) {
+ tsdPtr->lastEventPtr = evPtr;
+ }
+}
+
/*
*----------------------------------------------------------------------
*
@@ -468,22 +516,19 @@ QueueEvent(
* property of the event queue. It will be
* freed after the event has been handled. */
Tcl_QueuePosition position) /* One of TCL_QUEUE_TAIL, TCL_QUEUE_HEAD,
- * TCL_QUEUE_MARK. */
+ * TCL_QUEUE_MARK or TCL_QUEUE_RETARDED. */
{
Tcl_MutexLock(&(tsdPtr->queueMutex));
- if (position == TCL_QUEUE_TAIL) {
+ switch (position) {
+ case TCL_QUEUE_TAIL:
/*
* Append the event on the end of the queue.
*/
- evPtr->nextPtr = NULL;
- if (tsdPtr->firstEventPtr == NULL) {
- tsdPtr->firstEventPtr = evPtr;
- } else {
- tsdPtr->lastEventPtr->nextPtr = evPtr;
- }
- tsdPtr->lastEventPtr = evPtr;
- } else if (position == TCL_QUEUE_HEAD) {
+ SpliceEventTail(evPtr, &tsdPtr->firstEventPtr, &tsdPtr->lastEventPtr);
+
+ break;
+ case TCL_QUEUE_HEAD:
/*
* Push the event on the head of the queue.
*/
@@ -499,37 +544,73 @@ QueueEvent(
tsdPtr->timerMarkerPtr = evPtr;
}
- } else if (position == TCL_QUEUE_MARK) {
+ tsdPtr->queueEpoch++; /* queue changed in the middle (not at the tail); bump the epoch */
+
+ break;
+ case TCL_QUEUE_MARK:
/*
* Insert the event after the current marker event and advance the
* marker to the new event.
*/
- if (tsdPtr->markerEventPtr == NULL) {
- evPtr->nextPtr = tsdPtr->firstEventPtr;
- tsdPtr->firstEventPtr = evPtr;
- } else {
- evPtr->nextPtr = tsdPtr->markerEventPtr->nextPtr;
- tsdPtr->markerEventPtr->nextPtr = evPtr;
- }
+ LinkEvent(tsdPtr, evPtr, tsdPtr->markerEventPtr);
tsdPtr->markerEventPtr = evPtr;
- if (evPtr->nextPtr == NULL) {
- tsdPtr->lastEventPtr = evPtr;
- }
/* move timer event hereafter */
if (tsdPtr->timerMarkerPtr == INT2PTR(-1)) {
tsdPtr->timerMarkerPtr = evPtr;
}
+
+ tsdPtr->queueEpoch++; /* queue changed in the middle (not at the tail); bump the epoch */
+ break;
+ case TCL_QUEUE_RETARDED:
+ /*
+ * Append the event to the end of the retarded list.
+ * This guarantees servicing no earlier than the next event cycle.
+ */
+
+ SpliceEventTail(evPtr, &tsdPtr->firstRetardEv, &tsdPtr->lastRetardEv);
+ break;
}
Tcl_MutexUnlock(&(tsdPtr->queueMutex));
}
+static Tcl_Event *
+SearchEventInQueue(
+ Tcl_Event *firstEvPtr,
+ Tcl_Event *evPtr,
+ Tcl_Event **prevEvPtr)
+{
+ Tcl_Event *prevPtr = NULL;
+
+ /*
+ * Search event in the queue (if not first one).
+ */
+
+ if (evPtr != firstEvPtr) {
+
+ for (prevPtr = firstEvPtr;
+ prevPtr && prevPtr->nextPtr != evPtr;
+ prevPtr = prevPtr->nextPtr) {
+ /* Empty loop body. */
+ }
+ if (!prevPtr) {
+ /* not in queue */
+ evPtr = NULL;
+ }
+ }
+ if (prevEvPtr) {
+ *prevEvPtr = prevPtr;
+ }
+ return evPtr;
+}
+
static void
UnlinkEvent(
ThreadSpecificData *tsdPtr,
Tcl_Event *evPtr,
- Tcl_Event *prevPtr) {
+ Tcl_Event *prevPtr)
+{
/*
* Unlink it.
*/
@@ -539,14 +620,17 @@ UnlinkEvent(
} else {
prevPtr->nextPtr = evPtr->nextPtr;
}
+ if (evPtr->nextPtr == NULL) {
+ tsdPtr->lastEventPtr = prevPtr;
+ }
+
+ /* the queue was modified; bump the epoch so service loops re-sync */
+ tsdPtr->queueEpoch++;
/*
- * Update 'last' and 'marker' events if either has been deleted.
+ * Update 'marker' events if either has been deleted.
*/
- if (evPtr->nextPtr == NULL) {
- tsdPtr->lastEventPtr = prevPtr;
- }
if (tsdPtr->markerEventPtr == evPtr) {
tsdPtr->markerEventPtr = prevPtr;
}
@@ -554,7 +638,38 @@ UnlinkEvent(
tsdPtr->timerMarkerPtr = prevPtr ? prevPtr : INT2PTR(-1);
}
}
-
+
+static void
+InvolveRetardedEvents(
+ ThreadSpecificData *tsdPtr)
+{
+ /* move the retarded events to the end of the queue */
+ if (tsdPtr->firstEventPtr == NULL) {
+ tsdPtr->firstEventPtr = tsdPtr->firstRetardEv;
+ } else {
+ tsdPtr->lastEventPtr->nextPtr = tsdPtr->firstRetardEv;
+ }
+ tsdPtr->lastEventPtr = tsdPtr->lastRetardEv;
+ /* reset retarded list */
+ tsdPtr->lastRetardEv = tsdPtr->firstRetardEv = NULL;
+}
+
+static void
+UnlinkRetardedEvent(
+ ThreadSpecificData *tsdPtr,
+ Tcl_Event *evPtr,
+ Tcl_Event *prevPtr)
+{
+ if (prevPtr == NULL) {
+ tsdPtr->firstRetardEv = evPtr->nextPtr;
+ } else {
+ prevPtr->nextPtr = evPtr->nextPtr;
+ }
+ if (evPtr->nextPtr == NULL) {
+ tsdPtr->lastRetardEv = prevPtr;
+ }
+}
+
/*
*----------------------------------------------------------------------
*
@@ -591,32 +706,39 @@ Tcl_DeleteEvents(
* Walk the queue of events for the thread, applying 'proc' to each to
* decide whether to eliminate the event.
*/
-
prevPtr = NULL;
evPtr = tsdPtr->firstEventPtr;
while (evPtr != NULL) {
Tcl_Event *nextPtr = evPtr->nextPtr;
if (proc(evPtr, clientData) == 1) {
- /*
- * This event should be deleted. Unlink it.
- */
-
+ /* This event should be deleted. Unlink and delete it. */
UnlinkEvent(tsdPtr, evPtr, prevPtr);
-
- /*
- * Delete the event data structure.
- */
-
ckfree(evPtr);
} else {
- /*
- * Event is to be retained.
- */
-
+ /* Event is to be retained. */
+ prevPtr = evPtr;
+ }
+ evPtr = nextPtr;
+ }
+
+ /*
+ * Do the same for the retarded list.
+ */
+ prevPtr = NULL;
+ evPtr = tsdPtr->firstRetardEv;
+ while (evPtr != NULL) {
+ Tcl_Event *nextPtr = evPtr->nextPtr;
+ if (proc(evPtr, clientData) == 1) {
+ /* This event should be deleted. Unlink and delete it. */
+ UnlinkRetardedEvent(tsdPtr, evPtr, prevPtr);
+ ckfree(evPtr);
+ } else {
+ /* Event is to be retained. */
prevPtr = evPtr;
}
evPtr = nextPtr;
}
+
Tcl_MutexUnlock(&(tsdPtr->queueMutex));
}
@@ -625,37 +747,23 @@ TclpCancelEvent(
Tcl_Event *evPtr) /* Event to remove from queue. */
{
Tcl_Event *prevPtr = NULL;
-
ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);
Tcl_MutexLock(&(tsdPtr->queueMutex));
/*
- * Search event to unlink from queue.
+ * Search for the event to unlink from the queue and delete it.
+ * Note that the event may be in the retarded list.
*/
- if (evPtr != tsdPtr->firstEventPtr) {
- for (prevPtr = tsdPtr->firstEventPtr;
- prevPtr && prevPtr->nextPtr != evPtr;
- prevPtr = prevPtr->nextPtr) {
- /* Empty loop body. */
- }
- if (!prevPtr) {
- evPtr = NULL; /* not in queue (already removed) */
- }
- }
-
- if (evPtr) {
- /*
- * Unlink it.
- */
-
+ if (SearchEventInQueue(tsdPtr->firstEventPtr, evPtr, &prevPtr)) {
UnlinkEvent(tsdPtr, evPtr, prevPtr);
-
- /*
- * Delete the event data structure.
- */
- ckfree((char *) evPtr);
+ ckfree(evPtr);
+ }
+ else
+ if (SearchEventInQueue(tsdPtr->firstRetardEv, evPtr, &prevPtr)) {
+ UnlinkRetardedEvent(tsdPtr, evPtr, prevPtr);
+ ckfree(evPtr);
}
Tcl_MutexUnlock(&(tsdPtr->queueMutex));
@@ -690,9 +798,10 @@ Tcl_ServiceEvent(
* matching this will be skipped for
* processing later. */
{
- Tcl_Event *evPtr, *prevPtr = NULL;
+ Tcl_Event *evPtr, *prevPtr;
Tcl_EventProc *proc;
int result;
+ size_t queueEpoch;
ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);
/*
@@ -734,17 +843,21 @@ Tcl_ServiceEvent(
goto processTimer;
}
+ /* Lock queue to process events */
+ Tcl_MutexLock(&(tsdPtr->queueMutex));
+
/*
* Loop through all the events in the queue until we find one that can
* actually be handled.
*/
- Tcl_MutexLock(&(tsdPtr->queueMutex));
- for (evPtr = tsdPtr->firstEventPtr;
+ for (prevPtr = NULL, evPtr = tsdPtr->firstEventPtr;
evPtr != NULL && tsdPtr->timerMarkerPtr != INT2PTR(-1);
- evPtr = evPtr->nextPtr
+ prevPtr = evPtr, evPtr = evPtr->nextPtr
) {
-
+
+ repeatCycle:
+
if (tsdPtr->timerMarkerPtr == evPtr) {
tsdPtr->timerMarkerPtr = INT2PTR(-1); /* timer marker reached */
}
@@ -767,11 +880,13 @@ Tcl_ServiceEvent(
proc = evPtr->proc;
if (proc == NULL) {
- prevPtr = evPtr;
continue;
}
evPtr->proc = NULL;
+ /* Save the current queue epoch (while unchanged, prevPtr stays valid). */
+ queueEpoch = tsdPtr->queueEpoch;
+
/*
* Release the lock before calling the event function. This allows
* other threads to post events if we enter a recursive event loop in
@@ -783,58 +898,64 @@ Tcl_ServiceEvent(
result = proc(evPtr, flags);
Tcl_MutexLock(&(tsdPtr->queueMutex));
- if (result) {
+ /* Event was processed, or it was rescheduled to be executed later (retarded). */
+ if (result || evPtr->proc) {
/*
- * The event was processed, so remove it from the queue.
+ * Check whether the queue was changed while the handler ran.
*/
- prevPtr = NULL;
- if (evPtr != tsdPtr->firstEventPtr) {
- for (prevPtr = tsdPtr->firstEventPtr;
- prevPtr && prevPtr->nextPtr != evPtr;
- prevPtr = prevPtr->nextPtr) {
- /* Empty loop body. */
- }
- if (!prevPtr) {
- evPtr = NULL;
- }
+ if (queueEpoch != tsdPtr->queueEpoch) {
+ /* the queue was changed while it was unlocked */
+ queueEpoch = tsdPtr->queueEpoch;
+ /* try to find the event again */
+ evPtr = SearchEventInQueue(tsdPtr->firstEventPtr,
+ evPtr, &prevPtr);
}
- if (evPtr) {
- /* Detach event from queue */
+
+ /*
+ * If the handler restored a proc to process the event later,
+ * move the event onto the retarded list.
+ */
+ if (evPtr && evPtr->proc) {
+ /*
+ * Reattach the event on the end of the retarded list.
+ */
UnlinkEvent(tsdPtr, evPtr, prevPtr);
+ SpliceEventTail(evPtr,
+ &tsdPtr->firstRetardEv, &tsdPtr->lastRetardEv);
- /* If wanted to prolong (repeat) */
- if (evPtr->proc) {
- /*
- * Event was restored (prolonged) - sign to reattach to tail
- */
- if (evPtr != tsdPtr->lastEventPtr) {
- /* detach event from queue */
- UnlinkEvent(tsdPtr, evPtr, prevPtr);
- /* attach to tail */
- evPtr->nextPtr = NULL;
- if (tsdPtr->firstEventPtr == NULL) {
- tsdPtr->firstEventPtr = evPtr;
- } else {
- tsdPtr->lastEventPtr->nextPtr = evPtr;
- }
- tsdPtr->lastEventPtr = evPtr;
- }
+ /* next event to service */
+ if (prevPtr == NULL) {
+ /* we were at the head of the list; restart from the new head */
+ evPtr = tsdPtr->firstEventPtr;
} else {
- /* Free event */
- UnlinkEvent(tsdPtr, evPtr, prevPtr);
- ckfree(evPtr);
+ /* continue with the event after the previous one */
+ evPtr = prevPtr->nextPtr;
}
+ goto repeatCycle;
}
+
+ /*
+ * The event was processed, so remove it.
+ */
+ if (evPtr) {
+ /* Detach event from queue */
+ UnlinkEvent(tsdPtr, evPtr, prevPtr);
+
+ /* Free event */
+ ckfree(evPtr);
+ }
+
+ /* event was processed; return 1 */
Tcl_MutexUnlock(&(tsdPtr->queueMutex));
return 1;
+
} else {
/*
* The event wasn't actually handled, so we have to restore the
* proc field to allow the event to be attempted again.
*/
-
evPtr->proc = proc;
}
}
@@ -926,6 +1047,70 @@ CheckSourceThreshold(
}
#endif
+static int
+SetUpEventSources(
+ ThreadSpecificData *tsdPtr,
+ int flags)
+{
+ int res = 0;
+ EventSource *sourcePtr;
+
+ /*
+ * Set up all the event sources for new events. This will cause the
+ * block time to be updated if necessary.
+ */
+ tsdPtr->inTraversal++;
+ for (sourcePtr = tsdPtr->firstEventSourcePtr;
+ sourcePtr != NULL;
+ sourcePtr = sourcePtr->nextPtr
+ ) {
+ if (sourcePtr->setupProc) {
+ sourcePtr->setupProc(sourcePtr->clientData, flags);
+ res++;
+ }
+ }
+ tsdPtr->inTraversal--;
+
+ return res;
+}
+
+static int
+CheckEventSources(
+ ThreadSpecificData *tsdPtr,
+ int flags)
+{
+ int res = 0;
+ EventSource *sourcePtr;
+
+ /*
+ * Check all the event sources for new events.
+ */
+ for (sourcePtr = tsdPtr->firstEventSourcePtr;
+ sourcePtr != NULL;
+ sourcePtr = sourcePtr->nextPtr
+ ) {
+ if (sourcePtr->checkProc) {
+ sourcePtr->checkProc(sourcePtr->clientData, flags);
+ res++;
+ }
+ }
+
+ /*
+ * If there are retarded events left over from the last event cycle, attach
+ * them here to the tail of the event queue (start of the new event cycle).
+ */
+ if (tsdPtr->firstRetardEv) {
+ Tcl_MutexLock(&(tsdPtr->queueMutex));
+ if (tsdPtr->firstRetardEv) {
+ InvolveRetardedEvents(tsdPtr);
+ res++;
+ }
+ Tcl_MutexUnlock(&(tsdPtr->queueMutex));
+ }
+
+ return res;
+}
+
/*
*----------------------------------------------------------------------
*
@@ -949,7 +1134,6 @@ int
TclPeekEventQueued(
int flags)
{
- EventSource *sourcePtr;
ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);
int repeat = 1;
@@ -981,13 +1165,10 @@ TclPeekEventQueued(
/*
* Check all the event sources for new events.
*/
- for (sourcePtr = tsdPtr->firstEventSourcePtr; sourcePtr != NULL;
- sourcePtr = sourcePtr->nextPtr) {
- if (sourcePtr->checkProc) {
- (sourcePtr->checkProc)(sourcePtr->clientData, flags);
- }
+ if (!CheckEventSources(tsdPtr, flags)) {
+ return 0; /* no sources - no events could be created at all */
}
-
+
} while (repeat--);
return 0;
@@ -1124,6 +1305,10 @@ Tcl_SetMaxBlockTime(
*/
if (!tsdPtr->inTraversal) {
+ if (tsdPtr->blockTimeServLev < tsdPtr->serviceLevel) {
+ /* avoid resetting the blockTime set outside of traversal. */
+ tsdPtr->blockTimeServLev = tsdPtr->serviceLevel;
+ }
Tcl_SetTimer(&tsdPtr->blockTime);
}
}
@@ -1164,7 +1349,6 @@ Tcl_DoOneEvent(
* others defined by event sources. */
{
int result = 0, oldMode;
- EventSource *sourcePtr;
Tcl_Time *timePtr;
ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);
int blockTimeWasSet;
@@ -1181,6 +1365,15 @@ Tcl_DoOneEvent(
blockTimeWasSet = tsdPtr->blockTimeSet;
/*
+ * Set the service mode to none so notifier event routines won't try to
+ * service events recursively.
+ */
+
+ oldMode = tsdPtr->serviceMode;
+ tsdPtr->serviceMode = TCL_SERVICE_NONE;
+ tsdPtr->serviceLevel++;
+
+ /*
* Asynchronous event handlers are considered to be the highest priority
* events, and so must be invoked before we process events on the event
* queue.
@@ -1189,25 +1382,18 @@ Tcl_DoOneEvent(
if (flags & TCL_ASYNC_EVENTS) {
if (Tcl_AsyncReady()) {
(void) Tcl_AsyncInvoke(NULL, 0);
- return 1;
+ result = 1;
+ goto done;
}
/* Async only and don't wait - return */
if ( (flags & (TCL_ALL_EVENTS|TCL_DONT_WAIT))
== (TCL_ASYNC_EVENTS|TCL_DONT_WAIT) ) {
- return 0;
+ goto done;
}
}
/*
- * Set the service mode to none so notifier event routines won't try to
- * service events recursively.
- */
-
- oldMode = tsdPtr->serviceMode;
- tsdPtr->serviceMode = TCL_SERVICE_NONE;
-
- /*
* Main loop until servicing exact one event or block time resp.
* TCL_DONT_WAIT specified (infinite loop otherwise).
*/
@@ -1256,15 +1442,7 @@ Tcl_DoOneEvent(
* Set up all the event sources for new events. This will cause the
* block time to be updated if necessary.
*/
-
- tsdPtr->inTraversal = 1;
- for (sourcePtr = tsdPtr->firstEventSourcePtr; sourcePtr != NULL;
- sourcePtr = sourcePtr->nextPtr) {
- if (sourcePtr->setupProc) {
- sourcePtr->setupProc(sourcePtr->clientData, flags);
- }
- }
- tsdPtr->inTraversal = 0;
+ SetUpEventSources(tsdPtr, flags);
if (tsdPtr->blockTimeSet) {
timePtr = &tsdPtr->blockTime;
@@ -1278,6 +1456,7 @@ Tcl_DoOneEvent(
*/
wait:
result = Tcl_WaitForEvent(timePtr);
+ tsdPtr->blockTimeServLev = 0; /* reset block-time level (processed). */
if (result < 0) {
if (blockTimeWasSet) {
result = 0;
@@ -1288,13 +1467,7 @@ Tcl_DoOneEvent(
/*
* Check all the event sources for new events.
*/
-
- for (sourcePtr = tsdPtr->firstEventSourcePtr; sourcePtr != NULL;
- sourcePtr = sourcePtr->nextPtr) {
- if (sourcePtr->checkProc) {
- sourcePtr->checkProc(sourcePtr->clientData, flags);
- }
- }
+ CheckEventSources(tsdPtr, flags);
/*
* Check for events queued by the notifier or event sources.
@@ -1337,11 +1510,17 @@ Tcl_DoOneEvent(
break;
}
} while ( !(flags & TCL_DONT_WAIT) );
-
- /* Reset block time earliest at the end of event cycle */
- tsdPtr->blockTimeSet = 0;
+done:
+ /*
+ * Reset the block time (no earlier than at the end of the event cycle) and restore the service mode.
+ */
+ if (tsdPtr->blockTimeServLev < tsdPtr->serviceLevel) {
+ tsdPtr->blockTimeSet = 0;
+ tsdPtr->blockTimeServLev = 0;
+ }
tsdPtr->serviceMode = oldMode;
+ tsdPtr->serviceLevel--;
return result;
}
@@ -1369,7 +1548,6 @@ int
Tcl_ServiceAll(void)
{
int result = 0;
- EventSource *sourcePtr;
ThreadSpecificData *tsdPtr = TCL_TSD_INIT(&dataKey);
if (tsdPtr->serviceMode == TCL_SERVICE_NONE) {
@@ -1397,21 +1575,10 @@ Tcl_ServiceAll(void)
* so we can avoid multiple changes.
*/
- tsdPtr->inTraversal = 1;
tsdPtr->blockTimeSet = 0;
- for (sourcePtr = tsdPtr->firstEventSourcePtr; sourcePtr != NULL;
- sourcePtr = sourcePtr->nextPtr) {
- if (sourcePtr->setupProc) {
- sourcePtr->setupProc(sourcePtr->clientData, TCL_ALL_EVENTS);
- }
- }
- for (sourcePtr = tsdPtr->firstEventSourcePtr; sourcePtr != NULL;
- sourcePtr = sourcePtr->nextPtr) {
- if (sourcePtr->checkProc) {
- sourcePtr->checkProc(sourcePtr->clientData, TCL_ALL_EVENTS);
- }
- }
+ SetUpEventSources(tsdPtr, TCL_ALL_EVENTS);
+ CheckEventSources(tsdPtr, TCL_ALL_EVENTS);
while (Tcl_ServiceEvent(0)) {
result = 1;
@@ -1425,7 +1592,6 @@ Tcl_ServiceAll(void)
} else {
Tcl_SetTimer(&tsdPtr->blockTime);
}
- tsdPtr->inTraversal = 0;
tsdPtr->serviceMode = TCL_SERVICE_ALL;
return result;
}
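
Under the reworked Tcl_ServiceEvent() above, a handler can also defer itself by restoring evPtr->proc before returning: the dispatcher then moves the event onto the retarded list instead of freeing it, so it runs again no earlier than the next event cycle. A sketch of that pattern (the handler name is hypothetical):

static int
ExampleSelfRetardingProc(Tcl_Event *evPtr, int flags)
{
    int moreWork = 0;		/* set to 1 when another slice is needed */

    /* ... do one slice of work, updating moreWork ... */

    if (moreWork) {
	/* Restoring proc makes Tcl_ServiceEvent retard the event. */
	evPtr->proc = ExampleSelfRetardingProc;
    }
    return 1;			/* this invocation is complete */
}
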
diff --git a/tests/chanio.test b/tests/chanio.test
index 20ebd28..6270d83 100644
--- a/tests/chanio.test
+++ b/tests/chanio.test
@@ -7220,7 +7220,10 @@ test chan-io-54.1 {Recursive channel events} {socket fileevent} {
lappend result $next
if {$next == 1} {
chan event $s readable [namespace code [list readit $s 2]]
- vwait [namespace which -variable x]
+ if {![vwait 2000 [namespace which -variable x]]} {
+ set x failure
+ error "timeout: waited too long"
+ }
}
incr x
}
@@ -7249,9 +7252,7 @@ test chan-io-54.1 {Recursive channel events} {socket fileevent} {
lappend result [chan gets $cs]
chan configure $cs -blocking off
chan event $cs readable [namespace code [list readit $cs 1]]
- set a [after 2000 [namespace code { set x failure }]]
vwait [namespace which -variable x]
- after cancel $a
chan close $as
chan close $ss
chan close $cs