From: Chas Williams Date: Tue, 1 Jan 2002 18:49:18 +0000 (+0000) Subject: linux-osi-sleep-avoid-forgetting-events-20020101 X-Git-Tag: openafs-devel-1_3_0~99 X-Git-Url: https://git.michaelhowe.org/gitweb/?a=commitdiff_plain;h=161c73a807128dc40ad20a26b01cade534f6d1dc;p=packages%2Fo%2Fopenafs.git linux-osi-sleep-avoid-forgetting-events-20020101 based on suggestion from Ted Anderson "the changes make more sense than the code as it currently exists. the only thing i am nervous about is the dontSleep delete. while it makes more sense to just not wakeup sleepers if none exist, i suppose its possible that some bit of afs code wants acausal (wake before sleep) events. that does seem quite unlikely. just looking at the sleep on solaris, it checks the seq number to get the next event not a previous event. i imported the changes and made the fixup in osi_stoplistener(). i dropped some of the silly syntax changes that junked up the diff -- this makes it a bit easier to see what was changed. i just added an assert in afs_addevent for quality assurance purposes." ==================== This delta was composed from multiple commits as part of the CVS->Git migration. The checkin message with each commit was inconsistent. The following are the additional commit messages. ==================== fix for osi_StopListener so it does the right thing --- diff --git a/src/afs/LINUX/osi_sleep.c b/src/afs/LINUX/osi_sleep.c index 36346e3be..bbe94e694 100644 --- a/src/afs/LINUX/osi_sleep.c +++ b/src/afs/LINUX/osi_sleep.c @@ -25,7 +25,7 @@ static int osi_TimedSleep(char *event, afs_int32 ams, int aintok); void afs_osi_Wakeup(char *event); void afs_osi_Sleep(char *event); -static char waitV; +static char waitV, dummyV; #if ! 
defined(AFS_GLOBAL_SUNLOCK) @@ -96,15 +96,14 @@ int afs_osi_Wait(afs_int32 ams, struct afs_osi_WaitHandle *ahandle, int aintok) if (ahandle) ahandle->proc = (caddr_t) current; + AFS_ASSERT_GLOCK(); do { - AFS_ASSERT_GLOCK(); - code = 0; #if defined(AFS_GLOBAL_SUNLOCK) code = osi_TimedSleep(&waitV, ams, 1); - if (code) { - if (aintok) break; + if (code == EINTR) { + if (aintok) + return EINTR; flush_signals(current); - code = 0; } #else timer = afs_osi_CallProc(AfsWaitHack, (char *) current, ams); @@ -113,10 +112,10 @@ int afs_osi_Wait(afs_int32 ams, struct afs_osi_WaitHandle *ahandle, int aintok) #endif /* AFS_GLOBAL_SUNLOCK */ if (ahandle && (ahandle->proc == (caddr_t) 0)) { /* we've been signalled */ - break; + return EINTR; } } while (osi_Time() < endTime); - return code; + return 0; } @@ -129,9 +128,6 @@ typedef struct afs_event { int seq; /* Sequence number: this is incremented by wakeup calls; wait will not return until it changes */ - int dontSleep; /* on SMP machines the wakeup call may be - earlier than the sleep call. wakeup sets - dontSleep and sleep resets it at return. */ #if defined(AFS_LINUX24_ENV) wait_queue_head_t cond; #else @@ -163,68 +159,81 @@ static afs_event_t *afs_getevent(char *event) newp = evp; evp = evp->next; } - if (!newp) { - newp = (afs_event_t *) osi_AllocSmallSpace(sizeof (afs_event_t)); - afs_evhashcnt++; - newp->next = afs_evhasht[hashcode]; - afs_evhasht[hashcode] = newp; -#if defined(AFS_LINUX24_ENV) - init_waitqueue_head(&newp->cond); -#else - init_waitqueue(&newp->cond); -#endif - newp->seq = 0; - } + if (!newp) + return NULL; + newp->event = event; newp->refcount = 1; - newp->dontSleep = 0; return newp; } +/* afs_addevent -- allocates a new event for the address. It isn't returned; + * instead, afs_getevent should be called again. Thus, the real effect of + * this routine is to add another event to the hash bucket for this + * address. + * + * Locks: + * Called with GLOCK held. 
However the function might drop + * GLOCK when it calls osi_AllocSmallSpace for allocating + * a new event (In Linux, the allocator drops GLOCK to avoid + * a deadlock). + */ + +static void afs_addevent(char *event) +{ + int hashcode; + afs_event_t *newp; + + AFS_ASSERT_GLOCK(); + hashcode = afs_evhash(event); + newp = osi_AllocSmallSpace(sizeof(afs_event_t)); + afs_evhashcnt++; + newp->next = afs_evhasht[hashcode]; + afs_evhasht[hashcode] = newp; +#if defined(AFS_LINUX24_ENV) + init_waitqueue_head(&newp->cond); +#else + init_waitqueue(&newp->cond); +#endif + newp->seq = 0; + newp->event = &dummyV; /* Dummy address for new events */ + newp->refcount = 0; +} + + /* Release the specified event */ #define relevent(evp) ((evp)->refcount--) +/* afs_osi_Sleep -- waits for an event to be notified. */ void afs_osi_Sleep(char *event) { struct afs_event *evp; int seq; - int count = 0; - int timeout = 1; evp = afs_getevent(event); - if (evp->dontSleep) { + if (!evp) { + /* Can't block because allocating a new event would require dropping + * the GLOCK, which may cause us to miss the wakeup. So call the + * allocator then return immediately. We'll find the new event next + * time around without dropping the GLOCK. */ + afs_addevent(event); + return; + } + + seq = evp->seq; + + while (seq == evp->seq) { afs_Trace4(afs_iclSetp, CM_TRACE_SLEEP, ICL_TYPE_POINTER, evp, - ICL_TYPE_INT32, count, + ICL_TYPE_INT32, 0/*count*/, ICL_TYPE_INT32, seq, ICL_TYPE_INT32, evp->seq); - } else { - seq = evp->seq; - while (seq == evp->seq && !evp->dontSleep) { - AFS_ASSERT_GLOCK(); - AFS_GUNLOCK(); -#ifdef AFS_SMP - /* - * There seems to be a problem on SMP machines if the wake_up() and - * interruptible_sleep() calls happen at the "same" time. 
- */ - if (timeout < 1024) - timeout = timeout << 1; - interruptible_sleep_on_timeout(&evp->cond, timeout); -#else - interruptible_sleep_on(&evp->cond); -#endif - AFS_GLOCK(); - count++; - afs_Trace4(afs_iclSetp, CM_TRACE_SLEEP, - ICL_TYPE_POINTER, evp, - ICL_TYPE_INT32, count, - ICL_TYPE_INT32, seq, - ICL_TYPE_INT32, evp->seq); - } + AFS_ASSERT_GLOCK(); + AFS_GUNLOCK(); + interruptible_sleep_on(&evp->cond); + AFS_GLOCK(); } - evp->dontSleep = 0; relevent(evp); } @@ -235,11 +244,8 @@ void afs_osi_Sleep(char *event) * ams --- max sleep time in milliseconds * aintok - 1 if should sleep interruptibly * - * Returns 0 if timeout and EINTR if signalled. - * - * While the Linux kernel still has a global lock, we can use the standard - * sleep calls and drop our locks early. The kernel lock will protect us - * until we get to sleep. + * Returns 0 if timeout, EINTR if signalled, and EGAIN if it might + * have raced. */ static int osi_TimedSleep(char *event, afs_int32 ams, int aintok) { @@ -247,6 +253,14 @@ static int osi_TimedSleep(char *event, afs_int32 ams, int aintok) struct afs_event *evp; evp = afs_getevent(event); + if (!evp) { + /* Can't block because allocating a new event would require dropping + * the GLOCK, which may cause us to miss the wakeup. So call the + * allocator then return immediately. We'll find the new event next + * time around without dropping the GLOCK. */ + afs_addevent(event); + return EAGAIN; + } AFS_GUNLOCK(); if (aintok) @@ -255,6 +269,8 @@ static int osi_TimedSleep(char *event, afs_int32 ams, int aintok) t = sleep_on_timeout(&evp->cond, t); AFS_GLOCK(); + relevent(evp); + return t ? 
EINTR : 0; } @@ -264,7 +280,9 @@ void afs_osi_Wakeup(char *event) struct afs_event *evp; evp = afs_getevent(event); - evp->dontSleep = 1; + if (!evp) /* No sleepers */ + return; + if (evp->refcount > 1) { evp->seq++; afs_Trace2(afs_iclSetp, CM_TRACE_WAKE, diff --git a/src/rx/LINUX/rx_knet.c b/src/rx/LINUX/rx_knet.c index 8c919ee95..ccc3447a9 100644 --- a/src/rx/LINUX/rx_knet.c +++ b/src/rx/LINUX/rx_knet.c @@ -181,7 +181,8 @@ void osi_StopListener(void) if (rxk_ListenerPid) { (void) (*sys_killp)(rxk_ListenerPid, 9); #ifdef AFS_LINUX24_ENV - afs_osi_Sleep(&rxk_ListenerPid); + afs_osi_Sleep(&rxk_ListenerPid); /* get an event */ + afs_osi_Sleep(&rxk_ListenerPid); /* actually sleep */ #else rxk_ListenerPid = 0; #endif