CRITICAL_SECTION *csp;
osi_queue_t * lockRefH, *lockRefT;
osi_lock_ref_t *lockRefp;
+ DWORD tid = thrd_Current();
    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainWriteProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);
+ if (lockp->flags & OSI_LOCKFLAG_EXCL) {
+ osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
+ } else {
+ for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
+ osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
+ }
+ }
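    /*
     * Editor's note: the checks above catch self-deadlock at acquisition
     * time.  A thread that already holds this lock for write, or holds it
     * for read, would otherwise queue behind itself and block forever.
     * Illustrative (hypothetical) caller:
     *
     *     lock_ObtainWrite(lockp);
     *     lock_ObtainWrite(lockp);   ... asserts "OSI_RWLOCK_WRITEHELD"
     */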
+
/* here we have the fast lock, so see if we can obtain the real lock */
if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
(lockp->readers > 0)) {
lockp->waiters++;
osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
lockp->waiters--;
+ osi_assertx(lockp->waiters >= 0, "waiters underflow");
osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
} else {
/* if we're here, all clear to set the lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
- lockp->tid[0] = thrd_Current();
+ lockp->tid[0] = tid;
}
+ osi_assertx(lockp->readers == 0, "write lock readers present");
+
LeaveCriticalSection(csp);
    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
        lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);
- for ( i=0; i < lockp->readers; i++ ) {
- osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
+ if (lockp->flags & OSI_LOCKFLAG_EXCL) {
+ osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
+ } else {
+ for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
+ osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
+ }
}
    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, lockp->tid, csp);
lockp->waiters--;
+ osi_assertx(lockp->waiters >= 0, "waiters underflow");
osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
    } else {
        /* if we're here, all clear to set the lock */
        if (++(lockp->readers) < OSI_RWLOCK_THREADS)
            lockp->tid[lockp->readers-1] = tid;
    }

    LeaveCriticalSection(csp);
    return;
}
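/*
 * Editor's sketch (hypothetical helper, not part of this change): the tid
 * bookkeeping added above makes read ownership queryable.  An exact answer
 * requires holding the lock's critical section; outside it the result is
 * advisory only.
 */
static int lock_ReadHeldByMe(osi_rwlock_t *lockp)
{
    long i;
    DWORD tid = thrd_Current();

    for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
        if (lockp->tid[i] == tid)
            return 1;
    }
    return 0;
}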
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp;
             lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "read lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
    osi_assertx(lockp->readers > 0, "read lock not held");

    for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
        if ( lockp->tid[i] == tid ) {
            /* remove this thread's id and compact the reader tid list */
            for ( ; i < lockp->readers - 1 && i < OSI_RWLOCK_THREADS - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }
/* releasing a read lock can allow readers or writers */
- if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
+ if (--(lockp->readers) == 0 && !osi_TEmpty(&lockp->d.turn)) {
osi_TSignalForMLs(&lockp->d.turn, 0, csp);
}
else {
+ osi_assertx(lockp->readers >= 0, "read lock underflow");
+
/* and finally release the big lock */
LeaveCriticalSection(csp);
}
return;
}
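/*
 * Editor's note: osi_TSignalForMLs is handed csp on the wakeup path above,
 * consistent with it releasing the critical section on the caller's behalf;
 * that is why the explicit LeaveCriticalSection appears only on the else
 * path.
 */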
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp;
             lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");
lockp->flags &= ~OSI_LOCKFLAG_EXCL;
lockp->readers++;
+ osi_assertx(lockp->readers == 1, "read lock not one");
+
if (!osi_TEmpty(&lockp->d.turn)) {
osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
}
}
}
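/*
 * Editor's sketch (hypothetical caller): downgrading avoids the window in
 * which another writer could slip in between a release and a re-obtain.
 *
 *     lock_ObtainWrite(lockp);
 *     ... mutate shared state ...
 *     lock_ConvertWToR(lockp);
 *     ... keep reading under the read lock ...
 *     lock_ReleaseRead(lockp);
 */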
- if (--lockp->readers == 0) {
+ if (--(lockp->readers) == 0) {
/* convert read lock to write lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
lockp->tid[0] = tid;
} else {
+ osi_assertx(lockp->readers > 0, "read lock underflow");
+
lockp->waiters++;
osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
lockp->waiters--;
+ osi_assertx(lockp->waiters >= 0, "waiters underflow");
osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
}
return;
}
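/*
 * Editor's note: the upgrade above is immediate only for the last reader.
 * Any other upgrading thread gives up its read claim, queues with
 * OSI_SLEEPINFO_W4WRITE, and sleeps until the remaining readers drain, so
 * callers should not assume the lock was held continuously across the
 * conversion.
 */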
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
if (lockOrderValidation) {
lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
}
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
/* here we have the fast lock, so see if we can obtain the real lock */
if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
lockp->waiters++;
osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
lockp->waiters--;
+ osi_assertx(lockp->waiters >= 0, "waiters underflow");
osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
} else {
/* if we're here, all clear to set the lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
lockp->tid = thrd_Current();
}
+
LeaveCriticalSection(csp);
    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return;
}
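/*
 * Editor's note: under lockOrderValidation every acquired lock is recorded
 * in a per-thread TLS list (tls_LockRefH/tls_LockRefT).  On acquisition,
 * lock_VerifyOrderMX/lock_VerifyOrderRW walk that list to assert that lock
 * levels are only taken in a consistent order; release paths unlink the
 * matching entry again.
 */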
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp;
             lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "mutex not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
osi_assertx(lockp->tid == thrd_Current(), "mutex not held by current thread");
if (i >= 0 && i < OSI_NLOCKTYPES)
return (osi_lockOps[i]->TryReadProc)(lockp);
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
        lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
/* here we have the fast lock, so see if we can obtain the real lock */
if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
i = 0;
}
else {
/* if we're here, all clear to set the lock */
- if (++lockp->readers < OSI_RWLOCK_THREADS)
+ if (++(lockp->readers) < OSI_RWLOCK_THREADS)
lockp->tid[lockp->readers-1] = thrd_Current();
i = 1;
}
if (i >= 0 && i < OSI_NLOCKTYPES)
return (osi_lockOps[i]->TryWriteProc)(lockp);
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
        lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
         || (lockp->readers > 0)) {
        i = 0;
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = thrd_Current();
        i = 1;
    }
if (i >= 0 && i < OSI_NLOCKTYPES)
return (osi_lockOps[i]->TryMutexProc)(lockp);
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
        lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
    }
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
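/*
 * Editor's sketch (hypothetical caller): the Try* routines return nonzero
 * on success and 0 on contention, so the usual pattern is:
 *
 *     if (lock_TryMutex(mp)) {
 *         ... short critical section ...
 *         lock_ReleaseMutex(mp);
 *     } else {
 *         ... fall back, e.g. block in lock_ObtainMutex(mp) or retry ...
 *     }
 */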
return;
}
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp;
             lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
    osi_assertx(lockp->readers > 0, "osi_SleepR: not held");

    for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
        if ( lockp->tid[i] == tid ) {
            /* remove this thread's id and compact the reader tid list */
            for ( ; i < lockp->readers - 1 && i < OSI_RWLOCK_THREADS - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }
/* XXX better to get the list of things to wakeup from TSignalForMLs, and
* then do the wakeup after SleepSpin releases the low-level mutex.
*/
- if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
+ if (--(lockp->readers) == 0 && !osi_TEmpty(&lockp->d.turn)) {
osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
}
return;
}
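/*
 * Editor's note: unlike the release paths, the sleep path hands
 * osi_TSignalForMLs a NULL critical section, consistent with the caller
 * retaining csp for the subsequent sleep handoff; csp is only given up
 * when the thread actually blocks.
 */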
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp;
             lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
+ lockp->tid[0] = 0;
return;
}
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp;
             lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
+ lockp->tid = 0;
return;
}
- /* otherwise we have the base case, which requires no special
+ /*
+ * otherwise we have the base case, which requires no special
* initialization.
*/
- mp->type = 0;
- mp->flags = 0;
- mp->tid = 0;
+ memset(mp, 0, sizeof(osi_mutex_t));
mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
mp->level = level;
osi_TInit(&mp->d.turn);
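/*
 * Editor's sketch (hypothetical caller, assuming the usual
 * (mp, name, level) signature; the name and level values are illustrative):
 *
 *     osi_mutex_t mx;
 *
 *     lock_InitializeMutex(&mx, "example mutex", 0);
 *     lock_ObtainMutex(&mx);
 *     ...
 *     lock_ReleaseMutex(&mx);
 */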