From ef10390e12ffd697f9eaaacd1d7291e05d16c835 Mon Sep 17 00:00:00 2001
From: Jeffrey Altman
Date: Fri, 22 Aug 2008 19:06:04 +0000
Subject: [PATCH] windows-misc-20080822

LICENSE MIT

1. In multi-threaded applications deadlocks are always a concern.  Deadlock
   avoidance requires strict adherence to a documented lock hierarchy.  The
   lock hierarchy for OAFW is described in a file called locks.txt.  There
   are two problems.  First, some of the locks are not included in locks.txt.
   Second, given the depth of function calls, it is nearly impossible for any
   programmer to identify all of the locks that are held at the time a
   function is called.  This patch implements a new lock-order verification
   mechanism.  Each lock is assigned a lock level at initialization.  Each
   thread maintains a queue of held locks.  Each time a lock is acquired, the
   queue is checked to ensure that no lock with a higher level than the
   requested lock has already been acquired.  If a violation occurs, the
   service panics.

2. When the service panics, ensure that a minidump is always generated.

3. Remove the unused lock cm_bufGetMutex.

4. The lock-order verifier identified approximately a dozen lock-order
   violations, which are corrected.

5. A race condition within the function path cm_GetSCache() ->
   cm_GetNewSCache() -> cm_RecycleSCache() permitted a cm_scache_t object to
   be issued simultaneously to two threads.  This would eventually result in
   a panic due to the resulting reference undercount.

6. Fix the interpretation of an empty ioctl path string to mean the current
   directory.  "fs lsm", "symlink list", etc. now return a "not a ..." error
   instead of "does not exist".

7. Add an SMB_STRF_SRCNULTERM flag to the smb_ParseStringXXX functions to
   indicate that the input string is nul-terminated.  Set it when input
   strings are nul-terminated.

8. The CIFS protocol specification for handling NT_TRANSACT_CREATE does not
   match the observed behavior: the 'nameLength' field is actually given in
   bytes, not in characters as the specification states.  Fix the
   implementation to match the observed behavior.

9. The cm_HaveAccessRights() attempt at deadlock avoidance, calling
   lock_TryRead() on the parent directory cm_scache_t rw-lock, does not in
   fact avoid the deadlock.  Avoid it instead by enforcing a lock order of
   lowest vnode first.  Then remove the infinite-loop avoidance in
   cm_SyncOp() that was returning an unwarranted access-denied error.
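
The following minimal, self-contained sketch illustrates the lock-order
verification scheme described in item 1 (a level assigned to each lock at
initialization, a per-thread record of held locks, a check on every acquire,
panic on violation).  It is not the osi_* code added by this patch; the names
(ho_lock, lock_obtain, etc.) are illustrative, and the levels shown are
borrowed from the LOCK_HIERARCHY_* constants added to cm.h.  For brevity it
shows a single thread; the real verifier keeps the held-lock list per thread.

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_HELD 64

    typedef struct ho_lock {
        const char *name;
        int level;              /* assigned at initialization; 0 = ignore */
        /* a real lock would also wrap a CRITICAL_SECTION or rw-lock */
    } ho_lock;

    /* held-lock record; per-thread (e.g. TLS) in a multi-threaded build */
    static ho_lock *held[MAX_HELD];
    static int nheld = 0;

    static void lock_init(ho_lock *lp, const char *name, int level)
    {
        lp->name = name;
        lp->level = level;
    }

    /* verify the hierarchy before acquiring: no lock already held by this
     * thread may have a level higher than the one being requested. */
    static void lock_obtain(ho_lock *lp)
    {
        int i;
        for (i = 0; i < nheld; i++) {
            if (lp->level && held[i]->level && held[i]->level > lp->level) {
                fprintf(stderr,
                        "lock order violation: %s (%d) requested while %s (%d) held\n",
                        lp->name, lp->level, held[i]->name, held[i]->level);
                abort();        /* the service would osi_panic() here */
            }
        }
        /* ... acquire the underlying lock ... */
        held[nheld++] = lp;
    }

    static void lock_release(ho_lock *lp)
    {
        int i;
        /* ... release the underlying lock ... */
        for (i = 0; i < nheld; i++) {
            if (held[i] == lp) {
                held[i] = held[--nheld];
                break;
            }
        }
    }

    int main(void)
    {
        ho_lock buf_lock, scache_lock;

        lock_init(&buf_lock,    "buffer", 530);  /* cf. LOCK_HIERARCHY_BUFFER */
        lock_init(&scache_lock, "scache", 540);  /* cf. LOCK_HIERARCHY_SCACHE */

        lock_obtain(&buf_lock);
        lock_obtain(&scache_lock);   /* ok: no held lock has a level > 540 */
        lock_release(&scache_lock);
        lock_release(&buf_lock);

        lock_obtain(&scache_lock);
        lock_obtain(&buf_lock);      /* violation: 540 held, 530 requested */
        return 0;
    }
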
--- src/WINNT/afsd/afsd_init.c | 4 +- src/WINNT/afsd/afsd_service.c | 23 +- src/WINNT/afsd/cm.h | 45 ++ src/WINNT/afsd/cm_access.c | 21 +- src/WINNT/afsd/cm_aclent.c | 2 +- src/WINNT/afsd/cm_buf.c | 21 +- src/WINNT/afsd/cm_callback.c | 4 +- src/WINNT/afsd/cm_cell.c | 63 +- src/WINNT/afsd/cm_cell.h | 2 +- src/WINNT/afsd/cm_conn.c | 9 +- src/WINNT/afsd/cm_daemon.c | 3 +- src/WINNT/afsd/cm_dcache.c | 6 - src/WINNT/afsd/cm_dnlc.c | 2 +- src/WINNT/afsd/cm_freelance.c | 4 +- src/WINNT/afsd/cm_ioctl.c | 30 +- src/WINNT/afsd/cm_rpc.c | 3 +- src/WINNT/afsd/cm_scache.c | 135 ++-- src/WINNT/afsd/cm_server.c | 32 +- src/WINNT/afsd/cm_user.c | 4 +- src/WINNT/afsd/cm_utils.c | 2 +- src/WINNT/afsd/cm_vnodeops.c | 32 +- src/WINNT/afsd/cm_volume.c | 6 +- src/WINNT/afsd/smb.c | 30 +- src/WINNT/afsd/smb.h | 1 + src/WINNT/afsd/smb3.c | 65 +- src/WINNT/client_creds/main.cpp | 4 +- src/WINNT/client_osi/osibasel.c | 1148 +++++++++++++++++++------------ src/WINNT/client_osi/osibasel.h | 66 +- src/WINNT/client_osi/osilog.c | 10 +- src/WINNT/client_osi/osilog.h | 2 +- src/WINNT/client_osi/osiltype.h | 4 +- src/WINNT/client_osi/osisleep.c | 6 +- 32 files changed, 1118 insertions(+), 671 deletions(-) diff --git a/src/WINNT/afsd/afsd_init.c b/src/WINNT/afsd/afsd_init.c index 132d46caf..5fda13861 100644 --- a/src/WINNT/afsd/afsd_init.c +++ b/src/WINNT/afsd/afsd_init.c @@ -1681,8 +1681,8 @@ OpenDumpFile(void) void GenerateMiniDump(PEXCEPTION_POINTERS ep) { - if (IsDebuggerPresent()) - return; + if (IsDebuggerPresent()) + return; if (ep == NULL) { diff --git a/src/WINNT/afsd/afsd_service.c b/src/WINNT/afsd/afsd_service.c index 5a59be4fe..82709c4b3 100644 --- a/src/WINNT/afsd/afsd_service.c +++ b/src/WINNT/afsd/afsd_service.c @@ -53,11 +53,15 @@ extern HANDLE afsi_file; static int powerEventsRegistered = 0; extern int powerStateSuspended = 0; +static VOID (WINAPI* pRtlCaptureContext)(PCONTEXT ContextRecord) = NULL; + /* * Notifier function for use by osi_panic */ static void afsd_notifier(char *msgp, char *filep, long line) { + CONTEXT context; + if (!msgp) msgp = "unspecified assert"; @@ -74,15 +78,17 @@ static void afsd_notifier(char *msgp, char *filep, long line) afsd_ForceTrace(TRUE); buf_ForceTrace(TRUE); + if (pRtlCaptureContext) { + pRtlCaptureContext(&context); + afsd_printStack(GetCurrentThread(), &context); + } + afsi_log("--- begin dump ---"); cm_MemDumpDirStats(afsi_file, "a", 0); cm_MemDumpBPlusStats(afsi_file, "a", 0); cm_DumpCells(afsi_file, "a", 0); cm_DumpVolumes(afsi_file, "a", 0); cm_DumpSCache(afsi_file, "a", 0); -#ifdef keisa - cm_dnlcDump(afsi_file, "a"); -#endif cm_DumpBufHashTable(afsi_file, "a", 0); smb_DumpVCP(afsi_file, "a", 0); afsi_log("--- end dump ---"); @@ -92,6 +98,8 @@ static void afsd_notifier(char *msgp, char *filep, long line) DebugBreak(); #endif + GenerateMiniDump(NULL); + SetEvent(WaitToTerminate); #ifdef JUMP @@ -1097,6 +1105,7 @@ afsd_Main(DWORD argc, LPTSTR *argv) #endif /* JUMP */ HMODULE hHookDll; HMODULE hAdvApi32; + HMODULE hKernel32; #ifdef _DEBUG void afsd_DbgBreakAllocInit(); @@ -1111,6 +1120,14 @@ afsd_Main(DWORD argc, LPTSTR *argv) osi_InitPanic(afsd_notifier); osi_InitTraceOption(); + hKernel32 = LoadLibrary("kernel32.dll"); + if (hKernel32 == NULL) + { + afsi_log("Fatal: cannot load kernel32.dll"); + return; + } + pRtlCaptureContext = GetProcAddress(hKernel32, "RtlCaptureContext"); + GlobalStatus = 0; afsi_start(); diff --git a/src/WINNT/afsd/cm.h b/src/WINNT/afsd/cm.h index f525bdc75..3e246fbd4 100644 --- a/src/WINNT/afsd/cm.h +++ b/src/WINNT/afsd/cm.h @@ -105,4 
+105,49 @@ #define RWVOL 0 #define ROVOL 1 #define BACKVOL 2 + +#define LOCK_HIERARCHY_IGNORE 0 + +#define LOCK_HIERARCHY_SMB_STARTED 30 +#define LOCK_HIERARCHY_SMB_LISTENER 35 +#define LOCK_HIERARCHY_SMB_GLOBAL 40 +#define LOCK_HIERARCHY_SMB_DIRSEARCH 50 +#define LOCK_HIERARCHY_SMB_FID 60 +#define LOCK_HIERARCHY_SMB_TID 70 +#define LOCK_HIERARCHY_SMB_UID 80 +#define LOCK_HIERARCHY_SMB_RAWBUF 100 +#define LOCK_HIERARCHY_SMB_DIRWATCH 105 +#define LOCK_HIERARCHY_SMB_RCT_GLOBAL 110 +#define LOCK_HIERARCHY_SMB_USERNAME 115 +#define LOCK_HIERARCHY_SMB_VC 120 + + +#define LOCK_HIERARCHY_DAEMON_GLOBAL 400 + +#define LOCK_HIERARCHY_SCACHE_DIRLOCK 500 +#define LOCK_HIERARCHY_SCACHE_BUFCREATE 510 +#define LOCK_HIERARCHY_BUFFER 530 +#define LOCK_HIERARCHY_SCACHE 540 +#define LOCK_HIERARCHY_BUF_GLOBAL 550 +#define LOCK_HIERARCHY_VOLUME 560 +#define LOCK_HIERARCHY_USER 570 +#define LOCK_HIERARCHY_SCACHE_GLOBAL 580 +#define LOCK_HIERARCHY_CONN_GLOBAL 600 +#define LOCK_HIERARCHY_CELL 620 +#define LOCK_HIERARCHY_CELL_GLOBAL 630 +#define LOCK_HIERARCHY_SERVER 640 +#define LOCK_HIERARCHY_CALLBACK_GLOBAL 645 +#define LOCK_HIERARCHY_SERVER_GLOBAL 650 +#define LOCK_HIERARCHY_CONN 660 +#define LOCK_HIERARCHY_VOLUME_GLOBAL 670 +#define LOCK_HIERARCHY_DNLC_GLOBAL 690 +#define LOCK_HIERARCHY_FREELANCE_GLOBAL 700 +#define LOCK_HIERARCHY_UTILS_GLOBAL 710 +#define LOCK_HIERARCHY_OTHER_GLOBAL 720 +#define LOCK_HIERARCHY_ACL_GLOBAL 730 +#define LOCK_HIERARCHY_USER_GLOBAL 740 +#define LOCK_HIERARCHY_AFSDBSBMT_GLOBAL 1000 +#define LOCK_HIERARCHY_TOKEN_EVENT_GLOBAL 2000 +#define LOCK_HIERARCHY_SYSCFG_GLOBAL 3000 #endif /* __CM_H_ENV__ */ + diff --git a/src/WINNT/afsd/cm_access.c b/src/WINNT/afsd/cm_access.c index 90948cfd8..57ca0aa6b 100644 --- a/src/WINNT/afsd/cm_access.c +++ b/src/WINNT/afsd/cm_access.c @@ -39,12 +39,6 @@ int cm_HaveAccessRights(struct cm_scache *scp, struct cm_user *userp, afs_uint32 long trights; int release = 0; /* Used to avoid a call to cm_HoldSCache in the directory case */ -#if 0 - if (scp->flags & CM_SCACHEFLAG_EACCESS) { - *outRightsp = 0; - return 1; - } -#endif didLock = 0; if (scp->fileType == CM_SCACHETYPE_DIRECTORY) { aclScp = scp; /* not held, not released */ @@ -54,12 +48,11 @@ int cm_HaveAccessRights(struct cm_scache *scp, struct cm_user *userp, afs_uint32 if (!aclScp) return 0; if (aclScp != scp) { - code = lock_TryRead(&aclScp->rw); - if (code == 0) { - /* can't get lock safely and easily */ - cm_ReleaseSCache(aclScp); - return 0; - } + if (aclScp->fid.vnode < scp->fid.vnode) + lock_ReleaseWrite(&scp->rw); + lock_ObtainRead(&aclScp->rw); + if (aclScp->fid.vnode < scp->fid.vnode) + lock_ObtainWrite(&scp->rw); /* check that we have a callback, too */ if (!cm_HaveCallback(aclScp)) { @@ -167,6 +160,8 @@ long cm_GetAccessRights(struct cm_scache *scp, struct cm_user *userp, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_FORCECB); if (!code) cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS); + else + osi_Log3(afsd_logp, "GetAccessRights syncop failure scp %x user %x code %x", scp, userp, code); } else { /* not a dir, use parent dir's acl */ cm_SetFid(&tfid, scp->fid.cell, scp->fid.volume, scp->parentVnode, scp->parentUnique); @@ -184,6 +179,8 @@ long cm_GetAccessRights(struct cm_scache *scp, struct cm_user *userp, if (!code) cm_SyncOpDone(aclScp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS); + else + osi_Log3(afsd_logp, "GetAccessRights parent syncop failure scp %x user %x code %x", aclScp, userp, code); 
lock_ReleaseWrite(&aclScp->rw); cm_ReleaseSCache(aclScp); lock_ObtainWrite(&scp->rw); diff --git a/src/WINNT/afsd/cm_aclent.c b/src/WINNT/afsd/cm_aclent.c index deb281aa4..c2138ede5 100644 --- a/src/WINNT/afsd/cm_aclent.c +++ b/src/WINNT/afsd/cm_aclent.c @@ -257,7 +257,7 @@ long cm_InitACLCache(int newFile, long size) static osi_once_t once; if (osi_Once(&once)) { - lock_InitializeRWLock(&cm_aclLock, "cm_aclLock"); + lock_InitializeRWLock(&cm_aclLock, "cm_aclLock", LOCK_HIERARCHY_ACL_GLOBAL); osi_EndOnce(&once); } diff --git a/src/WINNT/afsd/cm_buf.c b/src/WINNT/afsd/cm_buf.c index 006745b91..3a72373d7 100644 --- a/src/WINNT/afsd/cm_buf.c +++ b/src/WINNT/afsd/cm_buf.c @@ -389,7 +389,7 @@ long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers) if (osi_Once(&once)) { /* initialize global locks */ - lock_InitializeRWLock(&buf_globalLock, "Global buffer lock"); + lock_InitializeRWLock(&buf_globalLock, "Global buffer lock", LOCK_HIERARCHY_BUF_GLOBAL); if ( newFile ) { /* remember this for those who want to reset it */ @@ -424,7 +424,7 @@ long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers) osi_QAdd((osi_queue_t **)&cm_data.buf_freeListp, &bp->q); bp->flags |= CM_BUF_INLRU; - lock_InitializeMutex(&bp->mx, "Buffer mutex"); + lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER); /* grab appropriate number of bytes from aligned zone */ bp->datap = data; @@ -448,7 +448,7 @@ long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers) data = cm_data.bufDataBaseAddress; for (i=0; imx, "Buffer mutex"); + lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER); bp->userp = NULL; bp->waitCount = 0; bp->waitRequests = 0; @@ -960,24 +960,25 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q); bp->flags &= ~CM_BUF_INLRU; + /* prepare to return it. Give it a refcount */ + bp->refCount = 1; +#ifdef DEBUG_REFCOUNT + osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, 1); + afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, 1); +#endif /* grab the mutex so that people don't use it * before the caller fills it with data. Again, no one * should have been able to get to this dude to lock it. */ if (!lock_TryMutex(&bp->mx)) { osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p cannot be mutex locked. refCount %d should be 0", - bp, bp->refCount); + bp, bp->refCount); osi_panic("buf_GetNewLocked: TryMutex failed",__FILE__,__LINE__); } - /* prepare to return it. 
Give it a refcount */ - bp->refCount = 1; -#ifdef DEBUG_REFCOUNT - osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, 1); - afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, 1); -#endif lock_ReleaseWrite(&buf_globalLock); lock_ReleaseRead(&scp->bufCreateLock); + *bufpp = bp; #ifdef TESTING diff --git a/src/WINNT/afsd/cm_callback.c b/src/WINNT/afsd/cm_callback.c index dfe849c98..4d5e7c1aa 100644 --- a/src/WINNT/afsd/cm_callback.c +++ b/src/WINNT/afsd/cm_callback.c @@ -588,7 +588,6 @@ extern osi_rwlock_t smb_globalLock; extern osi_rwlock_t smb_rctLock; extern osi_mutex_t cm_Freelance_Lock; -extern osi_mutex_t cm_bufGetMutex; extern osi_mutex_t cm_Afsdsbmt_Lock; extern osi_mutex_t tokenEventLock; extern osi_mutex_t smb_ListenerLock; @@ -618,7 +617,6 @@ static struct _ltable { {"smb_globalLock", (char*)&smb_globalLock, LOCKTYPE_RW}, {"smb_rctLock", (char*)&smb_rctLock, LOCKTYPE_RW}, {"cm_Freelance_Lock",(char*)&cm_Freelance_Lock, LOCKTYPE_MUTEX}, - {"cm_bufGetMutex", (char*)&cm_bufGetMutex, LOCKTYPE_MUTEX}, {"cm_Afsdsbmt_Lock", (char*)&cm_Afsdsbmt_Lock, LOCKTYPE_MUTEX}, {"tokenEventLock", (char*)&tokenEventLock, LOCKTYPE_MUTEX}, {"smb_ListenerLock", (char*)&smb_ListenerLock, LOCKTYPE_MUTEX}, @@ -1437,7 +1435,7 @@ int SRXAFSCB_GetCacheConfig(struct rx_call *callp, /* called by afsd without any locks to initialize this module */ void cm_InitCallback(void) { - lock_InitializeRWLock(&cm_callbackLock, "cm_callbackLock"); + lock_InitializeRWLock(&cm_callbackLock, "cm_callbackLock", LOCK_HIERARCHY_CALLBACK_GLOBAL); cm_activeCallbackGrantingCalls = 0; } diff --git a/src/WINNT/afsd/cm_cell.c b/src/WINNT/afsd/cm_cell.c index 33269c8df..d8c588887 100644 --- a/src/WINNT/afsd/cm_cell.c +++ b/src/WINNT/afsd/cm_cell.c @@ -87,7 +87,10 @@ cm_cell_t *cm_UpdateCell(cm_cell_t * cp, afs_uint32 flags) || (cm_dnsEnabled && (cp->flags & CM_CELLFLAG_DNS) && ((cp->flags & CM_CELLFLAG_VLSERVER_INVALID))) #endif - ) { + ) + { + lock_ReleaseMutex(&cp->mx); + /* must empty cp->vlServersp */ if (cp->vlServersp) { cm_FreeServerList(&cp->vlServersp, CM_FREESERVERLIST_DELETE); @@ -104,9 +107,11 @@ cm_cell_t *cm_UpdateCell(cm_cell_t * cp, afs_uint32 flags) code = cm_SearchCellByDNS(cp->name, NULL, &ttl, cm_AddCellProc, &rock); if (code == 0) { /* got cell from DNS */ + lock_ObtainMutex(&cp->mx); cp->flags |= CM_CELLFLAG_DNS; cp->flags &= ~CM_CELLFLAG_VLSERVER_INVALID; cp->timeout = time(0) + ttl; + lock_ReleaseMutex(&cp->mx); #ifdef DEBUG fprintf(stderr, "cell %s: ttl=%d\n", cp->name, ttl); #endif @@ -114,16 +119,21 @@ cm_cell_t *cm_UpdateCell(cm_cell_t * cp, afs_uint32 flags) /* if we fail to find it this time, we'll just do nothing and leave the * current entry alone */ + lock_ObtainMutex(&cp->mx); cp->flags |= CM_CELLFLAG_VLSERVER_INVALID; + lock_ReleaseMutex(&cp->mx); } } } else #endif /* AFS_AFSDB_ENV */ { + lock_ObtainMutex(&cp->mx); cp->timeout = time(0) + 7200; + lock_ReleaseMutex(&cp->mx); } + } else { + lock_ReleaseMutex(&cp->mx); } - lock_ReleaseMutex(&cp->mx); return code ? 
NULL : cp; } @@ -166,12 +176,11 @@ cm_cell_t *cm_GetCell_Gen(char *namep, char *newnamep, afs_uint32 flags) } } - lock_ReleaseRead(&cm_cellLock); - if (cp) { + lock_ReleaseRead(&cm_cellLock); cm_UpdateCell(cp, flags); } else if (flags & CM_FLAG_CREATE) { - lock_ObtainWrite(&cm_cellLock); + lock_ConvertRToW(&cm_cellLock); hasWriteLock = 1; /* when we dropped the lock the cell could have been added @@ -208,7 +217,18 @@ cm_cell_t *cm_GetCell_Gen(char *namep, char *newnamep, afs_uint32 flags) cp = &cm_data.cellBaseAddress[cm_data.currentCells]; memset(cp, 0, sizeof(cm_cell_t)); cp->magic = CM_CELL_MAGIC; - + + /* the cellID cannot be 0 */ + cp->cellID = ++cm_data.currentCells; + + /* otherwise we found the cell, and so we're nearly done */ + lock_InitializeMutex(&cp->mx, "cm_cell_t mutex", LOCK_HIERARCHY_CELL); + + cp->name[0] = '\0'; /* No name yet */ + + lock_ReleaseWrite(&cm_cellLock); + hasWriteLock = 0; + rock.cellp = cp; rock.flags = flags; code = cm_SearchCellFile(namep, fullname, cm_AddCellProc, &rock); @@ -227,6 +247,8 @@ cm_cell_t *cm_GetCell_Gen(char *namep, char *newnamep, afs_uint32 flags) cp = NULL; goto done; } else { /* got cell from DNS */ + lock_ObtainWrite(&cm_cellLock); + hasWriteLock = 1; cp->flags |= CM_CELLFLAG_DNS; cp->flags &= ~CM_CELLFLAG_VLSERVER_INVALID; cp->timeout = time(0) + ttl; @@ -239,6 +261,8 @@ cm_cell_t *cm_GetCell_Gen(char *namep, char *newnamep, afs_uint32 flags) } #endif } else { + lock_ObtainWrite(&cm_cellLock); + hasWriteLock = 1; cp->timeout = time(0) + 7200; /* two hour timeout */ } @@ -264,17 +288,11 @@ cm_cell_t *cm_GetCell_Gen(char *namep, char *newnamep, afs_uint32 flags) /* randomise among those vlservers having the same rank*/ cm_RandomizeServer(&cp->vlServersp); - /* otherwise we found the cell, and so we're nearly done */ - lock_InitializeMutex(&cp->mx, "cm_cell_t mutex"); - /* copy in name */ strncpy(cp->name, fullname, CELL_MAXNAMELEN); cp->name[CELL_MAXNAMELEN-1] = '\0'; - /* the cellID cannot be 0 */ - cp->cellID = ++cm_data.currentCells; - - /* append cell to global list */ + /* append cell to global list */ if (cm_data.allCellsp == NULL) { cm_data.allCellsp = cp; } else { @@ -286,16 +304,21 @@ cm_cell_t *cm_GetCell_Gen(char *namep, char *newnamep, afs_uint32 flags) cm_AddCellToNameHashTable(cp); cm_AddCellToIDHashTable(cp); + } else { + lock_ReleaseRead(&cm_cellLock); } - done: if (hasWriteLock) lock_ReleaseWrite(&cm_cellLock); /* fullname is not valid if cp == NULL */ - if (cp && newnamep) { - strncpy(newnamep, fullname, CELL_MAXNAMELEN); - newnamep[CELL_MAXNAMELEN-1]='\0'; + if (newnamep) { + if (cp) { + strncpy(newnamep, fullname, CELL_MAXNAMELEN); + newnamep[CELL_MAXNAMELEN-1]='\0'; + } else { + newnamep[0] = '\0'; + } } return cp; } @@ -370,7 +393,7 @@ void cm_InitCell(int newFile, long maxCells) if (osi_Once(&once)) { cm_cell_t * cellp; - lock_InitializeRWLock(&cm_cellLock, "cell global lock"); + lock_InitializeRWLock(&cm_cellLock, "cell global lock", LOCK_HIERARCHY_CELL_GLOBAL); if ( newFile ) { cm_data.allCellsp = NULL; @@ -388,7 +411,7 @@ void cm_InitCell(int newFile, long maxCells) memset(cellp, 0, sizeof(cm_cell_t)); cellp->magic = CM_CELL_MAGIC; - lock_InitializeMutex(&cellp->mx, "cm_cell_t mutex"); + lock_InitializeMutex(&cellp->mx, "cm_cell_t mutex", LOCK_HIERARCHY_CELL); /* copy in name */ strncpy(cellp->name, "Freelance.Local.Cell", CELL_MAXNAMELEN); /*safe*/ @@ -407,7 +430,7 @@ void cm_InitCell(int newFile, long maxCells) #endif } else { for (cellp = cm_data.allCellsp; cellp; cellp=cellp->allNextp) { - 
lock_InitializeMutex(&cellp->mx, "cm_cell_t mutex"); + lock_InitializeMutex(&cellp->mx, "cm_cell_t mutex", LOCK_HIERARCHY_CELL); cellp->vlServersp = NULL; cellp->flags |= CM_CELLFLAG_VLSERVER_INVALID; } diff --git a/src/WINNT/afsd/cm_cell.h b/src/WINNT/afsd/cm_cell.h index 659843a25..8945d0f50 100644 --- a/src/WINNT/afsd/cm_cell.h +++ b/src/WINNT/afsd/cm_cell.h @@ -25,7 +25,7 @@ typedef struct cm_cell { cm_serverRef_t *vlServersp; /* locked by cm_serverLock */ osi_mutex_t mx; /* mutex locking fields (flags) */ long flags; /* locked by mx */ - time_t timeout; /* if dns, time at which the server addrs expire */ + time_t timeout; /* if dns, time at which the server addrs expire (mx) */ } cm_cell_t; /* These are bit flag values */ diff --git a/src/WINNT/afsd/cm_conn.c b/src/WINNT/afsd/cm_conn.c index 143c5d3c6..08411cc56 100644 --- a/src/WINNT/afsd/cm_conn.c +++ b/src/WINNT/afsd/cm_conn.c @@ -49,7 +49,8 @@ void cm_InitConn(void) HKEY parmKey; if (osi_Once(&once)) { - lock_InitializeRWLock(&cm_connLock, "connection global lock"); + lock_InitializeRWLock(&cm_connLock, "connection global lock", + LOCK_HIERARCHY_CONN_GLOBAL); /* keisa - read timeout value for lanmanworkstation service. * jaltman - as per @@ -392,11 +393,13 @@ cm_Analyze(cm_conn_t *connp, cm_user_t *userp, cm_req_t *reqp, if (tsrp->server == serverp && tsrp->status == srv_not_busy) { tsrp->status = srv_busy; if (fidp) { /* File Server query */ + lock_ReleaseWrite(&cm_serverLock); code = cm_FindVolumeByID(cellp, fidp->volume, userp, reqp, CM_GETVOL_FLAG_NO_LRU_UPDATE, &volp); if (code == 0) statep = cm_VolumeStateByID(volp, fidp->volume); + lock_ObtainWrite(&cm_serverLock); } break; } @@ -484,11 +487,13 @@ cm_Analyze(cm_conn_t *connp, cm_user_t *userp, cm_req_t *reqp, } if (fidp) { /* File Server query */ + lock_ReleaseWrite(&cm_serverLock); code = cm_FindVolumeByID(cellp, fidp->volume, userp, reqp, CM_GETVOL_FLAG_NO_LRU_UPDATE, &volp); if (code == 0) cm_VolumeStateByID(volp, fidp->volume); + lock_ObtainWrite(&cm_serverLock); } } } @@ -977,7 +982,7 @@ long cm_ConnByServer(cm_server_t *serverp, cm_user_t *userp, cm_conn_t **connpp) serverp->connsp = tcp; cm_HoldUser(userp); tcp->userp = userp; - lock_InitializeMutex(&tcp->mx, "cm_conn_t mutex"); + lock_InitializeMutex(&tcp->mx, "cm_conn_t mutex", LOCK_HIERARCHY_CONN); lock_ObtainMutex(&tcp->mx); tcp->serverp = serverp; tcp->cryptlevel = rxkad_clear; diff --git a/src/WINNT/afsd/cm_daemon.c b/src/WINNT/afsd/cm_daemon.c index b930a945e..478b5e116 100644 --- a/src/WINNT/afsd/cm_daemon.c +++ b/src/WINNT/afsd/cm_daemon.c @@ -601,7 +601,8 @@ void cm_InitDaemon(int nDaemons) cm_nDaemons = (nDaemons > CM_MAX_DAEMONS) ? 
CM_MAX_DAEMONS : nDaemons; if (osi_Once(&once)) { - lock_InitializeRWLock(&cm_daemonLock, "cm_daemonLock"); + lock_InitializeRWLock(&cm_daemonLock, "cm_daemonLock", + LOCK_HIERARCHY_DAEMON_GLOBAL); osi_EndOnce(&once); /* creating IP Address Change monitor daemon */ diff --git a/src/WINNT/afsd/cm_dcache.c b/src/WINNT/afsd/cm_dcache.c index f1f328873..d9f99fdb7 100644 --- a/src/WINNT/afsd/cm_dcache.c +++ b/src/WINNT/afsd/cm_dcache.c @@ -26,7 +26,6 @@ extern void afsi_log(char *pattern, ...); #endif -osi_mutex_t cm_bufGetMutex; #ifdef AFS_FREELANCE_CLIENT extern osi_mutex_t cm_Freelance_Lock; #endif @@ -483,7 +482,6 @@ long cm_ShutdownDCache(void) int cm_InitDCache(int newFile, long chunkSize, afs_uint64 nbuffers) { - lock_InitializeMutex(&cm_bufGetMutex, "buf_Get mutex"); return buf_Init(newFile, &cm_bufOps, nbuffers); } @@ -1092,7 +1090,6 @@ long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp, * sequence at a time. */ - // lock_ObtainMutex(&cm_bufGetMutex); /* first hold all buffers, since we can't hold any locks in buf_Get */ while (1) { /* stop at chunk boundary */ @@ -1105,7 +1102,6 @@ long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp, code = buf_Get(scp, &pageBase, &tbp); if (code) { - //lock_ReleaseMutex(&cm_bufGetMutex); lock_ObtainWrite(&scp->rw); cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS); return code; @@ -1121,8 +1117,6 @@ long cm_SetupFetchBIOD(cm_scache_t *scp, osi_hyper_t *offsetp, /* reserve a chunk's worth of buffers if possible */ reserving = buf_TryReserveBuffers(cm_chunkSize / cm_data.buf_blockSize); - // lock_ReleaseMutex(&cm_bufGetMutex); - pageBase = *offsetp; collected = pageBase.LowPart & (cm_chunkSize - 1); diff --git a/src/WINNT/afsd/cm_dnlc.c b/src/WINNT/afsd/cm_dnlc.c index bcecf5996..189f62ae0 100644 --- a/src/WINNT/afsd/cm_dnlc.c +++ b/src/WINNT/afsd/cm_dnlc.c @@ -672,7 +672,7 @@ cm_dnlcInit(int newFile) memset (&dnlcstats, 0, sizeof(dnlcstats)); - lock_InitializeRWLock(&cm_dnlcLock, "cm_dnlcLock"); + lock_InitializeRWLock(&cm_dnlcLock, "cm_dnlcLock", LOCK_HIERARCHY_DNLC_GLOBAL); if ( newFile ) { lock_ObtainWrite(&cm_dnlcLock); cm_data.ncfreelist = (cm_nc_t *) 0; diff --git a/src/WINNT/afsd/cm_freelance.c b/src/WINNT/afsd/cm_freelance.c index 4899ab06a..5b88d65eb 100644 --- a/src/WINNT/afsd/cm_freelance.c +++ b/src/WINNT/afsd/cm_freelance.c @@ -139,7 +139,7 @@ void cm_InitFreelance() { thread_t phandle; int lpid; - lock_InitializeMutex(&cm_Freelance_Lock, "Freelance Lock"); + lock_InitializeMutex(&cm_Freelance_Lock, "Freelance Lock", LOCK_HIERARCHY_FREELANCE_GLOBAL); // yj: first we make a call to cm_initLocalMountPoints // to read all the local mount points from the registry @@ -391,7 +391,7 @@ int cm_reInitLocalMountPoints() { if (scp != cm_data.rootSCachep && cm_FidCmp(&scp->fid, &aFid) == 0) { // mark the scp to be reused cm_HoldSCacheNoLock(scp); - lock_ReleaseWrite(&cm_Freelance_Lock); + lock_ReleaseMutex(&cm_Freelance_Lock); lock_ReleaseWrite(&cm_scacheLock); lock_ObtainWrite(&scp->rw); cm_DiscardSCache(scp); diff --git a/src/WINNT/afsd/cm_ioctl.c b/src/WINNT/afsd/cm_ioctl.c index a161f90b3..3733c1b4c 100644 --- a/src/WINNT/afsd/cm_ioctl.c +++ b/src/WINNT/afsd/cm_ioctl.c @@ -60,7 +60,8 @@ extern void afsi_log(char *pattern, ...); void cm_InitIoctl(void) { - lock_InitializeMutex(&cm_Afsdsbmt_Lock, "AFSDSBMT.INI Access Lock"); + lock_InitializeMutex(&cm_Afsdsbmt_Lock, "AFSDSBMT.INI Access Lock", + LOCK_HIERARCHY_AFSDBSBMT_GLOBAL); } /* @@ -984,7 +985,8 @@ cm_IoctlStatMountPoint(struct cm_ioctl 
*ioctlp, struct cm_user *userp, cm_scache clientchar_t *cp; cp = cm_ParseIoctlStringAlloc(ioctlp, NULL); - code = cm_Lookup(dscp, cp, CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp); + + code = cm_Lookup(dscp, cp[0] ? cp : L".", CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp); if (code) goto done_2; @@ -1039,7 +1041,7 @@ cm_IoctlDeleteMountPoint(struct cm_ioctl *ioctlp, struct cm_user *userp, cm_scac cp = cm_ParseIoctlStringAlloc(ioctlp, NULL); - code = cm_Lookup(dscp, cp, CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp); + code = cm_Lookup(dscp, cp[0] ? cp : L".", CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp); /* if something went wrong, bail out now */ if (code) @@ -1406,10 +1408,12 @@ cm_IoctlNewCell(struct cm_ioctl *ioctlp, struct cm_user *userp) for (cp = cm_data.allCellsp; cp; cp=cp->allNextp) { afs_int32 code; - lock_ObtainMutex(&cp->mx); + /* delete all previous server lists - cm_FreeServerList will ask for write on cm_ServerLock*/ cm_FreeServerList(&cp->vlServersp, CM_FREESERVERLIST_DELETE); cp->vlServersp = NULL; + lock_ReleaseWrite(&cm_cellLock); + rock.cellp = cp; rock.flags = 0; code = cm_SearchCellFile(cp->name, cp->name, cm_AddCellProc, &rock); @@ -1419,26 +1423,34 @@ cm_IoctlNewCell(struct cm_ioctl *ioctlp, struct cm_user *userp) int ttl; code = cm_SearchCellByDNS(cp->name, cp->name, &ttl, cm_AddCellProc, &rock); if ( code == 0 ) { /* got cell from DNS */ + lock_ObtainMutex(&cp->mx); cp->flags |= CM_CELLFLAG_DNS; cp->flags &= ~CM_CELLFLAG_VLSERVER_INVALID; cp->timeout = time(0) + ttl; + lock_ReleaseMutex(&cp->mx); } } } else { + lock_ObtainMutex(&cp->mx); cp->flags &= ~CM_CELLFLAG_DNS; + lock_ReleaseMutex(&cp->mx); } #endif /* AFS_AFSDB_ENV */ if (code) { + lock_ObtainMutex(&cp->mx); cp->flags |= CM_CELLFLAG_VLSERVER_INVALID; + lock_ReleaseMutex(&cp->mx); + lock_ObtainWrite(&cm_cellLock); } else { + lock_ObtainMutex(&cp->mx); cp->flags &= ~CM_CELLFLAG_VLSERVER_INVALID; + lock_ReleaseMutex(&cp->mx); + lock_ObtainWrite(&cm_cellLock); cm_RandomizeServer(&cp->vlServersp); } - lock_ReleaseMutex(&cp->mx); } - lock_ReleaseWrite(&cm_cellLock); return 0; } @@ -1943,7 +1955,7 @@ cm_IoctlListlink(struct cm_ioctl *ioctlp, struct cm_user *userp, cm_scache_t *ds cp = ioctlp->inDatap; clientp = cm_Utf8ToClientStringAlloc(cp, -1, NULL); - code = cm_Lookup(dscp, clientp, CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp); + code = cm_Lookup(dscp, clientp[0] ? clientp : L".", CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp); free(clientp); if (code) return code; @@ -2013,7 +2025,7 @@ cm_IoctlIslink(struct cm_ioctl *ioctlp, struct cm_user *userp, cm_scache_t *dscp osi_LogEvent("cm_IoctlListlink",NULL," name[%s]",cp); clientp = cm_Utf8ToClientStringAlloc(cp, -1, NULL); - code = cm_Lookup(dscp, clientp, CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp); + code = cm_Lookup(dscp, clientp[0] ? clientp : L".", CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp); free(clientp); if (code) return code; @@ -2050,7 +2062,7 @@ cm_IoctlDeletelink(struct cm_ioctl *ioctlp, struct cm_user *userp, cm_scache_t * cp = ioctlp->inDatap; clientp = cm_Utf8ToClientStringAlloc(cp, -1, NULL); - code = cm_Lookup(dscp, clientp, CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp); + code = cm_Lookup(dscp, clientp[0] ? 
clientp : L".", CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp); /* if something went wrong, bail out now */ if (code) diff --git a/src/WINNT/afsd/cm_rpc.c b/src/WINNT/afsd/cm_rpc.c index 1fca2784f..fe495556f 100644 --- a/src/WINNT/afsd/cm_rpc.c +++ b/src/WINNT/afsd/cm_rpc.c @@ -209,7 +209,8 @@ long RpcInit() ULONG listenThreadID = 0; char * name = "afsd_rpc_ShutdownEvent"; - lock_InitializeMutex(&tokenEventLock, "token event lock"); + lock_InitializeMutex(&tokenEventLock, "token event lock", + LOCK_HIERARCHY_TOKEN_EVENT_GLOBAL); rpc_ShutdownEvent = thrd_CreateEvent(NULL, FALSE, FALSE, name); if ( GetLastError() == ERROR_ALREADY_EXISTS ) diff --git a/src/WINNT/afsd/cm_scache.c b/src/WINNT/afsd/cm_scache.c index d95662e8f..a8984406a 100644 --- a/src/WINNT/afsd/cm_scache.c +++ b/src/WINNT/afsd/cm_scache.c @@ -75,7 +75,7 @@ void cm_RemoveSCacheFromHashTable(cm_scache_t *scp) } } -/* called with cm_scacheLock write-locked; recycles an existing scp. +/* called with cm_scacheLock and scp write-locked; recycles an existing scp. * * this function ignores all of the locking hierarchy. */ @@ -93,9 +93,7 @@ long cm_RecycleSCache(cm_scache_t *scp, afs_int32 flags) return -1; } - lock_ObtainWrite(&scp->rw); cm_RemoveSCacheFromHashTable(scp); - lock_ReleaseWrite(&scp->rw); #if 0 if (flags & CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS) { @@ -226,8 +224,10 @@ long cm_RecycleSCache(cm_scache_t *scp, afs_int32 flags) } -/* called with cm_scacheLock write-locked; find a vnode to recycle. +/* + * called with cm_scacheLock write-locked; find a vnode to recycle. * Can allocate a new one if desperate, or if below quota (cm_data.maxSCaches). + * returns scp->mx held. */ cm_scache_t *cm_GetNewSCache(void) { @@ -246,6 +246,9 @@ cm_scache_t *cm_GetNewSCache(void) if (scp->refCount == 0) { if (scp->flags & CM_SCACHEFLAG_DELETED) { + if (!lock_TryWrite(&scp->rw)) + continue; + osi_Log1(afsd_logp, "GetNewSCache attempting to recycle deleted scp 0x%x", scp); if (!cm_RecycleSCache(scp, CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS)) { @@ -258,8 +261,12 @@ cm_scache_t *cm_GetNewSCache(void) /* and we're done */ return scp; } + lock_ReleaseWrite(&scp->rw); osi_Log1(afsd_logp, "GetNewSCache recycled failed scp 0x%x", scp); } else if (!(scp->flags & CM_SCACHEFLAG_INHASH)) { + if (!lock_TryWrite(&scp->rw)) + continue; + /* we found an entry, so return it */ /* now remove from the LRU queue and put it back at the * head of the LRU queue. @@ -287,6 +294,9 @@ cm_scache_t *cm_GetNewSCache(void) * we must not recycle the scp. 
*/ if (scp->refCount == 0 && scp->bufReadsp == NULL && scp->bufWritesp == NULL) { if (!buf_DirtyBuffersExist(&scp->fid)) { + if (!lock_TryWrite(&scp->rw)) + continue; + if (!cm_RecycleSCache(scp, 0)) { /* we found an entry, so return it */ /* now remove from the LRU queue and put it back at the @@ -297,6 +307,7 @@ cm_scache_t *cm_GetNewSCache(void) /* and we're done */ return scp; } + lock_ReleaseWrite(&scp->rw); } else { osi_Log1(afsd_logp,"GetNewSCache dirty buffers exist scp 0x%x", scp); } @@ -315,10 +326,11 @@ cm_scache_t *cm_GetNewSCache(void) "invalid cm_scache_t address"); memset(scp, 0, sizeof(cm_scache_t)); scp->magic = CM_SCACHE_MAGIC; - lock_InitializeRWLock(&scp->rw, "cm_scache_t rw"); - lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock"); + lock_InitializeRWLock(&scp->rw, "cm_scache_t rw", LOCK_HIERARCHY_SCACHE); + osi_assertx(lock_TryWrite(&scp->rw), "cm_scache_t rw held after allocation"); + lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock", LOCK_HIERARCHY_SCACHE_BUFCREATE); #ifdef USE_BPLUS - lock_InitializeRWLock(&scp->dirlock, "cm_scache_t dirlock"); + lock_InitializeRWLock(&scp->dirlock, "cm_scache_t dirlock", LOCK_HIERARCHY_SCACHE_DIRLOCK); #endif scp->serverLock = -1; @@ -371,7 +383,7 @@ void cm_fakeSCacheInit(int newFile) cm_data.fakeSCache.linkCount = 1; cm_data.fakeSCache.refCount = 1; } - lock_InitializeRWLock(&cm_data.fakeSCache.rw, "cm_scache_t rw"); + lock_InitializeRWLock(&cm_data.fakeSCache.rw, "cm_scache_t rw", LOCK_HIERARCHY_SCACHE); } long @@ -523,7 +535,9 @@ cm_ShutdownSCache(void) for ( scp = cm_data.allSCachesp; scp; scp = scp->allNextp ) { if (scp->randomACLp) { + lock_ReleaseWrite(&cm_scacheLock); lock_ObtainWrite(&scp->rw); + lock_ObtainWrite(&cm_scacheLock); cm_FreeAllACLEnts(scp); lock_ReleaseWrite(&scp->rw); } @@ -557,7 +571,7 @@ void cm_InitSCache(int newFile, long maxSCaches) static osi_once_t once; if (osi_Once(&once)) { - lock_InitializeRWLock(&cm_scacheLock, "cm_scacheLock"); + lock_InitializeRWLock(&cm_scacheLock, "cm_scacheLock", LOCK_HIERARCHY_SCACHE_GLOBAL); if ( newFile ) { memset(cm_data.scacheHashTablep, 0, sizeof(cm_scache_t *) * cm_data.scacheHashTableSize); cm_data.allSCachesp = NULL; @@ -569,10 +583,10 @@ void cm_InitSCache(int newFile, long maxSCaches) for ( scp = cm_data.allSCachesp; scp; scp = scp->allNextp ) { - lock_InitializeRWLock(&scp->rw, "cm_scache_t rw"); - lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock"); + lock_InitializeRWLock(&scp->rw, "cm_scache_t rw", LOCK_HIERARCHY_SCACHE); + lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock", LOCK_HIERARCHY_SCACHE_BUFCREATE); #ifdef USE_BPLUS - lock_InitializeRWLock(&scp->dirlock, "cm_scache_t dirlock"); + lock_InitializeRWLock(&scp->dirlock, "cm_scache_t dirlock", LOCK_HIERARCHY_SCACHE_DIRLOCK); #endif scp->cbServerp = NULL; scp->cbExpires = 0; @@ -667,8 +681,8 @@ long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp, for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) { if (cm_FidCmp(fidp, &scp->fid) == 0) { #ifdef DEBUG_REFCOUNT - afsi_log("%s:%d cm_GetSCache (1) outScpp 0x%p ref %d", file, line, scp, scp->refCount); - osi_Log1(afsd_logp,"cm_GetSCache (1) outScpp 0x%p", scp); + afsi_log("%s:%d cm_GetSCache (1) scp 0x%p ref %d", file, line, scp, scp->refCount); + osi_Log1(afsd_logp,"cm_GetSCache (1) scp 0x%p", scp); #endif #ifdef AFS_FREELANCE_CLIENT if (cm_freelanceEnabled && special && @@ -727,23 +741,13 @@ long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, 
cm_user_t *userp, lock_ReleaseMutex(&cm_Freelance_Lock); lock_ObtainWrite(&cm_scacheLock); if (scp == NULL) - scp = cm_GetNewSCache(); + scp = cm_GetNewSCache(); /* returns scp->mx held */ if (scp == NULL) { osi_Log0(afsd_logp,"cm_GetSCache unable to obtain *new* scache entry"); lock_ReleaseWrite(&cm_scacheLock); return CM_ERROR_WOULDBLOCK; } -#if not_too_dangerous - /* dropping the cm_scacheLock allows more than one thread - * to obtain the same cm_scache_t from the LRU list. Since - * the refCount is known to be zero at this point we have to - * assume that no one else is using the one this is returned. - */ - lock_ReleaseWrite(&cm_scacheLock); - lock_ObtainWrite(&scp->rw); - lock_ObtainWrite(&cm_scacheLock); -#endif scp->fid = *fidp; scp->dotdotFid.cell=AFS_FAKE_ROOT_CELL_ID; scp->dotdotFid.volume=AFS_FAKE_ROOT_VOL_ID; @@ -771,15 +775,13 @@ long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp, scp->dataVersion=cm_data.fakeDirVersion; scp->bufDataVersionLow=cm_data.fakeDirVersion; scp->lockDataVersion=-1; /* no lock yet */ -#if not_too_dangerous lock_ReleaseWrite(&scp->rw); -#endif *outScpp = scp; - lock_ReleaseWrite(&cm_scacheLock); #ifdef DEBUG_REFCOUNT - afsi_log("%s:%d cm_GetSCache (2) outScpp 0x%p ref %d", file, line, scp, scp->refCount); - osi_Log1(afsd_logp,"cm_GetSCache (2) outScpp 0x%p", scp); + afsi_log("%s:%d cm_GetSCache (2) scp 0x%p ref %d", file, line, scp, scp->refCount); + osi_Log1(afsd_logp,"cm_GetSCache (2) scp 0x%p", scp); #endif + lock_ReleaseWrite(&cm_scacheLock); return 0; } // end of yj code @@ -804,8 +806,8 @@ long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp, for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) { if (cm_FidCmp(fidp, &scp->fid) == 0) { #ifdef DEBUG_REFCOUNT - afsi_log("%s:%d cm_GetSCache (3) outScpp 0x%p ref %d", file, line, scp, scp->refCount); - osi_Log1(afsd_logp,"cm_GetSCache (3) outScpp 0x%p", scp); + afsi_log("%s:%d cm_GetSCache (3) scp 0x%p ref %d", file, line, scp, scp->refCount); + osi_Log1(afsd_logp,"cm_GetSCache (3) scp 0x%p", scp); #endif cm_HoldSCacheNoLock(scp); cm_AdjustScacheLRU(scp); @@ -818,7 +820,7 @@ long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp, } /* now, if we don't have the fid, recycle something */ - scp = cm_GetNewSCache(); + scp = cm_GetNewSCache(); /* returns scp->mx held */ if (scp == NULL) { osi_Log0(afsd_logp,"cm_GetNewSCache unable to obtain *new* scache entry"); lock_ReleaseWrite(&cm_scacheLock); @@ -826,20 +828,13 @@ long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp, cm_PutVolume(volp); return CM_ERROR_WOULDBLOCK; } - osi_Log2(afsd_logp,"cm_GetNewSCache returns scp 0x%x flags 0x%x", scp, scp->flags); +#ifdef DEBUG_REFCOUNT + afsi_log("%s:%d cm_GetNewSCache returns scp 0x%p flags 0x%x", file, line, scp, scp->flags); +#endif + osi_Log2(afsd_logp,"cm_GetNewSCache returns scp 0x%p flags 0x%x", scp, scp->flags); osi_assertx(!(scp->flags & CM_SCACHEFLAG_INHASH), "CM_SCACHEFLAG_INHASH set"); -#if not_too_dangerous - /* dropping the cm_scacheLock allows more than one thread - * to obtain the same cm_scache_t from the LRU list. Since - * the refCount is known to be zero at this point we have to - * assume that no one else is using the one this is returned. 
- */ - lock_ReleaseWrite(&cm_scacheLock); - lock_ObtainWrite(&scp->rw); - lock_ObtainWrite(&cm_scacheLock); -#endif scp->fid = *fidp; if (!cm_freelanceEnabled || !isRoot) { /* if this scache entry represents a volume root then we need @@ -864,11 +859,12 @@ long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp, scp->nextp = cm_data.scacheHashTablep[hash]; cm_data.scacheHashTablep[hash] = scp; scp->flags |= CM_SCACHEFLAG_INHASH; - scp->refCount = 1; - osi_Log1(afsd_logp,"cm_GetSCache sets refCount to 1 scp 0x%x", scp); -#if not_too_dangerous lock_ReleaseWrite(&scp->rw); + scp->refCount = 1; +#ifdef DEBUG_REFCOUNT + afsi_log("%s:%d cm_GetSCache sets refCount to 1 scp 0x%x", file, line, scp); #endif + osi_Log1(afsd_logp,"cm_GetSCache sets refCount to 1 scp 0x%x", scp); /* XXX - The following fields in the cm_scache are * uninitialized: @@ -876,14 +872,14 @@ long cm_GetSCache(cm_fid_t *fidp, cm_scache_t **outScpp, cm_user_t *userp, * parentVnode * parentUnique */ - lock_ReleaseWrite(&cm_scacheLock); /* now we have a held scache entry; just return it */ *outScpp = scp; #ifdef DEBUG_REFCOUNT - afsi_log("%s:%d cm_GetSCache (4) outScpp 0x%p ref %d", file, line, scp, scp->refCount); - osi_Log1(afsd_logp,"cm_GetSCache (4) outScpp 0x%p", scp); + afsi_log("%s:%d cm_GetSCache (4) scp 0x%p ref %d", file, line, scp, scp->refCount); + osi_Log1(afsd_logp,"cm_GetSCache (4) scp 0x%p", scp); #endif + lock_ReleaseWrite(&cm_scacheLock); return 0; } @@ -1044,7 +1040,6 @@ long cm_SyncOp(cm_scache_t *scp, cm_buf_t *bufp, cm_user_t *userp, cm_req_t *req afs_uint32 sleep_buf_cmflags = 0; afs_uint32 sleep_scp_bufs = 0; int wakeupCycle; - int getAccessRights = 1; lock_AssertWrite(&scp->rw); @@ -1249,7 +1244,7 @@ long cm_SyncOp(cm_scache_t *scp, cm_buf_t *bufp, cm_user_t *userp, cm_req_t *req if ((rights & (PRSFS_WRITE|PRSFS_DELETE)) && (scp->flags & CM_SCACHEFLAG_RO)) return CM_ERROR_READONLY; - if (cm_HaveAccessRights(scp, userp, rights, &outRights) || !getAccessRights) { + if (cm_HaveAccessRights(scp, userp, rights, &outRights)) { if (~outRights & rights) return CM_ERROR_NOACCESS; } @@ -1264,7 +1259,6 @@ long cm_SyncOp(cm_scache_t *scp, cm_buf_t *bufp, cm_user_t *userp, cm_req_t *req } if (code) return code; - getAccessRights = 0; /* do not repeat */ continue; } } @@ -1814,12 +1808,10 @@ void cm_ReleaseSCacheNoLock(cm_scache_t *scp) #endif { afs_int32 refCount; - long lockstate; osi_assertx(scp != NULL, "null cm_scache_t"); lock_AssertAny(&cm_scacheLock); - lockstate = lock_GetRWLockState(&cm_scacheLock); refCount = InterlockedDecrement(&scp->refCount); #ifdef DEBUG_REFCOUNT if (refCount < 0) @@ -1833,16 +1825,30 @@ void cm_ReleaseSCacheNoLock(cm_scache_t *scp) if (refCount == 0 && (scp->flags & CM_SCACHEFLAG_DELETED)) { int deleted = 0; + long lockstate; + + lockstate = lock_GetRWLockState(&cm_scacheLock); if (lockstate != OSI_RWLOCK_WRITEHELD) - lock_ConvertRToW(&cm_scacheLock); + lock_ReleaseRead(&cm_scacheLock); + else + lock_ReleaseWrite(&cm_scacheLock); + lock_ObtainWrite(&scp->rw); if (scp->flags & CM_SCACHEFLAG_DELETED) deleted = 1; - lock_ReleaseWrite(&scp->rw); - if (refCount == 0 && deleted) + + if (refCount == 0 && deleted) { + lock_ObtainWrite(&cm_scacheLock); cm_RecycleSCache(scp, 0); - if (lockstate != OSI_RWLOCK_WRITEHELD) - lock_ConvertWToR(&cm_scacheLock); + if (lockstate != OSI_RWLOCK_WRITEHELD) + lock_ConvertWToR(&cm_scacheLock); + } else { + if (lockstate != OSI_RWLOCK_WRITEHELD) + lock_ObtainRead(&cm_scacheLock); + else + lock_ObtainWrite(&cm_scacheLock); + } + 
lock_ReleaseWrite(&scp->rw); } } @@ -1866,21 +1872,20 @@ void cm_ReleaseSCache(cm_scache_t *scp) osi_Log2(afsd_logp,"cm_ReleaseSCache scp 0x%p ref %d",scp, refCount); afsi_log("%s:%d cm_ReleaseSCache scp 0x%p ref %d", file, line, scp, refCount); #endif + lock_ReleaseRead(&cm_scacheLock); if (scp->flags & CM_SCACHEFLAG_DELETED) { int deleted = 0; lock_ObtainWrite(&scp->rw); if (scp->flags & CM_SCACHEFLAG_DELETED) deleted = 1; - lock_ReleaseWrite(&scp->rw); if (deleted) { - lock_ConvertRToW(&cm_scacheLock); + lock_ObtainWrite(&cm_scacheLock); cm_RecycleSCache(scp, 0); - lock_ConvertWToR(&cm_scacheLock); + lock_ReleaseWrite(&cm_scacheLock); } + lock_ReleaseWrite(&scp->rw); } - - lock_ReleaseRead(&cm_scacheLock); } /* just look for the scp entry to get filetype */ diff --git a/src/WINNT/afsd/cm_server.c b/src/WINNT/afsd/cm_server.c index 0a442fc81..d8c0ecf2b 100644 --- a/src/WINNT/afsd/cm_server.c +++ b/src/WINNT/afsd/cm_server.c @@ -37,7 +37,9 @@ cm_ForceNewConnectionsAllServers(void) lock_ObtainRead(&cm_serverLock); for (tsp = cm_allServersp; tsp; tsp = tsp->allNextp) { cm_GetServerNoLock(tsp); + lock_ReleaseRead(&cm_serverLock); cm_ForceNewConnections(tsp); + lock_ObtainRead(&cm_serverLock); cm_PutServerNoLock(tsp); } lock_ReleaseRead(&cm_serverLock); @@ -155,9 +157,11 @@ cm_PingServer(cm_server_t *tsp) tsp->flags |= CM_SERVERFLAG_DOWN; tsp->downTime = time(NULL); } - if (code != VRESTARTING) + if (code != VRESTARTING) { + lock_ReleaseMutex(&tsp->mx); cm_ForceNewConnections(tsp); - + lock_ObtainMutex(&tsp->mx); + } osi_Log3(afsd_logp, "cm_PingServer server %s (%s) is down with caps 0x%x", osi_LogSaveString(afsd_logp, hoststr), tsp->type == CM_SERVER_VLDB ? "vldb" : "file", @@ -418,9 +422,11 @@ void cm_CheckServers(afs_uint32 flags, cm_cell_t *cellp) tsp->flags |= CM_SERVERFLAG_DOWN; tsp->downTime = time(NULL); } - if (code != VRESTARTING) + if (code != VRESTARTING) { + lock_ReleaseMutex(&tsp->mx); cm_ForceNewConnections(tsp); - + lock_ObtainMutex(&tsp->mx); + } afs_inet_ntoa_r(tsp->addr.sin_addr.S_un.S_addr, hoststr); osi_Log3(afsd_logp, "cm_MultiPingServer server %s (%s) is down with caps 0x%x", osi_LogSaveString(afsd_logp, hoststr), @@ -547,9 +553,11 @@ void cm_CheckServers(afs_uint32 flags, cm_cell_t *cellp) tsp->flags |= CM_SERVERFLAG_DOWN; tsp->downTime = time(NULL); } - if (code != VRESTARTING) + if (code != VRESTARTING) { + lock_ReleaseMutex(&tsp->mx); cm_ForceNewConnections(tsp); - + lock_ObtainMutex(&tsp->mx); + } afs_inet_ntoa_r(tsp->addr.sin_addr.S_un.S_addr, hoststr); osi_Log3(afsd_logp, "cm_MultiPingServer server %s (%s) is down with caps 0x%x", osi_LogSaveString(afsd_logp, hoststr), @@ -702,9 +710,11 @@ void cm_CheckServers(afs_uint32 flags, cm_cell_t *cellp) tsp->flags |= CM_SERVERFLAG_DOWN; tsp->downTime = time(NULL); } - if (code != VRESTARTING) + if (code != VRESTARTING) { + lock_ReleaseMutex(&tsp->mx); cm_ForceNewConnections(tsp); - + lock_ObtainMutex(&tsp->mx); + } afs_inet_ntoa_r(tsp->addr.sin_addr.S_un.S_addr, hoststr); osi_Log3(afsd_logp, "cm_MultiPingServer server %s (%s) is down with caps 0x%x", osi_LogSaveString(afsd_logp, hoststr), @@ -762,8 +772,8 @@ void cm_InitServer(void) static osi_once_t once; if (osi_Once(&once)) { - lock_InitializeRWLock(&cm_serverLock, "cm_serverLock"); - lock_InitializeRWLock(&cm_syscfgLock, "cm_syscfgLock"); + lock_InitializeRWLock(&cm_serverLock, "cm_serverLock", LOCK_HIERARCHY_SERVER_GLOBAL); + lock_InitializeRWLock(&cm_syscfgLock, "cm_syscfgLock", LOCK_HIERARCHY_SYSCFG_GLOBAL); osi_EndOnce(&once); } } @@ -905,7 +915,7 @@ cm_server_t 
*cm_NewServer(struct sockaddr_in *socketp, int type, cm_cell_t *cell tsp->type = type; tsp->cellp = cellp; tsp->refCount = 1; - lock_InitializeMutex(&tsp->mx, "cm_server_t mutex"); + lock_InitializeMutex(&tsp->mx, "cm_server_t mutex", LOCK_HIERARCHY_SERVER); tsp->addr = *socketp; cm_SetServerPrefs(tsp); diff --git a/src/WINNT/afsd/cm_user.c b/src/WINNT/afsd/cm_user.c index 117d51a5c..47e00bef7 100644 --- a/src/WINNT/afsd/cm_user.c +++ b/src/WINNT/afsd/cm_user.c @@ -29,7 +29,7 @@ void cm_InitUser(void) static osi_once_t once; if (osi_Once(&once)) { - lock_InitializeRWLock(&cm_userLock, "cm_userLock"); + lock_InitializeRWLock(&cm_userLock, "cm_userLock", LOCK_HIERARCHY_USER_GLOBAL); osi_EndOnce(&once); } @@ -43,7 +43,7 @@ cm_user_t *cm_NewUser(void) userp = malloc(sizeof(*userp)); memset(userp, 0, sizeof(*userp)); userp->refCount = 1; - lock_InitializeMutex(&userp->mx, "cm_user_t"); + lock_InitializeMutex(&userp->mx, "cm_user_t", LOCK_HIERARCHY_USER); return userp; } diff --git a/src/WINNT/afsd/cm_utils.c b/src/WINNT/afsd/cm_utils.c index 52da62f7e..87cb64c2a 100644 --- a/src/WINNT/afsd/cm_utils.c +++ b/src/WINNT/afsd/cm_utils.c @@ -340,7 +340,7 @@ cm_space_t *cm_GetSpace(void) cm_space_t *tsp; if (osi_Once(&cm_utilsOnce)) { - lock_InitializeRWLock(&cm_utilsLock, "cm_utilsLock"); + lock_InitializeRWLock(&cm_utilsLock, "cm_utilsLock", LOCK_HIERARCHY_UTILS_GLOBAL); osi_EndOnce(&cm_utilsOnce); } diff --git a/src/WINNT/afsd/cm_vnodeops.c b/src/WINNT/afsd/cm_vnodeops.c index 82ec64bc4..ae730db82 100644 --- a/src/WINNT/afsd/cm_vnodeops.c +++ b/src/WINNT/afsd/cm_vnodeops.c @@ -891,6 +891,8 @@ long cm_FollowMountPoint(cm_scache_t *scp, cm_scache_t *dscp, cm_user_t *userp, size_t vnLength; int targetType; + *outScpp = NULL; + if (scp->mountRootFid.cell != 0 && scp->mountRootGen >= cm_data.mountRootGen) { tfid = scp->mountRootFid; lock_ReleaseWrite(&scp->rw); @@ -1013,7 +1015,7 @@ long cm_FollowMountPoint(cm_scache_t *scp, cm_scache_t *dscp, cm_user_t *userp, } long cm_LookupInternal(cm_scache_t *dscp, clientchar_t *cnamep, long flags, cm_user_t *userp, - cm_req_t *reqp, cm_scache_t **outpScpp) + cm_req_t *reqp, cm_scache_t **outScpp) { long code; int dnlcHit = 1; /* did we hit in the dnlc? 
yes, we did */ @@ -1024,6 +1026,8 @@ long cm_LookupInternal(cm_scache_t *dscp, clientchar_t *cnamep, long flags, cm_u normchar_t *nnamep = NULL; fschar_t *fnamep = NULL; + *outScpp = NULL; + memset(&rock, 0, sizeof(rock)); if (dscp->fid.vnode == 1 && dscp->fid.unique == 1 @@ -1222,7 +1226,7 @@ long cm_LookupInternal(cm_scache_t *dscp, clientchar_t *cnamep, long flags, cm_u } /* copy back pointer */ - *outpScpp = tscp; + *outScpp = tscp; /* insert scache in dnlc */ if ( !dnlcHit && !(flags & CM_FLAG_NOMOUNTCHASE) && rock.ExactFound ) { @@ -1281,7 +1285,7 @@ int cm_ExpandSysName(clientchar_t *inp, clientchar_t *outp, long outSizeCch, uns } long cm_EvaluateVolumeReference(clientchar_t * namep, long flags, cm_user_t * userp, - cm_req_t *reqp, cm_scache_t ** outpScpp) + cm_req_t *reqp, cm_scache_t ** outScpp) { afs_uint32 code = 0; fschar_t cellName[CELL_MAXNAMELEN]; @@ -1370,7 +1374,7 @@ long cm_EvaluateVolumeReference(clientchar_t * namep, long flags, cm_user_t * us cm_SetFid(&fid, cellp->cellID, volume, 1, 1); - code = cm_GetSCache(&fid, outpScpp, userp, reqp); + code = cm_GetSCache(&fid, outScpp, userp, reqp); _exit_cleanup: if (fnamep) @@ -1391,10 +1395,10 @@ long cm_EvaluateVolumeReference(clientchar_t * namep, long flags, cm_user_t * us #ifdef DEBUG_REFCOUNT long cm_LookupDbg(cm_scache_t *dscp, clientchar_t *namep, long flags, cm_user_t *userp, - cm_req_t *reqp, cm_scache_t **outpScpp, char * file, long line) + cm_req_t *reqp, cm_scache_t **outScpp, char * file, long line) #else long cm_Lookup(cm_scache_t *dscp, clientchar_t *namep, long flags, cm_user_t *userp, - cm_req_t *reqp, cm_scache_t **outpScpp) + cm_req_t *reqp, cm_scache_t **outScpp) #endif { long code; @@ -1416,7 +1420,7 @@ long cm_Lookup(cm_scache_t *dscp, clientchar_t *namep, long flags, cm_user_t *us if (dscp == cm_data.rootSCachep && cm_ClientStrCmpNI(namep, _C(CM_PREFIX_VOL), CM_PREFIX_VOL_CCH) == 0) { - return cm_EvaluateVolumeReference(namep, flags, userp, reqp, outpScpp); + return cm_EvaluateVolumeReference(namep, flags, userp, reqp, outScpp); } if (cm_ExpandSysName(namep, NULL, 0, 0) > 0) { @@ -1430,7 +1434,7 @@ long cm_Lookup(cm_scache_t *dscp, clientchar_t *namep, long flags, cm_user_t *us #endif if (code == 0) { - *outpScpp = scp; + *outScpp = scp; return 0; } if (scp) { @@ -1443,7 +1447,7 @@ long cm_Lookup(cm_scache_t *dscp, clientchar_t *namep, long flags, cm_user_t *us afsi_log("%s:%d cm_LookupInternal (2) code 0x%x dscp 0x%p ref %d scp 0x%p ref %d", file, line, code, dscp, dscp->refCount, scp, scp ? scp->refCount : 0); osi_Log3(afsd_logp, "cm_LookupInternal (2) code 0x%x dscp 0x%p scp 0x%p", code, dscp, scp); #endif - *outpScpp = scp; + *outScpp = scp; return code; } } @@ -1453,7 +1457,7 @@ long cm_Lookup(cm_scache_t *dscp, clientchar_t *namep, long flags, cm_user_t *us afsi_log("%s:%d cm_LookupInternal (2) code 0x%x dscp 0x%p ref %d scp 0x%p ref %d", file, line, code, dscp, dscp->refCount, scp, scp ? scp->refCount : 0); osi_Log3(afsd_logp, "cm_LookupInternal (2) code 0x%x dscp 0x%p scp 0x%p", code, dscp, scp); #endif - *outpScpp = scp; + *outScpp = scp; return code; } @@ -1804,10 +1808,12 @@ long cm_NameI(cm_scache_t *rootSCachep, clientchar_t *pathp, long flags, int fid_count = 0; /* number of fids processed in this path walk */ int i; + *outScpp = NULL; + #ifdef DEBUG_REFCOUNT afsi_log("%s:%d cm_NameI rootscp 0x%p ref %d", file, line, rootSCachep, rootSCachep->refCount); osi_Log4(afsd_logp,"cm_NameI rootscp 0x%p path %S tidpath %S flags 0x%x", - rootSCachep, pathp ? pathp : "", tidPathp ? 
tidPathp : "", + rootSCachep, pathp ? pathp : L"", tidPathp ? tidPathp : L"", flags); #endif @@ -2073,7 +2079,7 @@ long cm_NameI(cm_scache_t *rootSCachep, clientchar_t *pathp, long flags, cm_ReleaseSCache(tscp); #ifdef DEBUG_REFCOUNT - afsi_log("%s:%d cm_NameI code 0x%x outScpp 0x%p ref %d", file, line, code, *outScpp, (*outScpp)->refCount); + afsi_log("%s:%d cm_NameI code 0x%x outScpp 0x%p ref %d", file, line, code, *outScpp, (*outScpp) ? (*outScpp)->refCount : 0); #endif osi_Log2(afsd_logp,"cm_NameI code 0x%x outScpp 0x%p", code, *outScpp); return code; @@ -2101,6 +2107,8 @@ long cm_EvaluateSymLink(cm_scache_t *dscp, cm_scache_t *linkScp, cm_space_t *spacep; cm_scache_t *newRootScp; + *outScpp = NULL; + osi_Log1(afsd_logp, "Evaluating symlink scp 0x%p", linkScp); code = cm_AssembleLink(linkScp, "", &newRootScp, &spacep, userp, reqp); diff --git a/src/WINNT/afsd/cm_volume.c b/src/WINNT/afsd/cm_volume.c index fa2cef67a..4bf058443 100644 --- a/src/WINNT/afsd/cm_volume.c +++ b/src/WINNT/afsd/cm_volume.c @@ -83,7 +83,7 @@ void cm_InitVolume(int newFile, long maxVols) static osi_once_t once; if (osi_Once(&once)) { - lock_InitializeRWLock(&cm_volumeLock, "cm global volume lock"); + lock_InitializeRWLock(&cm_volumeLock, "cm global volume lock", LOCK_HIERARCHY_VOLUME_GLOBAL); if ( newFile ) { cm_data.allVolumesp = NULL; @@ -100,7 +100,7 @@ void cm_InitVolume(int newFile, long maxVols) for (volp = cm_data.allVolumesp; volp; volp=volp->allNextp) { afs_uint32 volType; - lock_InitializeRWLock(&volp->rw, "cm_volume_t rwlock"); + lock_InitializeRWLock(&volp->rw, "cm_volume_t rwlock", LOCK_HIERARCHY_VOLUME); volp->flags |= CM_VOLUMEFLAG_RESET; volp->flags &= ~CM_VOLUMEFLAG_UPDATING_VL; for (volType = RWVOL; volType < NUM_VOL_TYPES; volType++) { @@ -853,7 +853,7 @@ long cm_FindVolumeByName(struct cm_cell *cellp, char *volumeNamep, volp->magic = CM_VOLUME_MAGIC; volp->allNextp = cm_data.allVolumesp; cm_data.allVolumesp = volp; - lock_InitializeRWLock(&volp->rw, "cm_volume_t rwlock"); + lock_InitializeRWLock(&volp->rw, "cm_volume_t rwlock", LOCK_HIERARCHY_VOLUME); lock_ReleaseWrite(&cm_volumeLock); lock_ObtainWrite(&volp->rw); lock_ObtainWrite(&cm_volumeLock); diff --git a/src/WINNT/afsd/smb.c b/src/WINNT/afsd/smb.c index eb01076d4..fbc19fcd2 100644 --- a/src/WINNT/afsd/smb.c +++ b/src/WINNT/afsd/smb.c @@ -851,7 +851,7 @@ smb_vc_t *smb_FindVC(unsigned short lsn, int flags, int lana) vcp->uidCounter = 1; /* UID 0 is reserved for blank user */ vcp->nextp = smb_allVCsp; smb_allVCsp = vcp; - lock_InitializeMutex(&vcp->mx, "vc_t mutex"); + lock_InitializeMutex(&vcp->mx, "vc_t mutex", LOCK_HIERARCHY_SMB_VC); vcp->lsn = lsn; vcp->lana = lana; vcp->secCtx = NULL; @@ -1199,7 +1199,7 @@ smb_tid_t *smb_FindTID(smb_vc_t *vcp, unsigned short tid, int flags) tidp->vcp = vcp; smb_HoldVCNoLock(vcp); vcp->tidsp = tidp; - lock_InitializeMutex(&tidp->mx, "tid_t mutex"); + lock_InitializeMutex(&tidp->mx, "tid_t mutex", LOCK_HIERARCHY_SMB_TID); tidp->tid = tid; } #ifdef DEBUG_SMB_REFCOUNT @@ -1294,7 +1294,7 @@ smb_user_t *smb_FindUID(smb_vc_t *vcp, unsigned short uid, int flags) uidp->vcp = vcp; smb_HoldVCNoLock(vcp); vcp->usersp = uidp; - lock_InitializeMutex(&uidp->mx, "user_t mutex"); + lock_InitializeMutex(&uidp->mx, "user_t mutex", LOCK_HIERARCHY_SMB_UID); uidp->userID = uid; osi_Log3(smb_logp, "smb_FindUID vcp[0x%p] new-uid[%d] name[%S]", vcp, uidp->userID, @@ -1325,7 +1325,7 @@ smb_username_t *smb_FindUserByName(clientchar_t *usern, clientchar_t *machine, unp->name = cm_ClientStrDup(usern); unp->machine = 
cm_ClientStrDup(machine); usernamesp = unp; - lock_InitializeMutex(&unp->mx, "username_t mutex"); + lock_InitializeMutex(&unp->mx, "username_t mutex", LOCK_HIERARCHY_SMB_USERNAME); if (flags & SMB_FLAG_AFSLOGON) unp->flags = SMB_USERNAMEFLAG_AFSLOGON; } @@ -1571,7 +1571,7 @@ smb_fid_t *smb_FindFID(smb_vc_t *vcp, unsigned short fid, int flags) fidp->refCount = 1; fidp->vcp = vcp; smb_HoldVCNoLock(vcp); - lock_InitializeMutex(&fidp->mx, "fid_t mutex"); + lock_InitializeMutex(&fidp->mx, "fid_t mutex", LOCK_HIERARCHY_SMB_FID); fidp->fid = fid; fidp->curr_chunk = fidp->prev_chunk = -2; fidp->raw_write_event = event; @@ -2250,7 +2250,7 @@ smb_dirSearch_t *smb_NewDirSearch(int isV3) dsp->cookie = smb_dirSearchCounter; ++smb_dirSearchCounter; dsp->refCount = 1; - lock_InitializeMutex(&dsp->mx, "cm_dirSearch_t"); + lock_InitializeMutex(&dsp->mx, "cm_dirSearch_t", LOCK_HIERARCHY_SMB_DIRSEARCH); dsp->lastTime = osi_Time(); osi_QAdd((osi_queue_t **) &smb_firstDirSearchp, &dsp->q); if (!smb_lastDirSearchp) @@ -2631,7 +2631,8 @@ clientchar_t *smb_ParseString(smb_packet_t * pktp, unsigned char * inp, #endif cb = sizeof(pktp->data); } - return smb_ParseStringBuf(pktp->data, &pktp->stringsp, inp, &cb, chainpp, flags); + return smb_ParseStringBuf(pktp->data, &pktp->stringsp, inp, &cb, chainpp, + flags | SMB_STRF_SRCNULTERM); } clientchar_t *smb_ParseStringCb(smb_packet_t * pktp, unsigned char * inp, @@ -2727,7 +2728,8 @@ smb_ParseStringBuf(const unsigned char * bufbase, *stringspp = spacep; cchdest = lengthof(spacep->wdata); - cm_Utf8ToUtf16(inp, (int)*pcb_max, spacep->wdata, cchdest); + cm_Utf8ToUtf16(inp, (int)((flags & SMB_STRF_SRCNULTERM)? -1 : *pcb_max), + spacep->wdata, cchdest); return spacep->wdata; #ifdef SMB_UNICODE @@ -5917,6 +5919,8 @@ smb_Rename(smb_vc_t *vcp, smb_packet_t *inp, clientchar_t * oldPathp, clientchar /* if the call worked, stop doing the search now, since we * really only want to rename one file. 
*/ + if (code) + osi_Log0(smb_logp, "cm_Rename failure"); osi_Log1(smb_logp, "cm_Rename returns %ld", code); } else if (code == 0) { code = CM_ERROR_NOSUCHFILE; @@ -9446,14 +9450,14 @@ void smb_Init(osi_log_t *logp, int useV3, smb_logp = logp; /* and the global lock */ - lock_InitializeRWLock(&smb_globalLock, "smb global lock"); - lock_InitializeRWLock(&smb_rctLock, "smb refct and tree struct lock"); + lock_InitializeRWLock(&smb_globalLock, "smb global lock", LOCK_HIERARCHY_SMB_GLOBAL); + lock_InitializeRWLock(&smb_rctLock, "smb refct and tree struct lock", LOCK_HIERARCHY_SMB_RCT_GLOBAL); /* Raw I/O data structures */ - lock_InitializeMutex(&smb_RawBufLock, "smb raw buffer lock"); + lock_InitializeMutex(&smb_RawBufLock, "smb raw buffer lock", LOCK_HIERARCHY_SMB_RAWBUF); - lock_InitializeMutex(&smb_ListenerLock, "smb listener lock"); - lock_InitializeMutex(&smb_StartedLock, "smb started lock"); + lock_InitializeMutex(&smb_ListenerLock, "smb listener lock", LOCK_HIERARCHY_SMB_LISTENER); + lock_InitializeMutex(&smb_StartedLock, "smb started lock", LOCK_HIERARCHY_SMB_STARTED); /* 4 Raw I/O buffers */ smb_RawBufs = calloc(65536,1); diff --git a/src/WINNT/afsd/smb.h b/src/WINNT/afsd/smb.h index b8699dacb..466732413 100644 --- a/src/WINNT/afsd/smb.h +++ b/src/WINNT/afsd/smb.h @@ -652,6 +652,7 @@ extern void smb_StripLastComponent(clientchar_t *outPathp, clientchar_t **lastCo #define SMB_STRF_FORCEASCII (1<<0) #define SMB_STRF_ANSIPATH (1<<1) #define SMB_STRF_IGNORENUL (1<<2) +#define SMB_STRF_SRCNULTERM (1<<3) extern clientchar_t *smb_ParseASCIIBlock(smb_packet_t * pktp, unsigned char *inp, char **chainpp, int flags); diff --git a/src/WINNT/afsd/smb3.c b/src/WINNT/afsd/smb3.c index dc1f940d5..bceb908b9 100644 --- a/src/WINNT/afsd/smb3.c +++ b/src/WINNT/afsd/smb3.c @@ -3070,7 +3070,11 @@ long smb_ReceiveTran2QPathInfo(smb_vc_t *vcp, smb_tran2Packet_t *p, smb_packet_t qpi.u.QPfileBasicInfo.reserved = 0; } else if (infoLevel == SMB_QUERY_FILE_STANDARD_INFO) { - smb_fid_t *fidp = smb_FindFIDByScache(vcp, scp); + smb_fid_t * fidp; + + lock_ReleaseRead(&scp->rw); + scp_rw_held = 0; + fidp = smb_FindFIDByScache(vcp, scp); qpi.u.QPfileStandardInfo.allocationSize = scp->length; qpi.u.QPfileStandardInfo.endOfFile = scp->length; @@ -3082,8 +3086,6 @@ long smb_ReceiveTran2QPathInfo(smb_vc_t *vcp, smb_tran2Packet_t *p, smb_packet_t qpi.u.QPfileStandardInfo.reserved = 0; if (fidp) { - lock_ReleaseRead(&scp->rw); - scp_rw_held = 0; lock_ObtainMutex(&fidp->mx); delonclose = fidp->flags & SMB_FID_DELONCLOSE; lock_ReleaseMutex(&fidp->mx); @@ -7505,15 +7507,14 @@ long smb_ReceiveNTTranCreate(smb_vc_t *vcp, smb_packet_t *inp, smb_packet_t *out unsigned int extendedRespRequired; int realDirFlag; unsigned int desiredAccess; -#ifdef DEBUG_VERBOSE unsigned int allocSize; -#endif unsigned int shareAccess; unsigned int extAttributes; unsigned int createDisp; -#ifdef DEBUG_VERBOSE unsigned int sdLen; -#endif + unsigned int eaLen; + unsigned int impLevel; + unsigned int secFlags; unsigned int createOptions; int initialModeBits; unsigned short baseFid; @@ -7558,23 +7559,16 @@ long smb_ReceiveNTTranCreate(smb_vc_t *vcp, smb_packet_t *inp, smb_packet_t *out return CM_ERROR_INVAL; baseFid = (unsigned short)lparmp[1]; desiredAccess = lparmp[2]; -#ifdef DEBUG_VERBOSE allocSize = lparmp[3]; -#endif /* DEBUG_VERSOSE */ extAttributes = lparmp[5]; shareAccess = lparmp[6]; createDisp = lparmp[7]; createOptions = lparmp[8]; -#ifdef DEBUG_VERBOSE sdLen = lparmp[9]; -#endif - nameLength = lparmp[11]; - -#ifdef DEBUG_VERBOSE - 
osi_Log4(smb_logp,"NTTranCreate with da[%x],ea[%x],sa[%x],cd[%x]",desiredAccess,extAttributes,shareAccess,createDisp); - osi_Log3(smb_logp,"... co[%x],sdl[%x],as[%x]",createOptions,sdLen,allocSize); - osi_Log1(smb_logp,"... flags[%x]",flags); -#endif + eaLen = lparmp[10]; + nameLength = lparmp[11]; /* spec says chars but appears to be bytes */ + impLevel = lparmp[12]; + secFlags = lparmp[13]; /* mustBeDir is never set; createOptions directory bit seems to be * more important @@ -7594,15 +7588,20 @@ long smb_ReceiveNTTranCreate(smb_vc_t *vcp, smb_packet_t *inp, smb_packet_t *out if (extAttributes & SMB_ATTR_READONLY) initialModeBits &= ~0222; - pathp = smb_ParseStringCch(inp, (parmp + (13 * sizeof(ULONG)) + sizeof(UCHAR)), + pathp = smb_ParseStringCb(inp, (parmp + (13 * sizeof(ULONG)) + sizeof(UCHAR)), nameLength, NULL, SMB_STRF_ANSIPATH); - /* Sometimes path is not null-terminated, so we make a copy. */ - realPathp = malloc((nameLength+1) * sizeof(clientchar_t)); - memcpy(realPathp, pathp, nameLength * sizeof(clientchar_t)); - realPathp[nameLength] = 0; + /* Sometimes path is not nul-terminated, so we make a copy. */ + realPathp = malloc(nameLength+sizeof(clientchar_t)); + memcpy(realPathp, pathp, nameLength); + realPathp[nameLength/sizeof(clientchar_t)] = 0; spacep = cm_GetSpace(); smb_StripLastComponent(spacep->wdata, &lastNamep, realPathp); + osi_Log1(smb_logp,"NTTranCreate %S",osi_LogSaveStringW(smb_logp,realPathp)); + osi_Log4(smb_logp,"... da[%x],ea[%x],sa[%x],cd[%x]",desiredAccess,extAttributes,shareAccess,createDisp); + osi_Log4(smb_logp,"... co[%x],sdl[%x],eal[%x],as[%x],flags[%x]",createOptions,sdLen,eaLen,allocSize); + osi_Log3(smb_logp,"... imp[%x],sec[%x],flags[%x]", impLevel, secFlags, flags); + /* * Nothing here to handle SMB_IOCTL_FILENAME. * Will add it if necessary. 
@@ -8511,12 +8510,13 @@ void smb_NotifyChange(DWORD action, DWORD notifyFilter, (!isDirectParent && !wtree)) { osi_Log1(smb_logp," skipping fidp->scp[%x]", fidp->scp); - smb_ReleaseFID(fidp); lastWatch = watch; watch = watch->nextp; + lock_ReleaseMutex(&smb_Dir_Watch_Lock); + smb_ReleaseFID(fidp); + lock_ObtainMutex(&smb_Dir_Watch_Lock); continue; } - smb_ReleaseFID(fidp); osi_Log4(smb_logp, "Sending Change Notification for fid %d filter 0x%x wtree %d file %S", @@ -8553,6 +8553,9 @@ void smb_NotifyChange(DWORD action, DWORD notifyFilter, else lastWatch->nextp = nextWatch; + /* The watch is off the list, its ours now, safe to drop the lock */ + lock_ReleaseMutex(&smb_Dir_Watch_Lock); + /* Turn off WATCHED flag in dscp */ lock_ObtainWrite(&dscp->rw); if (wtree) @@ -8653,6 +8656,9 @@ void smb_NotifyChange(DWORD action, DWORD notifyFilter, smb_SendPacket(watch->vcp, watch); smb_FreePacket(watch); + + smb_ReleaseFID(fidp); + lock_ObtainMutex(&smb_Dir_Watch_Lock); watch = nextWatch; } lock_ReleaseMutex(&smb_Dir_Watch_Lock); @@ -8763,19 +8769,20 @@ long smb_ReceiveNTRename(smb_vc_t *vcp, smb_packet_t *inp, smb_packet_t *outp) osi_Log3(smb_logp, "NTRename for [%S]->[%S] type [%s]", osi_LogSaveClientString(smb_logp, oldPathp), osi_LogSaveClientString(smb_logp, newPathp), - ((rename_type==RENAME_FLAG_RENAME)?"rename":"hardlink")); + ((rename_type==RENAME_FLAG_RENAME)?"rename":(rename_type==RENAME_FLAG_HARD_LINK)?"hardlink":"other")); if (rename_type == RENAME_FLAG_RENAME) { code = smb_Rename(vcp,inp,oldPathp,newPathp,attrs); - } else { /* RENAME_FLAG_HARD_LINK */ + } else if (rename_type == RENAME_FLAG_HARD_LINK) { /* RENAME_FLAG_HARD_LINK */ code = smb_Link(vcp,inp,oldPathp,newPathp); - } + } else + code = CM_ERROR_BADOP; return code; } void smb3_Init() { - lock_InitializeMutex(&smb_Dir_Watch_Lock, "Directory Watch List Lock"); + lock_InitializeMutex(&smb_Dir_Watch_Lock, "Directory Watch List Lock", LOCK_HIERARCHY_SMB_DIRWATCH); } cm_user_t *smb_FindCMUserByName(clientchar_t *usern, clientchar_t *machine, afs_uint32 flags) diff --git a/src/WINNT/client_creds/main.cpp b/src/WINNT/client_creds/main.cpp index 9f176104f..26b598e08 100644 --- a/src/WINNT/client_creds/main.cpp +++ b/src/WINNT/client_creds/main.cpp @@ -257,8 +257,8 @@ BOOL InitApp (LPSTR pszCmdLineA) InitCommonControls(); RegisterCheckListClass(); osi_Init(); - lock_InitializeMutex(&g.expirationCheckLock, "expiration check lock"); - lock_InitializeMutex(&g.credsLock, "global creds lock"); + lock_InitializeMutex(&g.expirationCheckLock, "expiration check lock", 0); + lock_InitializeMutex(&g.credsLock, "global creds lock", 0); KFW_AFS_wait_for_service_start(); diff --git a/src/WINNT/client_osi/osibasel.c b/src/WINNT/client_osi/osibasel.c index 09e6f2b18..c175fc21a 100644 --- a/src/WINNT/client_osi/osibasel.c +++ b/src/WINNT/client_osi/osibasel.c @@ -16,534 +16,830 @@ #include #include "osi.h" #include +#include /* atomicity-providing critical sections */ CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE]; static long atomicIndexCounter = 0; +/* Thread local storage index for lock tracking */ +static DWORD tls_LockRefH = 0; +static DWORD tls_LockRefT = 0; + void osi_BaseInit(void) { - int i; + int i; - for(i=0; itype) != 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->ObtainWriteProc)(lockp); - return; - } - - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); - - /* here we have the fast lock, so see if we can obtain the real lock */ - if (lockp->waiters > 0 || 
(lockp->flags & OSI_LOCKFLAG_EXCL) - || (lockp->readers > 0)) { - lockp->waiters++; - osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp); - lockp->waiters--; - osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL)); - } - else { - /* if we're here, all clear to set the lock */ - lockp->flags |= OSI_LOCKFLAG_EXCL; - } + if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES) + osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__); - lockp->tid = thrd_Current(); + if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES) + osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__); +} - LeaveCriticalSection(csp); +osi_lock_ref_t *lock_GetLockRef(void * lockp, char type) +{ + osi_lock_ref_t * lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t)); + + memset(lockRefp, 0, sizeof(osi_lock_ref_t)); + lockRefp->type = type; + switch (type) { + case OSI_LOCK_MUTEX: + lockRefp->mx = lockp; + break; + case OSI_LOCK_RW: + lockRefp->rw = lockp; + break; + default: + osi_panic("Invalid Lock Type", __FILE__, __LINE__); + } + + return lockRefp; } -void lock_ObtainRead(osi_rwlock_t *lockp) +void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp) { - long i; - CRITICAL_SECTION *csp; - - if ((i=lockp->type) != 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->ObtainReadProc)(lockp); - return; - } - - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); - - /* here we have the fast lock, so see if we can obtain the real lock */ - if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) { - lockp->waiters++; - osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, csp); - lockp->waiters--; - osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0); - } - else { - /* if we're here, all clear to set the lock */ - lockp->readers++; - } + char msg[512]; + osi_lock_ref_t * lockRefp; + + for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) { + if (lockRefp->type == OSI_LOCK_RW) { + if (lockRefp->rw == lockp) { + sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level); + osi_panic(msg, __FILE__, __LINE__); + } + if (lockRefp->rw->level > lockp->level) { + sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d", + lockRefp->rw, lockRefp->rw->level, lockp, lockp->level); + osi_panic(msg, __FILE__, __LINE__); + } + } else { + if (lockRefp->mx->level > lockp->level) { + sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d", + lockRefp->mx, lockRefp->mx->level, lockp, lockp->level); + osi_panic(msg, __FILE__, __LINE__); + } + osi_assertx(lockRefp->mx->level <= lockp->level, "Lock hierarchy violation"); + } + } +} - LeaveCriticalSection(csp); +void lock_VerifyOrderMX(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_mutex_t *lockp) +{ + char msg[512]; + osi_lock_ref_t * lockRefp; + + for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) { + if (lockRefp->type == OSI_LOCK_MUTEX) { + if (lockRefp->mx == lockp) { + sprintf(msg, "MX Lock 0x%p level %d already held", lockp, lockp->level); + osi_panic(msg, __FILE__, __LINE__); + } + if (lockRefp->mx->level > lockp->level) { + sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d", + lockRefp->mx, lockRefp->mx->level, lockp, lockp->level); + 
osi_panic(msg, __FILE__, __LINE__); + } + } else { + if (lockRefp->rw->level > lockp->level) { + sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d", + lockRefp->rw, lockRefp->rw->level, lockp, lockp->level); + osi_panic(msg, __FILE__, __LINE__); + } + } + } } -void lock_ReleaseRead(osi_rwlock_t *lockp) +void lock_ObtainWrite(osi_rwlock_t *lockp) { - long i; - CRITICAL_SECTION *csp; + long i; + CRITICAL_SECTION *csp; + osi_queue_t * lockRefH, *lockRefT; + osi_lock_ref_t *lockRefp; + + if ((i=lockp->type) != 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->ObtainWriteProc)(lockp); + return; + } + + lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH); + lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT); + + if (lockp->level != 0) + lock_VerifyOrderRW(lockRefH, lockRefT, lockp); + + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + /* here we have the fast lock, so see if we can obtain the real lock */ + if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) || + (lockp->readers > 0)) { + lockp->waiters++; + osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp); + lockp->waiters--; + osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL)); + } + else { + /* if we're here, all clear to set the lock */ + lockp->flags |= OSI_LOCKFLAG_EXCL; + } + + lockp->tid = thrd_Current(); + + LeaveCriticalSection(csp); + + lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW); + osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q); + TlsSetValue(tls_LockRefH, lockRefH); + TlsSetValue(tls_LockRefT, lockRefT); +} - if ((i = lockp->type) != 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->ReleaseReadProc)(lockp); - return; - } +void lock_ObtainRead(osi_rwlock_t *lockp) +{ + long i; + CRITICAL_SECTION *csp; + osi_queue_t * lockRefH, *lockRefT; + osi_lock_ref_t *lockRefp; + + if ((i=lockp->type) != 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->ObtainReadProc)(lockp); + return; + } + + lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH); + lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT); + + if (lockp->level != 0) + lock_VerifyOrderRW(lockRefH, lockRefT, lockp); + + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + /* here we have the fast lock, so see if we can obtain the real lock */ + if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) { + lockp->waiters++; + osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, csp); + lockp->waiters--; + osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0); + } + else { + /* if we're here, all clear to set the lock */ + lockp->readers++; + } - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); + LeaveCriticalSection(csp); - osi_assertx(lockp->readers > 0, "read lock not held"); - - /* releasing a read lock can allow readers or writers */ - if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) { - osi_TSignalForMLs(&lockp->d.turn, 0, csp); - } - else { - /* and finally release the big lock */ - LeaveCriticalSection(csp); - } + lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW); + osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q); + TlsSetValue(tls_LockRefH, lockRefH); + TlsSetValue(tls_LockRefT, lockRefT); } -void lock_ReleaseWrite(osi_rwlock_t *lockp) +void lock_ReleaseRead(osi_rwlock_t *lockp) { - long i; - CRITICAL_SECTION *csp; - - 
if ((i = lockp->type) != 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->ReleaseWriteProc)(lockp); - return; - } - - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); - - osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held"); - - lockp->tid = 0; - - lockp->flags &= ~OSI_LOCKFLAG_EXCL; - if (!osi_TEmpty(&lockp->d.turn)) { - osi_TSignalForMLs(&lockp->d.turn, 0, csp); - } - else { - /* and finally release the big lock */ - LeaveCriticalSection(csp); - } + long i; + CRITICAL_SECTION *csp; + osi_queue_t * lockRefH, *lockRefT; + osi_lock_ref_t *lockRefp; + + if ((i = lockp->type) != 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->ReleaseReadProc)(lockp); + return; + } + + if (lockp->level != 0) { + lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH); + lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT); + + for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) { + if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) { + osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q); + free(lockRefp); + break; + } + } + + TlsSetValue(tls_LockRefH, lockRefH); + TlsSetValue(tls_LockRefT, lockRefT); + } + + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + osi_assertx(lockp->readers > 0, "read lock not held"); + + /* releasing a read lock can allow readers or writers */ + if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) { + osi_TSignalForMLs(&lockp->d.turn, 0, csp); + } + else { + /* and finally release the big lock */ + LeaveCriticalSection(csp); + } } +void lock_ReleaseWrite(osi_rwlock_t *lockp) +{ + long i; + CRITICAL_SECTION *csp; + osi_queue_t * lockRefH, *lockRefT; + osi_lock_ref_t *lockRefp; + + if ((i = lockp->type) != 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->ReleaseWriteProc)(lockp); + return; + } + + if (lockp->level != 0) { + lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH); + lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT); + + for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) { + if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) { + osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q); + free(lockRefp); + break; + } + } + + TlsSetValue(tls_LockRefH, lockRefH); + TlsSetValue(tls_LockRefT, lockRefT); + } + + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held"); + + lockp->tid = 0; + + lockp->flags &= ~OSI_LOCKFLAG_EXCL; + if (!osi_TEmpty(&lockp->d.turn)) { + osi_TSignalForMLs(&lockp->d.turn, 0, csp); + } + else { + /* and finally release the big lock */ + LeaveCriticalSection(csp); + } +} + void lock_ConvertWToR(osi_rwlock_t *lockp) { - long i; - CRITICAL_SECTION *csp; + long i; + CRITICAL_SECTION *csp; - if ((i = lockp->type) != 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->ConvertWToRProc)(lockp); - return; - } + if ((i = lockp->type) != 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->ConvertWToRProc)(lockp); + return; + } - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); - osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, 
"write lock not held"); - - /* convert write lock to read lock */ - lockp->flags &= ~OSI_LOCKFLAG_EXCL; - lockp->readers++; + osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held"); + + /* convert write lock to read lock */ + lockp->flags &= ~OSI_LOCKFLAG_EXCL; + lockp->readers++; - lockp->tid = 0; + lockp->tid = 0; - if (!osi_TEmpty(&lockp->d.turn)) { - osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp); - } - else { - /* and finally release the big lock */ - LeaveCriticalSection(csp); - } + if (!osi_TEmpty(&lockp->d.turn)) { + osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp); + } + else { + /* and finally release the big lock */ + LeaveCriticalSection(csp); + } } void lock_ConvertRToW(osi_rwlock_t *lockp) { - long i; - CRITICAL_SECTION *csp; - - if ((i = lockp->type) != 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->ConvertRToWProc)(lockp); - return; - } - - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); - - osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held"); - osi_assertx(lockp->readers > 0, "read lock not held"); - - if (--lockp->readers == 0) { - /* convert read lock to write lock */ - lockp->flags |= OSI_LOCKFLAG_EXCL; - } else { - lockp->waiters++; - osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp); - lockp->waiters--; - osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL)); - } - - lockp->tid = thrd_Current(); - LeaveCriticalSection(csp); -} + long i; + CRITICAL_SECTION *csp; + + if ((i = lockp->type) != 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->ConvertRToWProc)(lockp); + return; + } + + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held"); + osi_assertx(lockp->readers > 0, "read lock not held"); + + if (--lockp->readers == 0) { + /* convert read lock to write lock */ + lockp->flags |= OSI_LOCKFLAG_EXCL; + } else { + lockp->waiters++; + osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp); + lockp->waiters--; + osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL)); + } + + lockp->tid = thrd_Current(); + LeaveCriticalSection(csp); +} void lock_ObtainMutex(struct osi_mutex *lockp) { - long i; - CRITICAL_SECTION *csp; - - if ((i=lockp->type) != 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->ObtainMutexProc)(lockp); - return; - } - - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); - - /* here we have the fast lock, so see if we can obtain the real lock */ - if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) { - lockp->waiters++; - osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp); - lockp->waiters--; - osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL); - } - else { - /* if we're here, all clear to set the lock */ - lockp->flags |= OSI_LOCKFLAG_EXCL; - } - lockp->tid = thrd_Current(); - LeaveCriticalSection(csp); + long i; + CRITICAL_SECTION *csp; + osi_queue_t * lockRefH, *lockRefT; + osi_lock_ref_t *lockRefp; + + if ((i=lockp->type) != 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->ObtainMutexProc)(lockp); + return; + } + + lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH); + lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT); + + if (lockp->level != 0) + lock_VerifyOrderMX(lockRefH, lockRefT, lockp); 
+ + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + /* here we have the fast lock, so see if we can obtain the real lock */ + if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) { + lockp->waiters++; + osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp); + lockp->waiters--; + osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL); + } + else { + /* if we're here, all clear to set the lock */ + lockp->flags |= OSI_LOCKFLAG_EXCL; + } + lockp->tid = thrd_Current(); + LeaveCriticalSection(csp); + + lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX); + osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q); + TlsSetValue(tls_LockRefH, lockRefH); + TlsSetValue(tls_LockRefT, lockRefT); } void lock_ReleaseMutex(struct osi_mutex *lockp) { - long i; - CRITICAL_SECTION *csp; + long i; + CRITICAL_SECTION *csp; + osi_queue_t * lockRefH, *lockRefT; + osi_lock_ref_t *lockRefp; + + if ((i = lockp->type) != 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->ReleaseMutexProc)(lockp); + return; + } + + if (lockp->level != 0) { + lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH); + lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT); + + for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) { + if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) { + osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q); + free(lockRefp); + break; + } + } + + TlsSetValue(tls_LockRefH, lockRefH); + TlsSetValue(tls_LockRefT, lockRefT); + } + + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held"); + + lockp->flags &= ~OSI_LOCKFLAG_EXCL; + lockp->tid = 0; + if (!osi_TEmpty(&lockp->d.turn)) { + osi_TSignalForMLs(&lockp->d.turn, 0, csp); + } + else { + /* and finally release the big lock */ + LeaveCriticalSection(csp); + } +} - if ((i = lockp->type) != 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->ReleaseMutexProc)(lockp); - return; - } +int lock_TryRead(struct osi_rwlock *lockp) +{ + long i; + CRITICAL_SECTION *csp; + osi_queue_t * lockRefH, *lockRefT; + osi_lock_ref_t *lockRefp; + + if ((i=lockp->type) != 0) + if (i >= 0 && i < OSI_NLOCKTYPES) + return (osi_lockOps[i]->TryReadProc)(lockp); + + lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH); + lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT); + + if (lockp->level != 0) { + for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) { + if (lockRefp->type == OSI_LOCK_RW) { + osi_assertx(lockRefp->rw != lockp, "RW Lock already held"); + } + } + } + + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + /* here we have the fast lock, so see if we can obtain the real lock */ + if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) { + i = 0; + } + else { + /* if we're here, all clear to set the lock */ + lockp->readers++; + i = 1; + } - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); + LeaveCriticalSection(csp); - osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held"); - - lockp->flags &= ~OSI_LOCKFLAG_EXCL; - lockp->tid = 0; - if (!osi_TEmpty(&lockp->d.turn)) { - osi_TSignalForMLs(&lockp->d.turn, 0, csp); - } - else { - /* and finally release the big lock */ - 
LeaveCriticalSection(csp); - } -} + if (i) { + lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW); + osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q); + TlsSetValue(tls_LockRefH, lockRefH); + TlsSetValue(tls_LockRefT, lockRefT); + } -int lock_TryRead(struct osi_rwlock *lockp) -{ - long i; - CRITICAL_SECTION *csp; - - if ((i=lockp->type) != 0) - if (i >= 0 && i < OSI_NLOCKTYPES) - return (osi_lockOps[i]->TryReadProc)(lockp); - - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); - - /* here we have the fast lock, so see if we can obtain the real lock */ - if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) { - i = 0; - } - else { - /* if we're here, all clear to set the lock */ - lockp->readers++; - i = 1; - } - - LeaveCriticalSection(csp); - - return i; -} + return i; +} int lock_TryWrite(struct osi_rwlock *lockp) { - long i; - CRITICAL_SECTION *csp; - - if ((i=lockp->type) != 0) - if (i >= 0 && i < OSI_NLOCKTYPES) - return (osi_lockOps[i]->TryWriteProc)(lockp); - - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); - - /* here we have the fast lock, so see if we can obtain the real lock */ - if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) - || (lockp->readers > 0)) { - i = 0; - } - else { - /* if we're here, all clear to set the lock */ - lockp->flags |= OSI_LOCKFLAG_EXCL; - i = 1; - } - - if (i) - lockp->tid = thrd_Current(); + long i; + CRITICAL_SECTION *csp; + osi_queue_t * lockRefH, *lockRefT; + osi_lock_ref_t *lockRefp; + + if ((i=lockp->type) != 0) + if (i >= 0 && i < OSI_NLOCKTYPES) + return (osi_lockOps[i]->TryWriteProc)(lockp); + + lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH); + lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT); + + if (lockp->level != 0) { + for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) { + if (lockRefp->type == OSI_LOCK_RW) { + osi_assertx(lockRefp->rw != lockp, "RW Lock already held"); + } + } + } + + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + /* here we have the fast lock, so see if we can obtain the real lock */ + if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) + || (lockp->readers > 0)) { + i = 0; + } + else { + /* if we're here, all clear to set the lock */ + lockp->flags |= OSI_LOCKFLAG_EXCL; + i = 1; + } + + if (i) + lockp->tid = thrd_Current(); - LeaveCriticalSection(csp); + LeaveCriticalSection(csp); - return i; + if (i) { + lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW); + osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q); + TlsSetValue(tls_LockRefH, lockRefH); + TlsSetValue(tls_LockRefT, lockRefT); + } + + return i; } int lock_TryMutex(struct osi_mutex *lockp) { - long i; - CRITICAL_SECTION *csp; - - if ((i=lockp->type) != 0) - if (i >= 0 && i < OSI_NLOCKTYPES) - return (osi_lockOps[i]->TryMutexProc)(lockp); - - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); - - /* here we have the fast lock, so see if we can obtain the real lock */ - if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) { - i = 0; - } - else { - /* if we're here, all clear to set the lock */ - lockp->flags |= OSI_LOCKFLAG_EXCL; - i = 1; - } - - if (i) - lockp->tid = thrd_Current(); + long i; + CRITICAL_SECTION *csp; + osi_queue_t * lockRefH, *lockRefT; + osi_lock_ref_t *lockRefp; + + if ((i=lockp->type) 
!= 0) + if (i >= 0 && i < OSI_NLOCKTYPES) + return (osi_lockOps[i]->TryMutexProc)(lockp); + + lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH); + lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT); + + if (lockp->level != 0) { + for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) { + if (lockRefp->type == OSI_LOCK_MUTEX) { + osi_assertx(lockRefp->mx != lockp, "Mutex already held"); + } + } + } + + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + /* here we have the fast lock, so see if we can obtain the real lock */ + if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) { + i = 0; + } + else { + /* if we're here, all clear to set the lock */ + lockp->flags |= OSI_LOCKFLAG_EXCL; + i = 1; + } + + if (i) + lockp->tid = thrd_Current(); - LeaveCriticalSection(csp); + LeaveCriticalSection(csp); - return i; + if (i) { + lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX); + osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q); + TlsSetValue(tls_LockRefH, lockRefH); + TlsSetValue(tls_LockRefT, lockRefT); + } + return i; } void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp) { - long i; - CRITICAL_SECTION *csp; - - if ((i = lockp->type) != 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->SleepRProc)(sleepVal, lockp); - return; - } - - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); - - osi_assertx(lockp->readers > 0, "osi_SleepR: not held"); - - /* XXX better to get the list of things to wakeup from TSignalForMLs, and - * then do the wakeup after SleepSpin releases the low-level mutex. - */ - if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) { - osi_TSignalForMLs(&lockp->d.turn, 0, NULL); - } - - /* now call into scheduler to sleep atomically with releasing spin lock */ - osi_SleepSpin(sleepVal, csp); -} + long i; + CRITICAL_SECTION *csp; + osi_queue_t * lockRefH, *lockRefT; + osi_lock_ref_t *lockRefp; + + if ((i = lockp->type) != 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->SleepRProc)(sleepVal, lockp); + return; + } + + if (lockp->level != 0) { + lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH); + lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT); + + for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) { + if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) { + osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q); + free(lockRefp); + break; + } + } + + TlsSetValue(tls_LockRefH, lockRefH); + TlsSetValue(tls_LockRefT, lockRefT); + } + + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + osi_assertx(lockp->readers > 0, "osi_SleepR: not held"); + + /* XXX better to get the list of things to wakeup from TSignalForMLs, and + * then do the wakeup after SleepSpin releases the low-level mutex. 
+ */ + if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) { + osi_TSignalForMLs(&lockp->d.turn, 0, NULL); + } + + /* now call into scheduler to sleep atomically with releasing spin lock */ + osi_SleepSpin(sleepVal, csp); +} void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp) { - long i; - CRITICAL_SECTION *csp; - - if ((i = lockp->type) != 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->SleepWProc)(sleepVal, lockp); - return; - } - - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); - - osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held"); - - lockp->flags &= ~OSI_LOCKFLAG_EXCL; - if (!osi_TEmpty(&lockp->d.turn)) { - osi_TSignalForMLs(&lockp->d.turn, 0, NULL); - } - - /* and finally release the big lock */ - osi_SleepSpin(sleepVal, csp); + long i; + CRITICAL_SECTION *csp; + osi_queue_t * lockRefH, *lockRefT; + osi_lock_ref_t *lockRefp; + + if ((i = lockp->type) != 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->SleepWProc)(sleepVal, lockp); + return; + } + + if (lockp->level != 0) { + lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH); + lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT); + + for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) { + if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) { + osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q); + free(lockRefp); + break; + } + } + + TlsSetValue(tls_LockRefH, lockRefH); + TlsSetValue(tls_LockRefT, lockRefT); + } + + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held"); + + lockp->flags &= ~OSI_LOCKFLAG_EXCL; + if (!osi_TEmpty(&lockp->d.turn)) { + osi_TSignalForMLs(&lockp->d.turn, 0, NULL); + } + + /* and finally release the big lock */ + osi_SleepSpin(sleepVal, csp); } void osi_SleepM(LONG_PTR sleepVal, struct osi_mutex *lockp) { - long i; - CRITICAL_SECTION *csp; - - if ((i = lockp->type) != 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->SleepMProc)(sleepVal, lockp); - return; - } - - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lockp->atomicIndex]; - EnterCriticalSection(csp); - - osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held"); + long i; + CRITICAL_SECTION *csp; + osi_queue_t * lockRefH, *lockRefT; + osi_lock_ref_t *lockRefp; + + if ((i = lockp->type) != 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->SleepMProc)(sleepVal, lockp); + return; + } + + if (lockp->level != 0) { + lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH); + lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT); + + for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) { + if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) { + osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q); + free(lockRefp); + break; + } + } + + TlsSetValue(tls_LockRefH, lockRefH); + TlsSetValue(tls_LockRefT, lockRefT); + } + + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lockp->atomicIndex]; + EnterCriticalSection(csp); + + osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held"); - lockp->flags &= ~OSI_LOCKFLAG_EXCL; - if (!osi_TEmpty(&lockp->d.turn)) { - osi_TSignalForMLs(&lockp->d.turn, 0, NULL); - } + lockp->flags &= ~OSI_LOCKFLAG_EXCL; + if (!osi_TEmpty(&lockp->d.turn)) { + 
osi_TSignalForMLs(&lockp->d.turn, 0, NULL); + } - /* and finally release the big lock */ - osi_SleepSpin(sleepVal, csp); + /* and finally release the big lock */ + osi_SleepSpin(sleepVal, csp); } void lock_FinalizeRWLock(osi_rwlock_t *lockp) { - long i; + long i; - if ((i=lockp->type) != 0) - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->FinalizeRWLockProc)(lockp); -} + if ((i=lockp->type) != 0) + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->FinalizeRWLockProc)(lockp); +} void lock_FinalizeMutex(osi_mutex_t *lockp) -{ - long i; +{ + long i; - if ((i=lockp->type) != 0) - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->FinalizeMutexProc)(lockp); -} + if ((i=lockp->type) != 0) + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->FinalizeMutexProc)(lockp); +} -void lock_InitializeMutex(osi_mutex_t *mp, char *namep) +void lock_InitializeMutex(osi_mutex_t *mp, char *namep, unsigned short level) { - int i; - - if ((i = osi_lockTypeDefault) > 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->InitializeMutexProc)(mp, namep); - return; - } - - /* otherwise we have the base case, which requires no special - * initialization. - */ - mp->type = 0; - mp->flags = 0; - mp->tid = 0; - mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE); - osi_TInit(&mp->d.turn); - return; + int i; + + if ((i = osi_lockTypeDefault) > 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->InitializeMutexProc)(mp, namep, level); + return; + } + + /* otherwise we have the base case, which requires no special + * initialization. + */ + mp->type = 0; + mp->flags = 0; + mp->tid = 0; + mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE); + mp->level = level; + osi_TInit(&mp->d.turn); + return; } -void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep) +void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep, unsigned short level) { - int i; + int i; - if ((i = osi_lockTypeDefault) > 0) { - if (i >= 0 && i < OSI_NLOCKTYPES) - (osi_lockOps[i]->InitializeRWLockProc)(mp, namep); - return; - } + if ((i = osi_lockTypeDefault) > 0) { + if (i >= 0 && i < OSI_NLOCKTYPES) + (osi_lockOps[i]->InitializeRWLockProc)(mp, namep, level); + return; + } - /* otherwise we have the base case, which requires no special - * initialization. - */ - mp->type = 0; - mp->flags = 0; - mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE); - mp->readers = 0; - mp->tid = 0; - osi_TInit(&mp->d.turn); - return; + /* otherwise we have the base case, which requires no special + * initialization. 
+ */ + mp->type = 0; + mp->flags = 0; + mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE); + mp->readers = 0; + mp->tid = 0; + mp->level = level; + osi_TInit(&mp->d.turn); + return; } int lock_GetRWLockState(osi_rwlock_t *lp) { - long i; - CRITICAL_SECTION *csp; + long i; + CRITICAL_SECTION *csp; - if ((i=lp->type) != 0) - if (i >= 0 && i < OSI_NLOCKTYPES) - return (osi_lockOps[i]->GetRWLockState)(lp); + if ((i=lp->type) != 0) + if (i >= 0 && i < OSI_NLOCKTYPES) + return (osi_lockOps[i]->GetRWLockState)(lp); - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[lp->atomicIndex]; - EnterCriticalSection(csp); + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[lp->atomicIndex]; + EnterCriticalSection(csp); - /* here we have the fast lock, so see if we can obtain the real lock */ - if (lp->flags & OSI_LOCKFLAG_EXCL) i = OSI_RWLOCK_WRITEHELD; - else i = 0; - if (lp->readers > 0) i |= OSI_RWLOCK_READHELD; + /* here we have the fast lock, so see if we can obtain the real lock */ + if (lp->flags & OSI_LOCKFLAG_EXCL) + i = OSI_RWLOCK_WRITEHELD; + else + i = 0; + if (lp->readers > 0) + i |= OSI_RWLOCK_READHELD; - LeaveCriticalSection(csp); + LeaveCriticalSection(csp); - return i; + return i; } -int lock_GetMutexState(struct osi_mutex *mp) { - long i; - CRITICAL_SECTION *csp; +int lock_GetMutexState(struct osi_mutex *mp) +{ + long i; + CRITICAL_SECTION *csp; - if ((i=mp->type) != 0) - if (i >= 0 && i < OSI_NLOCKTYPES) - return (osi_lockOps[i]->GetMutexState)(mp); + if ((i=mp->type) != 0) + if (i >= 0 && i < OSI_NLOCKTYPES) + return (osi_lockOps[i]->GetMutexState)(mp); - /* otherwise we're the fast base type */ - csp = &osi_baseAtomicCS[mp->atomicIndex]; - EnterCriticalSection(csp); + /* otherwise we're the fast base type */ + csp = &osi_baseAtomicCS[mp->atomicIndex]; + EnterCriticalSection(csp); - if (mp->flags & OSI_LOCKFLAG_EXCL) - i = OSI_MUTEX_HELD; - else - i = 0; + if (mp->flags & OSI_LOCKFLAG_EXCL) + i = OSI_MUTEX_HELD; + else + i = 0; - LeaveCriticalSection(csp); + LeaveCriticalSection(csp); - return i; + return i; } diff --git a/src/WINNT/client_osi/osibasel.h b/src/WINNT/client_osi/osibasel.h index 2fa6ee48a..5e54171ae 100644 --- a/src/WINNT/client_osi/osibasel.h +++ b/src/WINNT/client_osi/osibasel.h @@ -28,16 +28,17 @@ * lock using an atomic increment operation. */ typedef struct osi_mutex { - char type; /* for all types; type 0 uses atomic count */ - char flags; /* flags for base type */ - unsigned short atomicIndex; /* index of lock for low-level sync */ - DWORD tid; /* tid of thread that owns the lock */ - unsigned short waiters; /* waiters */ - unsigned short pad; - union { - void *privateDatap; /* data pointer for non-zero types */ - osi_turnstile_t turn; /* turnstile */ - } d; + char type; /* for all types; type 0 uses atomic count */ + char flags; /* flags for base type */ + unsigned short atomicIndex; /* index of lock for low-level sync */ + DWORD tid; /* tid of thread that owns the lock */ + unsigned short waiters; /* waiters */ + unsigned short pad; + union { + void *privateDatap; /* data pointer for non-zero types */ + osi_turnstile_t turn; /* turnstile */ + } d; + unsigned short level; /* locking hierarchy level */ } osi_mutex_t; /* a read/write lock. This structure has two forms. In the @@ -53,18 +54,37 @@ typedef struct osi_mutex { * This type of lock has N readers or one writer. 
*/ typedef struct osi_rwlock { - char type; /* for all types; type 0 uses atomic count */ - char flags; /* flags for base type */ - unsigned short atomicIndex; /* index into hash table for low-level sync */ - DWORD tid; /* writer's tid */ - unsigned short waiters; /* waiters */ - unsigned short readers; /* readers */ - union { - void *privateDatap; /* data pointer for non-zero types */ - osi_turnstile_t turn; /* turnstile */ - } d; + char type; /* for all types; type 0 uses atomic count */ + char flags; /* flags for base type */ + unsigned short atomicIndex; /* index into hash table for low-level sync */ + DWORD tid; /* writer's tid */ + unsigned short waiters; /* waiters */ + unsigned short readers; /* readers */ + union { + void *privateDatap; /* data pointer for non-zero types */ + osi_turnstile_t turn; /* turnstile */ + } d; + unsigned short level; /* locking hierarchy level */ } osi_rwlock_t; + +/* + * a lock reference is a queue object that maintains a reference to a + * mutex or read/write lock object. Its intended purpose is for + * maintaining lists of lock objects on a per thread basis. + */ +typedef struct osi_lock_ref { + osi_queue_t q; + char type; + union { + osi_rwlock_t *rw; + osi_mutex_t *mx; + }; +} osi_lock_ref_t; + +#define OSI_LOCK_MUTEX 1 +#define OSI_LOCK_RW 2 + extern void lock_ObtainRead (struct osi_rwlock *); extern void lock_ObtainWrite (struct osi_rwlock *); @@ -101,9 +121,9 @@ extern CRITICAL_SECTION osi_baseAtomicCS[]; /* and define the functions that create basic locks and mutexes */ -extern void lock_InitializeRWLock(struct osi_rwlock *, char *); +extern void lock_InitializeRWLock(struct osi_rwlock *, char *, unsigned short level); -extern void lock_InitializeMutex(struct osi_mutex *, char *); +extern void lock_InitializeMutex(struct osi_mutex *, char *, unsigned short level); extern void osi_Init (void); @@ -123,6 +143,8 @@ extern void osi_BaseInit(void); /* and friendly macros */ +#define lock_AssertNone(x) osi_assertx(lock_GetRWLockState(x) == 0, "(OSI_RWLOCK_READHELD | OSI_RWLOCK_WRITEHELD)") + #define lock_AssertRead(x) osi_assertx(lock_GetRWLockState(x) & OSI_RWLOCK_READHELD, "!OSI_RWLOCK_READHELD") #define lock_AssertWrite(x) osi_assertx(lock_GetRWLockState(x) & OSI_RWLOCK_WRITEHELD, "!OSI_RWLOCK_WRITEHELD") diff --git a/src/WINNT/client_osi/osilog.c b/src/WINNT/client_osi/osilog.c index 0b513f24e..a9b5c788a 100644 --- a/src/WINNT/client_osi/osilog.c +++ b/src/WINNT/client_osi/osilog.c @@ -119,10 +119,10 @@ osi_log_t *osi_LogCreate(char *namep, size_t size) return logp; } -/* we just panic'd. Turn off all logging adding special log record - * to all enabled logs. Be careful not to wait for a lock. +/* we just panic'd. Log the error to all enabled log files. + * Be careful not to wait for a lock. */ -void osi_LogPanic(char *filep, size_t lineNumber) +void osi_LogPanic(char *msgp, char *filep, size_t lineNumber) { osi_log_t *tlp; @@ -131,9 +131,9 @@ void osi_LogPanic(char *filep, size_t lineNumber) /* otherwise, proceed */ if (filep) - osi_LogAdd(tlp, "**PANIC** (file %s:%d)", (size_t) filep, lineNumber, 0, 0, 0); + osi_LogAdd(tlp, "**PANIC** \"%s\" (file %s:%d)", (size_t)msgp, (size_t) filep, lineNumber, 0, 0); else - osi_LogAdd(tlp, "**PANIC**", 0, 0, 0, 0, 0); + osi_LogAdd(tlp, "**PANIC** \"%s\"", (size_t)msgp, 0, 0, 0, 0); /* should grab lock for this, but we're in panic, and better safe than * sorry. 
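The osibasel.c changes above record, per thread, every base mutex and rw-lock currently held and panic when a lock is requested whose level is lower than that of a lock already held. A condensed sketch of that idea follows, using C11 thread-local storage and a plain singly linked list in place of the TlsAlloc slot pair and osi_queue machinery in the patch; held_lock, held_locks, check_order, and record_acquire are illustrative names, not part of the patch:

    #include <stdio.h>
    #include <stdlib.h>

    /* One record per lock the current thread holds. */
    struct held_lock {
        const void       *lockp;   /* address of the mutex or rw-lock */
        unsigned short    level;   /* its hierarchy level */
        struct held_lock *next;
    };

    /* Per-thread list of held locks (the patch keeps head/tail osi_queue
     * pointers in two TlsAlloc'd slots instead). */
    static _Thread_local struct held_lock *held_locks;

    /* Called before acquiring a lock with a non-zero level: fail hard if
     * the lock is already held or any held lock has a higher level. */
    static void check_order(const void *lockp, unsigned short level)
    {
        const struct held_lock *hp;

        for (hp = held_locks; hp; hp = hp->next) {
            if (hp->lockp == lockp || hp->level > level) {
                fprintf(stderr, "lock hierarchy violation: held %p level %u,"
                        " requested %p level %u\n", hp->lockp,
                        (unsigned)hp->level, lockp, (unsigned)level);
                abort();   /* the patch calls osi_panic() here */
            }
        }
    }

    /* Called after a successful acquire; the matching release walks the
     * list and frees the entry, as lock_ReleaseMutex/lock_ReleaseRead
     * do above. */
    static void record_acquire(const void *lockp, unsigned short level)
    {
        struct held_lock *hp = malloc(sizeof(*hp));

        if (hp == NULL)
            return;          /* tracking is best-effort in this sketch */
        hp->lockp = lockp;
        hp->level = level;
        hp->next = held_locks;
        held_locks = hp;
    }
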
diff --git a/src/WINNT/client_osi/osilog.h b/src/WINNT/client_osi/osilog.h index 8e3c2afde..e186712a0 100644 --- a/src/WINNT/client_osi/osilog.h +++ b/src/WINNT/client_osi/osilog.h @@ -71,7 +71,7 @@ extern void osi_LogEnable(osi_log_t *); extern void osi_LogDisable(osi_log_t *); -extern void osi_LogPanic(char *filep, size_t line); +extern void osi_LogPanic(char *msgp, char *filep, size_t line); extern void osi_LogPrint(osi_log_t *logp, FILE_HANDLE handle); diff --git a/src/WINNT/client_osi/osiltype.h b/src/WINNT/client_osi/osiltype.h index a56af66ec..01560e3fb 100644 --- a/src/WINNT/client_osi/osiltype.h +++ b/src/WINNT/client_osi/osiltype.h @@ -30,8 +30,8 @@ typedef struct osi_lockOps { void (*SleepRProc)(LONG_PTR, struct osi_rwlock *); void (*SleepWProc)(LONG_PTR, struct osi_rwlock *); void (*SleepMProc)(LONG_PTR, struct osi_mutex *); - void (*InitializeMutexProc)(struct osi_mutex *, char *); - void (*InitializeRWLockProc)(struct osi_rwlock *, char *); + void (*InitializeMutexProc)(struct osi_mutex *, char *, unsigned short); + void (*InitializeRWLockProc)(struct osi_rwlock *, char *, unsigned short); void (*FinalizeMutexProc)(struct osi_mutex *); void (*FinalizeRWLockProc)(struct osi_rwlock *); void (*ConvertWToRProc)(struct osi_rwlock *); diff --git a/src/WINNT/client_osi/osisleep.c b/src/WINNT/client_osi/osisleep.c index b82dc6d20..a6250831d 100644 --- a/src/WINNT/client_osi/osisleep.c +++ b/src/WINNT/client_osi/osisleep.c @@ -674,10 +674,10 @@ void osi_InitPanic(void *anotifFunc) void osi_panic(char *msgp, char *filep, long line) { - osi_LogPanic(filep, line); + if (notifFunc) + (*notifFunc)(msgp, filep, line); - if (notifFunc) - (*notifFunc)(msgp, filep, line); + osi_LogPanic(msgp, filep, line); } /* get time in seconds since some relatively recent time */ -- 2.39.5
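The osisleep.c hunk above reorders osi_panic() so the registered notifier runs before the panic record is logged. A short sketch of that ordering follows under assumed stand-in names; panic_notifier_t, set_panic_notifier, and panic are illustrative and the abort() call is only this sketch's way of terminating:

    #include <stdio.h>
    #include <stdlib.h>

    typedef void (*panic_notifier_t)(char *msgp, char *filep, long line);

    static panic_notifier_t notify_func;

    /* Analogous to osi_InitPanic(): remember the notifier to run on panic. */
    void set_panic_notifier(panic_notifier_t fn)
    {
        notify_func = fn;
    }

    /* Mirrors the reordered panic path: run the notifier (which dumps
     * state and writes the minidump) first, then add the panic record,
     * so the logs the notifier flushes already reflect the failure. */
    void panic(char *msgp, char *filep, long line)
    {
        if (notify_func)
            (*notify_func)(msgp, filep, line);

        fprintf(stderr, "**PANIC** \"%s\" (file %s:%ld)\n", msgp, filep, line);
        abort();
    }
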