void
GenerateMiniDump(PEXCEPTION_POINTERS ep)
{
- if (IsDebuggerPresent())
- return;
+ if (IsDebuggerPresent())
+ return;
if (ep == NULL)
{
static int powerEventsRegistered = 0;
extern int powerStateSuspended = 0;
+static VOID (WINAPI* pRtlCaptureContext)(PCONTEXT ContextRecord) = NULL;
+
/*
* Notifier function for use by osi_panic
*/
static void afsd_notifier(char *msgp, char *filep, long line)
{
+ CONTEXT context;
+
if (!msgp)
msgp = "unspecified assert";
afsd_ForceTrace(TRUE);
buf_ForceTrace(TRUE);
+ if (pRtlCaptureContext) {
+ pRtlCaptureContext(&context);
+ afsd_printStack(GetCurrentThread(), &context);
+ }
+
afsi_log("--- begin dump ---");
cm_MemDumpDirStats(afsi_file, "a", 0);
cm_MemDumpBPlusStats(afsi_file, "a", 0);
cm_DumpCells(afsi_file, "a", 0);
cm_DumpVolumes(afsi_file, "a", 0);
cm_DumpSCache(afsi_file, "a", 0);
-#ifdef keisa
- cm_dnlcDump(afsi_file, "a");
-#endif
cm_DumpBufHashTable(afsi_file, "a", 0);
smb_DumpVCP(afsi_file, "a", 0);
afsi_log("--- end dump ---");
DebugBreak();
#endif
+ GenerateMiniDump(NULL);
+
SetEvent(WaitToTerminate);
#ifdef JUMP
#endif /* JUMP */
HMODULE hHookDll;
HMODULE hAdvApi32;
+ HMODULE hKernel32;
#ifdef _DEBUG
void afsd_DbgBreakAllocInit();
osi_InitPanic(afsd_notifier);
osi_InitTraceOption();
+ hKernel32 = LoadLibrary("kernel32.dll");
+ if (hKernel32 == NULL)
+ {
+ afsi_log("Fatal: cannot load kernel32.dll");
+ return;
+ }
+ pRtlCaptureContext = GetProcAddress(hKernel32, "RtlCaptureContext");
+
GlobalStatus = 0;
afsi_start();
#define RWVOL 0
#define ROVOL 1
#define BACKVOL 2
+
+#define LOCK_HIERARCHY_IGNORE 0
+
+#define LOCK_HIERARCHY_SMB_STARTED 30
+#define LOCK_HIERARCHY_SMB_LISTENER 35
+#define LOCK_HIERARCHY_SMB_GLOBAL 40
+#define LOCK_HIERARCHY_SMB_DIRSEARCH 50
+#define LOCK_HIERARCHY_SMB_FID 60
+#define LOCK_HIERARCHY_SMB_TID 70
+#define LOCK_HIERARCHY_SMB_UID 80
+#define LOCK_HIERARCHY_SMB_RAWBUF 100
+#define LOCK_HIERARCHY_SMB_DIRWATCH 105
+#define LOCK_HIERARCHY_SMB_RCT_GLOBAL 110
+#define LOCK_HIERARCHY_SMB_USERNAME 115
+#define LOCK_HIERARCHY_SMB_VC 120
+
+
+#define LOCK_HIERARCHY_DAEMON_GLOBAL 400
+
+#define LOCK_HIERARCHY_SCACHE_DIRLOCK 500
+#define LOCK_HIERARCHY_SCACHE_BUFCREATE 510
+#define LOCK_HIERARCHY_BUFFER 530
+#define LOCK_HIERARCHY_SCACHE 540
+#define LOCK_HIERARCHY_BUF_GLOBAL 550
+#define LOCK_HIERARCHY_VOLUME 560
+#define LOCK_HIERARCHY_USER 570
+#define LOCK_HIERARCHY_SCACHE_GLOBAL 580
+#define LOCK_HIERARCHY_CONN_GLOBAL 600
+#define LOCK_HIERARCHY_CELL 620
+#define LOCK_HIERARCHY_CELL_GLOBAL 630
+#define LOCK_HIERARCHY_SERVER 640
+#define LOCK_HIERARCHY_CALLBACK_GLOBAL 645
+#define LOCK_HIERARCHY_SERVER_GLOBAL 650
+#define LOCK_HIERARCHY_CONN 660
+#define LOCK_HIERARCHY_VOLUME_GLOBAL 670
+#define LOCK_HIERARCHY_DNLC_GLOBAL 690
+#define LOCK_HIERARCHY_FREELANCE_GLOBAL 700
+#define LOCK_HIERARCHY_UTILS_GLOBAL 710
+#define LOCK_HIERARCHY_OTHER_GLOBAL 720
+#define LOCK_HIERARCHY_ACL_GLOBAL 730
+#define LOCK_HIERARCHY_USER_GLOBAL 740
+#define LOCK_HIERARCHY_AFSDBSBMT_GLOBAL 1000
+#define LOCK_HIERARCHY_TOKEN_EVENT_GLOBAL 2000
+#define LOCK_HIERARCHY_SYSCFG_GLOBAL 3000
#endif /* __CM_H_ENV__ */
+
long trights;
int release = 0; /* Used to avoid a call to cm_HoldSCache in the directory case */
-#if 0
- if (scp->flags & CM_SCACHEFLAG_EACCESS) {
- *outRightsp = 0;
- return 1;
- }
-#endif
didLock = 0;
if (scp->fileType == CM_SCACHETYPE_DIRECTORY) {
aclScp = scp; /* not held, not released */
if (!aclScp)
return 0;
if (aclScp != scp) {
- code = lock_TryRead(&aclScp->rw);
- if (code == 0) {
- /* can't get lock safely and easily */
- cm_ReleaseSCache(aclScp);
- return 0;
- }
+ if (aclScp->fid.vnode < scp->fid.vnode)
+ lock_ReleaseWrite(&scp->rw);
+ lock_ObtainRead(&aclScp->rw);
+ if (aclScp->fid.vnode < scp->fid.vnode)
+ lock_ObtainWrite(&scp->rw);
/* check that we have a callback, too */
if (!cm_HaveCallback(aclScp)) {
CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_FORCECB);
if (!code)
cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+ else
+ osi_Log3(afsd_logp, "GetAccessRights syncop failure scp %x user %x code %x", scp, userp, code);
} else {
/* not a dir, use parent dir's acl */
cm_SetFid(&tfid, scp->fid.cell, scp->fid.volume, scp->parentVnode, scp->parentUnique);
if (!code)
cm_SyncOpDone(aclScp, NULL,
CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+ else
+ osi_Log3(afsd_logp, "GetAccessRights parent syncop failure scp %x user %x code %x", aclScp, userp, code);
lock_ReleaseWrite(&aclScp->rw);
cm_ReleaseSCache(aclScp);
lock_ObtainWrite(&scp->rw);
static osi_once_t once;
if (osi_Once(&once)) {
- lock_InitializeRWLock(&cm_aclLock, "cm_aclLock");
+ lock_InitializeRWLock(&cm_aclLock, "cm_aclLock", LOCK_HIERARCHY_ACL_GLOBAL);
osi_EndOnce(&once);
}
if (osi_Once(&once)) {
/* initialize global locks */
- lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");
+ lock_InitializeRWLock(&buf_globalLock, "Global buffer lock", LOCK_HIERARCHY_BUF_GLOBAL);
if ( newFile ) {
/* remember this for those who want to reset it */
osi_QAdd((osi_queue_t **)&cm_data.buf_freeListp, &bp->q);
bp->flags |= CM_BUF_INLRU;
- lock_InitializeMutex(&bp->mx, "Buffer mutex");
+ lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);
/* grab appropriate number of bytes from aligned zone */
bp->datap = data;
data = cm_data.bufDataBaseAddress;
for (i=0; i<cm_data.buf_nbuffers; i++) {
- lock_InitializeMutex(&bp->mx, "Buffer mutex");
+ lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);
bp->userp = NULL;
bp->waitCount = 0;
bp->waitRequests = 0;
for (i=0; i<nbuffers; i++) {
memset(bp, 0, sizeof(*bp));
- lock_InitializeMutex(&bp->mx, "cm_buf_t");
+ lock_InitializeMutex(&bp->mx, "cm_buf_t", LOCK_HIERARCHY_BUFFER);
/* grab appropriate number of bytes from aligned zone */
bp->datap = data;
osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
bp->flags &= ~CM_BUF_INLRU;
+ /* prepare to return it. Give it a refcount */
+ bp->refCount = 1;
+#ifdef DEBUG_REFCOUNT
+ osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, 1);
+ afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, 1);
+#endif
/* grab the mutex so that people don't use it
* before the caller fills it with data. Again, no one
* should have been able to get to this dude to lock it.
*/
if (!lock_TryMutex(&bp->mx)) {
osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p cannot be mutex locked. refCount %d should be 0",
- bp, bp->refCount);
+ bp, bp->refCount);
osi_panic("buf_GetNewLocked: TryMutex failed",__FILE__,__LINE__);
}
- /* prepare to return it. Give it a refcount */
- bp->refCount = 1;
-#ifdef DEBUG_REFCOUNT
- osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, 1);
- afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, 1);
-#endif
lock_ReleaseWrite(&buf_globalLock);
lock_ReleaseRead(&scp->bufCreateLock);
+
*bufpp = bp;
#ifdef TESTING
extern osi_rwlock_t smb_rctLock;
extern osi_mutex_t cm_Freelance_Lock;
-extern osi_mutex_t cm_bufGetMutex;
extern osi_mutex_t cm_Afsdsbmt_Lock;
extern osi_mutex_t tokenEventLock;
extern osi_mutex_t smb_ListenerLock;
{"smb_globalLock", (char*)&smb_globalLock, LOCKTYPE_RW},
{"smb_rctLock", (char*)&smb_rctLock, LOCKTYPE_RW},
{"cm_Freelance_Lock",(char*)&cm_Freelance_Lock, LOCKTYPE_MUTEX},
- {"cm_bufGetMutex", (char*)&cm_bufGetMutex, LOCKTYPE_MUTEX},
{"cm_Afsdsbmt_Lock", (char*)&cm_Afsdsbmt_Lock, LOCKTYPE_MUTEX},
{"tokenEventLock", (char*)&tokenEventLock, LOCKTYPE_MUTEX},
{"smb_ListenerLock", (char*)&smb_ListenerLock, LOCKTYPE_MUTEX},
/* called by afsd without any locks to initialize this module */
void cm_InitCallback(void)
{
- lock_InitializeRWLock(&cm_callbackLock, "cm_callbackLock");
+ lock_InitializeRWLock(&cm_callbackLock, "cm_callbackLock", LOCK_HIERARCHY_CALLBACK_GLOBAL);
cm_activeCallbackGrantingCalls = 0;
}
|| (cm_dnsEnabled && (cp->flags & CM_CELLFLAG_DNS) &&
((cp->flags & CM_CELLFLAG_VLSERVER_INVALID)))
#endif
- ) {
+ )
+ {
+ lock_ReleaseMutex(&cp->mx);
+
/* must empty cp->vlServersp */
if (cp->vlServersp) {
cm_FreeServerList(&cp->vlServersp, CM_FREESERVERLIST_DELETE);
code = cm_SearchCellByDNS(cp->name, NULL, &ttl, cm_AddCellProc, &rock);
if (code == 0) { /* got cell from DNS */
+ lock_ObtainMutex(&cp->mx);
cp->flags |= CM_CELLFLAG_DNS;
cp->flags &= ~CM_CELLFLAG_VLSERVER_INVALID;
cp->timeout = time(0) + ttl;
+ lock_ReleaseMutex(&cp->mx);
#ifdef DEBUG
fprintf(stderr, "cell %s: ttl=%d\n", cp->name, ttl);
#endif
/* if we fail to find it this time, we'll just do nothing and leave the
* current entry alone
*/
+ lock_ObtainMutex(&cp->mx);
cp->flags |= CM_CELLFLAG_VLSERVER_INVALID;
+ lock_ReleaseMutex(&cp->mx);
}
}
} else
#endif /* AFS_AFSDB_ENV */
{
+ lock_ObtainMutex(&cp->mx);
cp->timeout = time(0) + 7200;
+ lock_ReleaseMutex(&cp->mx);
}
+ } else {
+ lock_ReleaseMutex(&cp->mx);
}
- lock_ReleaseMutex(&cp->mx);
return code ? NULL : cp;
}
}
}
- lock_ReleaseRead(&cm_cellLock);
-
if (cp) {
+ lock_ReleaseRead(&cm_cellLock);
cm_UpdateCell(cp, flags);
} else if (flags & CM_FLAG_CREATE) {
- lock_ObtainWrite(&cm_cellLock);
+ lock_ConvertRToW(&cm_cellLock);
hasWriteLock = 1;
/* when we dropped the lock the cell could have been added
cp = &cm_data.cellBaseAddress[cm_data.currentCells];
memset(cp, 0, sizeof(cm_cell_t));
cp->magic = CM_CELL_MAGIC;
-
+
+ /* the cellID cannot be 0 */
+ cp->cellID = ++cm_data.currentCells;
+
+ /* otherwise we found the cell, and so we're nearly done */
+ lock_InitializeMutex(&cp->mx, "cm_cell_t mutex", LOCK_HIERARCHY_CELL);
+
+ cp->name[0] = '\0'; /* No name yet */
+
+ lock_ReleaseWrite(&cm_cellLock);
+ hasWriteLock = 0;
+
rock.cellp = cp;
rock.flags = flags;
code = cm_SearchCellFile(namep, fullname, cm_AddCellProc, &rock);
cp = NULL;
goto done;
} else { /* got cell from DNS */
+ lock_ObtainWrite(&cm_cellLock);
+ hasWriteLock = 1;
cp->flags |= CM_CELLFLAG_DNS;
cp->flags &= ~CM_CELLFLAG_VLSERVER_INVALID;
cp->timeout = time(0) + ttl;
}
#endif
} else {
+ lock_ObtainWrite(&cm_cellLock);
+ hasWriteLock = 1;
cp->timeout = time(0) + 7200; /* two hour timeout */
}
/* randomise among those vlservers having the same rank*/
cm_RandomizeServer(&cp->vlServersp);
- /* otherwise we found the cell, and so we're nearly done */
- lock_InitializeMutex(&cp->mx, "cm_cell_t mutex");
-
/* copy in name */
strncpy(cp->name, fullname, CELL_MAXNAMELEN);
cp->name[CELL_MAXNAMELEN-1] = '\0';
- /* the cellID cannot be 0 */
- cp->cellID = ++cm_data.currentCells;
-
- /* append cell to global list */
+ /* append cell to global list */
if (cm_data.allCellsp == NULL) {
cm_data.allCellsp = cp;
} else {
cm_AddCellToNameHashTable(cp);
cm_AddCellToIDHashTable(cp);
+ } else {
+ lock_ReleaseRead(&cm_cellLock);
}
-
done:
if (hasWriteLock)
lock_ReleaseWrite(&cm_cellLock);
/* fullname is not valid if cp == NULL */
- if (cp && newnamep) {
- strncpy(newnamep, fullname, CELL_MAXNAMELEN);
- newnamep[CELL_MAXNAMELEN-1]='\0';
+ if (newnamep) {
+ if (cp) {
+ strncpy(newnamep, fullname, CELL_MAXNAMELEN);
+ newnamep[CELL_MAXNAMELEN-1]='\0';
+ } else {
+ newnamep[0] = '\0';
+ }
}
return cp;
}
if (osi_Once(&once)) {
cm_cell_t * cellp;
- lock_InitializeRWLock(&cm_cellLock, "cell global lock");
+ lock_InitializeRWLock(&cm_cellLock, "cell global lock", LOCK_HIERARCHY_CELL_GLOBAL);
if ( newFile ) {
cm_data.allCellsp = NULL;
memset(cellp, 0, sizeof(cm_cell_t));
cellp->magic = CM_CELL_MAGIC;
- lock_InitializeMutex(&cellp->mx, "cm_cell_t mutex");
+ lock_InitializeMutex(&cellp->mx, "cm_cell_t mutex", LOCK_HIERARCHY_CELL);
/* copy in name */
strncpy(cellp->name, "Freelance.Local.Cell", CELL_MAXNAMELEN); /*safe*/
#endif
} else {
for (cellp = cm_data.allCellsp; cellp; cellp=cellp->allNextp) {
- lock_InitializeMutex(&cellp->mx, "cm_cell_t mutex");
+ lock_InitializeMutex(&cellp->mx, "cm_cell_t mutex", LOCK_HIERARCHY_CELL);
cellp->vlServersp = NULL;
cellp->flags |= CM_CELLFLAG_VLSERVER_INVALID;
}
cm_serverRef_t *vlServersp; /* locked by cm_serverLock */
osi_mutex_t mx; /* mutex locking fields (flags) */
long flags; /* locked by mx */
- time_t timeout; /* if dns, time at which the server addrs expire */
+ time_t timeout; /* if dns, time at which the server addrs expire (mx) */
} cm_cell_t;
/* These are bit flag values */
HKEY parmKey;
if (osi_Once(&once)) {
- lock_InitializeRWLock(&cm_connLock, "connection global lock");
+ lock_InitializeRWLock(&cm_connLock, "connection global lock",
+ LOCK_HIERARCHY_CONN_GLOBAL);
/* keisa - read timeout value for lanmanworkstation service.
* jaltman - as per
if (tsrp->server == serverp && tsrp->status == srv_not_busy) {
tsrp->status = srv_busy;
if (fidp) { /* File Server query */
+ lock_ReleaseWrite(&cm_serverLock);
code = cm_FindVolumeByID(cellp, fidp->volume, userp, reqp,
CM_GETVOL_FLAG_NO_LRU_UPDATE,
&volp);
if (code == 0)
statep = cm_VolumeStateByID(volp, fidp->volume);
+ lock_ObtainWrite(&cm_serverLock);
}
break;
}
}
if (fidp) { /* File Server query */
+ lock_ReleaseWrite(&cm_serverLock);
code = cm_FindVolumeByID(cellp, fidp->volume, userp, reqp,
CM_GETVOL_FLAG_NO_LRU_UPDATE,
&volp);
if (code == 0)
cm_VolumeStateByID(volp, fidp->volume);
+ lock_ObtainWrite(&cm_serverLock);
}
}
}
serverp->connsp = tcp;
cm_HoldUser(userp);
tcp->userp = userp;
- lock_InitializeMutex(&tcp->mx, "cm_conn_t mutex");
+ lock_InitializeMutex(&tcp->mx, "cm_conn_t mutex", LOCK_HIERARCHY_CONN);
lock_ObtainMutex(&tcp->mx);
tcp->serverp = serverp;
tcp->cryptlevel = rxkad_clear;
cm_nDaemons = (nDaemons > CM_MAX_DAEMONS) ? CM_MAX_DAEMONS : nDaemons;
if (osi_Once(&once)) {
- lock_InitializeRWLock(&cm_daemonLock, "cm_daemonLock");
+ lock_InitializeRWLock(&cm_daemonLock, "cm_daemonLock",
+ LOCK_HIERARCHY_DAEMON_GLOBAL);
osi_EndOnce(&once);
/* creating IP Address Change monitor daemon */
extern void afsi_log(char *pattern, ...);
#endif
-osi_mutex_t cm_bufGetMutex;
#ifdef AFS_FREELANCE_CLIENT
extern osi_mutex_t cm_Freelance_Lock;
#endif
int cm_InitDCache(int newFile, long chunkSize, afs_uint64 nbuffers)
{
- lock_InitializeMutex(&cm_bufGetMutex, "buf_Get mutex");
return buf_Init(newFile, &cm_bufOps, nbuffers);
}
* sequence at a time.
*/
- // lock_ObtainMutex(&cm_bufGetMutex);
/* first hold all buffers, since we can't hold any locks in buf_Get */
while (1) {
/* stop at chunk boundary */
code = buf_Get(scp, &pageBase, &tbp);
if (code) {
- //lock_ReleaseMutex(&cm_bufGetMutex);
lock_ObtainWrite(&scp->rw);
cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
return code;
/* reserve a chunk's worth of buffers if possible */
reserving = buf_TryReserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
- // lock_ReleaseMutex(&cm_bufGetMutex);
-
pageBase = *offsetp;
collected = pageBase.LowPart & (cm_chunkSize - 1);
memset (&dnlcstats, 0, sizeof(dnlcstats));
- lock_InitializeRWLock(&cm_dnlcLock, "cm_dnlcLock");
+ lock_InitializeRWLock(&cm_dnlcLock, "cm_dnlcLock", LOCK_HIERARCHY_DNLC_GLOBAL);
if ( newFile ) {
lock_ObtainWrite(&cm_dnlcLock);
cm_data.ncfreelist = (cm_nc_t *) 0;
thread_t phandle;
int lpid;
- lock_InitializeMutex(&cm_Freelance_Lock, "Freelance Lock");
+ lock_InitializeMutex(&cm_Freelance_Lock, "Freelance Lock", LOCK_HIERARCHY_FREELANCE_GLOBAL);
// yj: first we make a call to cm_initLocalMountPoints
// to read all the local mount points from the registry
if (scp != cm_data.rootSCachep && cm_FidCmp(&scp->fid, &aFid) == 0) {
// mark the scp to be reused
cm_HoldSCacheNoLock(scp);
- lock_ReleaseWrite(&cm_Freelance_Lock);
+ lock_ReleaseMutex(&cm_Freelance_Lock);
lock_ReleaseWrite(&cm_scacheLock);
lock_ObtainWrite(&scp->rw);
cm_DiscardSCache(scp);
void cm_InitIoctl(void)
{
- lock_InitializeMutex(&cm_Afsdsbmt_Lock, "AFSDSBMT.INI Access Lock");
+ lock_InitializeMutex(&cm_Afsdsbmt_Lock, "AFSDSBMT.INI Access Lock",
+ LOCK_HIERARCHY_AFSDBSBMT_GLOBAL);
}
/*
clientchar_t *cp;
cp = cm_ParseIoctlStringAlloc(ioctlp, NULL);
- code = cm_Lookup(dscp, cp, CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp);
+
+ code = cm_Lookup(dscp, cp[0] ? cp : L".", CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp);
if (code)
goto done_2;
cp = cm_ParseIoctlStringAlloc(ioctlp, NULL);
- code = cm_Lookup(dscp, cp, CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp);
+ code = cm_Lookup(dscp, cp[0] ? cp : L".", CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp);
/* if something went wrong, bail out now */
if (code)
for (cp = cm_data.allCellsp; cp; cp=cp->allNextp)
{
afs_int32 code;
- lock_ObtainMutex(&cp->mx);
+
/* delete all previous server lists - cm_FreeServerList will ask for write on cm_ServerLock*/
cm_FreeServerList(&cp->vlServersp, CM_FREESERVERLIST_DELETE);
cp->vlServersp = NULL;
+ lock_ReleaseWrite(&cm_cellLock);
+
rock.cellp = cp;
rock.flags = 0;
code = cm_SearchCellFile(cp->name, cp->name, cm_AddCellProc, &rock);
int ttl;
code = cm_SearchCellByDNS(cp->name, cp->name, &ttl, cm_AddCellProc, &rock);
if ( code == 0 ) { /* got cell from DNS */
+ lock_ObtainMutex(&cp->mx);
cp->flags |= CM_CELLFLAG_DNS;
cp->flags &= ~CM_CELLFLAG_VLSERVER_INVALID;
cp->timeout = time(0) + ttl;
+ lock_ReleaseMutex(&cp->mx);
}
}
}
else {
+ lock_ObtainMutex(&cp->mx);
cp->flags &= ~CM_CELLFLAG_DNS;
+ lock_ReleaseMutex(&cp->mx);
}
#endif /* AFS_AFSDB_ENV */
if (code) {
+ lock_ObtainMutex(&cp->mx);
cp->flags |= CM_CELLFLAG_VLSERVER_INVALID;
+ lock_ReleaseMutex(&cp->mx);
+ lock_ObtainWrite(&cm_cellLock);
}
else {
+ lock_ObtainMutex(&cp->mx);
cp->flags &= ~CM_CELLFLAG_VLSERVER_INVALID;
+ lock_ReleaseMutex(&cp->mx);
+ lock_ObtainWrite(&cm_cellLock);
cm_RandomizeServer(&cp->vlServersp);
}
- lock_ReleaseMutex(&cp->mx);
}
-
lock_ReleaseWrite(&cm_cellLock);
return 0;
}
cp = ioctlp->inDatap;
clientp = cm_Utf8ToClientStringAlloc(cp, -1, NULL);
- code = cm_Lookup(dscp, clientp, CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp);
+ code = cm_Lookup(dscp, clientp[0] ? clientp : L".", CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp);
free(clientp);
if (code)
return code;
osi_LogEvent("cm_IoctlListlink",NULL," name[%s]",cp);
clientp = cm_Utf8ToClientStringAlloc(cp, -1, NULL);
- code = cm_Lookup(dscp, clientp, CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp);
+ code = cm_Lookup(dscp, clientp[0] ? clientp : L".", CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp);
free(clientp);
if (code)
return code;
cp = ioctlp->inDatap;
clientp = cm_Utf8ToClientStringAlloc(cp, -1, NULL);
- code = cm_Lookup(dscp, clientp, CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp);
+ code = cm_Lookup(dscp, clientp[0] ? clientp : L".", CM_FLAG_NOMOUNTCHASE, userp, reqp, &scp);
/* if something went wrong, bail out now */
if (code)
ULONG listenThreadID = 0;
char * name = "afsd_rpc_ShutdownEvent";
- lock_InitializeMutex(&tokenEventLock, "token event lock");
+ lock_InitializeMutex(&tokenEventLock, "token event lock",
+ LOCK_HIERARCHY_TOKEN_EVENT_GLOBAL);
rpc_ShutdownEvent = thrd_CreateEvent(NULL, FALSE, FALSE, name);
if ( GetLastError() == ERROR_ALREADY_EXISTS )
}
}
-/* called with cm_scacheLock write-locked; recycles an existing scp.
+/* called with cm_scacheLock and scp write-locked; recycles an existing scp.
*
* this function ignores all of the locking hierarchy.
*/
return -1;
}
- lock_ObtainWrite(&scp->rw);
cm_RemoveSCacheFromHashTable(scp);
- lock_ReleaseWrite(&scp->rw);
#if 0
if (flags & CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS) {
}
-/* called with cm_scacheLock write-locked; find a vnode to recycle.
+/*
+ * called with cm_scacheLock write-locked; find a vnode to recycle.
* Can allocate a new one if desperate, or if below quota (cm_data.maxSCaches).
+ * returns scp->mx held.
*/
cm_scache_t *cm_GetNewSCache(void)
{
if (scp->refCount == 0) {
if (scp->flags & CM_SCACHEFLAG_DELETED) {
+ if (!lock_TryWrite(&scp->rw))
+ continue;
+
osi_Log1(afsd_logp, "GetNewSCache attempting to recycle deleted scp 0x%x", scp);
if (!cm_RecycleSCache(scp, CM_SCACHE_RECYCLEFLAG_DESTROY_BUFFERS)) {
/* and we're done */
return scp;
}
+ lock_ReleaseWrite(&scp->rw);
osi_Log1(afsd_logp, "GetNewSCache recycled failed scp 0x%x", scp);
} else if (!(scp->flags & CM_SCACHEFLAG_INHASH)) {
+ if (!lock_TryWrite(&scp->rw))
+ continue;
+
/* we found an entry, so return it */
/* now remove from the LRU queue and put it back at the
* head of the LRU queue.
* we must not recycle the scp. */
if (scp->refCount == 0 && scp->bufReadsp == NULL && scp->bufWritesp == NULL) {
if (!buf_DirtyBuffersExist(&scp->fid)) {
+ if (!lock_TryWrite(&scp->rw))
+ continue;
+
if (!cm_RecycleSCache(scp, 0)) {
/* we found an entry, so return it */
/* now remove from the LRU queue and put it back at the
/* and we're done */
return scp;
}
+ lock_ReleaseWrite(&scp->rw);
} else {
osi_Log1(afsd_logp,"GetNewSCache dirty buffers exist scp 0x%x", scp);
}
"invalid cm_scache_t address");
memset(scp, 0, sizeof(cm_scache_t));
scp->magic = CM_SCACHE_MAGIC;
- lock_InitializeRWLock(&scp->rw, "cm_scache_t rw");
- lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock");
+ lock_InitializeRWLock(&scp->rw, "cm_scache_t rw", LOCK_HIERARCHY_SCACHE);
+ osi_assertx(lock_TryWrite(&scp->rw), "cm_scache_t rw held after allocation");
+ lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock", LOCK_HIERARCHY_SCACHE_BUFCREATE);
#ifdef USE_BPLUS
- lock_InitializeRWLock(&scp->dirlock, "cm_scache_t dirlock");
+ lock_InitializeRWLock(&scp->dirlock, "cm_scache_t dirlock", LOCK_HIERARCHY_SCACHE_DIRLOCK);
#endif
scp->serverLock = -1;
cm_data.fakeSCache.linkCount = 1;
cm_data.fakeSCache.refCount = 1;
}
- lock_InitializeRWLock(&cm_data.fakeSCache.rw, "cm_scache_t rw");
+ lock_InitializeRWLock(&cm_data.fakeSCache.rw, "cm_scache_t rw", LOCK_HIERARCHY_SCACHE);
}
long
for ( scp = cm_data.allSCachesp; scp;
scp = scp->allNextp ) {
if (scp->randomACLp) {
+ lock_ReleaseWrite(&cm_scacheLock);
lock_ObtainWrite(&scp->rw);
+ lock_ObtainWrite(&cm_scacheLock);
cm_FreeAllACLEnts(scp);
lock_ReleaseWrite(&scp->rw);
}
static osi_once_t once;
if (osi_Once(&once)) {
- lock_InitializeRWLock(&cm_scacheLock, "cm_scacheLock");
+ lock_InitializeRWLock(&cm_scacheLock, "cm_scacheLock", LOCK_HIERARCHY_SCACHE_GLOBAL);
if ( newFile ) {
memset(cm_data.scacheHashTablep, 0, sizeof(cm_scache_t *) * cm_data.scacheHashTableSize);
cm_data.allSCachesp = NULL;
for ( scp = cm_data.allSCachesp; scp;
scp = scp->allNextp ) {
- lock_InitializeRWLock(&scp->rw, "cm_scache_t rw");
- lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock");
+ lock_InitializeRWLock(&scp->rw, "cm_scache_t rw", LOCK_HIERARCHY_SCACHE);
+ lock_InitializeRWLock(&scp->bufCreateLock, "cm_scache_t bufCreateLock", LOCK_HIERARCHY_SCACHE_BUFCREATE);
#ifdef USE_BPLUS
- lock_InitializeRWLock(&scp->dirlock, "cm_scache_t dirlock");
+ lock_InitializeRWLock(&scp->dirlock, "cm_scache_t dirlock", LOCK_HIERARCHY_SCACHE_DIRLOCK);
#endif
scp->cbServerp = NULL;
scp->cbExpires = 0;
for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
if (cm_FidCmp(fidp, &scp->fid) == 0) {
#ifdef DEBUG_REFCOUNT
- afsi_log("%s:%d cm_GetSCache (1) outScpp 0x%p ref %d", file, line, scp, scp->refCount);
- osi_Log1(afsd_logp,"cm_GetSCache (1) outScpp 0x%p", scp);
+ afsi_log("%s:%d cm_GetSCache (1) scp 0x%p ref %d", file, line, scp, scp->refCount);
+ osi_Log1(afsd_logp,"cm_GetSCache (1) scp 0x%p", scp);
#endif
#ifdef AFS_FREELANCE_CLIENT
if (cm_freelanceEnabled && special &&
lock_ReleaseMutex(&cm_Freelance_Lock);
lock_ObtainWrite(&cm_scacheLock);
if (scp == NULL)
- scp = cm_GetNewSCache();
+ scp = cm_GetNewSCache(); /* returns scp->mx held */
if (scp == NULL) {
osi_Log0(afsd_logp,"cm_GetSCache unable to obtain *new* scache entry");
lock_ReleaseWrite(&cm_scacheLock);
return CM_ERROR_WOULDBLOCK;
}
-#if not_too_dangerous
- /* dropping the cm_scacheLock allows more than one thread
- * to obtain the same cm_scache_t from the LRU list. Since
- * the refCount is known to be zero at this point we have to
- * assume that no one else is using the one this is returned.
- */
- lock_ReleaseWrite(&cm_scacheLock);
- lock_ObtainWrite(&scp->rw);
- lock_ObtainWrite(&cm_scacheLock);
-#endif
scp->fid = *fidp;
scp->dotdotFid.cell=AFS_FAKE_ROOT_CELL_ID;
scp->dotdotFid.volume=AFS_FAKE_ROOT_VOL_ID;
scp->dataVersion=cm_data.fakeDirVersion;
scp->bufDataVersionLow=cm_data.fakeDirVersion;
scp->lockDataVersion=-1; /* no lock yet */
-#if not_too_dangerous
lock_ReleaseWrite(&scp->rw);
-#endif
*outScpp = scp;
- lock_ReleaseWrite(&cm_scacheLock);
#ifdef DEBUG_REFCOUNT
- afsi_log("%s:%d cm_GetSCache (2) outScpp 0x%p ref %d", file, line, scp, scp->refCount);
- osi_Log1(afsd_logp,"cm_GetSCache (2) outScpp 0x%p", scp);
+ afsi_log("%s:%d cm_GetSCache (2) scp 0x%p ref %d", file, line, scp, scp->refCount);
+ osi_Log1(afsd_logp,"cm_GetSCache (2) scp 0x%p", scp);
#endif
+ lock_ReleaseWrite(&cm_scacheLock);
return 0;
}
// end of yj code
for (scp=cm_data.scacheHashTablep[hash]; scp; scp=scp->nextp) {
if (cm_FidCmp(fidp, &scp->fid) == 0) {
#ifdef DEBUG_REFCOUNT
- afsi_log("%s:%d cm_GetSCache (3) outScpp 0x%p ref %d", file, line, scp, scp->refCount);
- osi_Log1(afsd_logp,"cm_GetSCache (3) outScpp 0x%p", scp);
+ afsi_log("%s:%d cm_GetSCache (3) scp 0x%p ref %d", file, line, scp, scp->refCount);
+ osi_Log1(afsd_logp,"cm_GetSCache (3) scp 0x%p", scp);
#endif
cm_HoldSCacheNoLock(scp);
cm_AdjustScacheLRU(scp);
}
/* now, if we don't have the fid, recycle something */
- scp = cm_GetNewSCache();
+ scp = cm_GetNewSCache(); /* returns scp->mx held */
if (scp == NULL) {
osi_Log0(afsd_logp,"cm_GetNewSCache unable to obtain *new* scache entry");
lock_ReleaseWrite(&cm_scacheLock);
cm_PutVolume(volp);
return CM_ERROR_WOULDBLOCK;
}
- osi_Log2(afsd_logp,"cm_GetNewSCache returns scp 0x%x flags 0x%x", scp, scp->flags);
+#ifdef DEBUG_REFCOUNT
+ afsi_log("%s:%d cm_GetNewSCache returns scp 0x%p flags 0x%x", file, line, scp, scp->flags);
+#endif
+ osi_Log2(afsd_logp,"cm_GetNewSCache returns scp 0x%p flags 0x%x", scp, scp->flags);
osi_assertx(!(scp->flags & CM_SCACHEFLAG_INHASH), "CM_SCACHEFLAG_INHASH set");
-#if not_too_dangerous
- /* dropping the cm_scacheLock allows more than one thread
- * to obtain the same cm_scache_t from the LRU list. Since
- * the refCount is known to be zero at this point we have to
- * assume that no one else is using the one this is returned.
- */
- lock_ReleaseWrite(&cm_scacheLock);
- lock_ObtainWrite(&scp->rw);
- lock_ObtainWrite(&cm_scacheLock);
-#endif
scp->fid = *fidp;
if (!cm_freelanceEnabled || !isRoot) {
/* if this scache entry represents a volume root then we need
scp->nextp = cm_data.scacheHashTablep[hash];
cm_data.scacheHashTablep[hash] = scp;
scp->flags |= CM_SCACHEFLAG_INHASH;
- scp->refCount = 1;
- osi_Log1(afsd_logp,"cm_GetSCache sets refCount to 1 scp 0x%x", scp);
-#if not_too_dangerous
lock_ReleaseWrite(&scp->rw);
+ scp->refCount = 1;
+#ifdef DEBUG_REFCOUNT
+ afsi_log("%s:%d cm_GetSCache sets refCount to 1 scp 0x%x", file, line, scp);
#endif
+ osi_Log1(afsd_logp,"cm_GetSCache sets refCount to 1 scp 0x%x", scp);
/* XXX - The following fields in the cm_scache are
* uninitialized:
* parentVnode
* parentUnique
*/
- lock_ReleaseWrite(&cm_scacheLock);
/* now we have a held scache entry; just return it */
*outScpp = scp;
#ifdef DEBUG_REFCOUNT
- afsi_log("%s:%d cm_GetSCache (4) outScpp 0x%p ref %d", file, line, scp, scp->refCount);
- osi_Log1(afsd_logp,"cm_GetSCache (4) outScpp 0x%p", scp);
+ afsi_log("%s:%d cm_GetSCache (4) scp 0x%p ref %d", file, line, scp, scp->refCount);
+ osi_Log1(afsd_logp,"cm_GetSCache (4) scp 0x%p", scp);
#endif
+ lock_ReleaseWrite(&cm_scacheLock);
return 0;
}
afs_uint32 sleep_buf_cmflags = 0;
afs_uint32 sleep_scp_bufs = 0;
int wakeupCycle;
- int getAccessRights = 1;
lock_AssertWrite(&scp->rw);
if ((rights & (PRSFS_WRITE|PRSFS_DELETE)) && (scp->flags & CM_SCACHEFLAG_RO))
return CM_ERROR_READONLY;
- if (cm_HaveAccessRights(scp, userp, rights, &outRights) || !getAccessRights) {
+ if (cm_HaveAccessRights(scp, userp, rights, &outRights)) {
if (~outRights & rights)
return CM_ERROR_NOACCESS;
}
}
if (code)
return code;
- getAccessRights = 0; /* do not repeat */
continue;
}
}
#endif
{
afs_int32 refCount;
- long lockstate;
osi_assertx(scp != NULL, "null cm_scache_t");
lock_AssertAny(&cm_scacheLock);
- lockstate = lock_GetRWLockState(&cm_scacheLock);
refCount = InterlockedDecrement(&scp->refCount);
#ifdef DEBUG_REFCOUNT
if (refCount < 0)
if (refCount == 0 && (scp->flags & CM_SCACHEFLAG_DELETED)) {
int deleted = 0;
+ long lockstate;
+
+ lockstate = lock_GetRWLockState(&cm_scacheLock);
if (lockstate != OSI_RWLOCK_WRITEHELD)
- lock_ConvertRToW(&cm_scacheLock);
+ lock_ReleaseRead(&cm_scacheLock);
+ else
+ lock_ReleaseWrite(&cm_scacheLock);
+
lock_ObtainWrite(&scp->rw);
if (scp->flags & CM_SCACHEFLAG_DELETED)
deleted = 1;
- lock_ReleaseWrite(&scp->rw);
- if (refCount == 0 && deleted)
+
+ if (refCount == 0 && deleted) {
+ lock_ObtainWrite(&cm_scacheLock);
cm_RecycleSCache(scp, 0);
- if (lockstate != OSI_RWLOCK_WRITEHELD)
- lock_ConvertWToR(&cm_scacheLock);
+ if (lockstate != OSI_RWLOCK_WRITEHELD)
+ lock_ConvertWToR(&cm_scacheLock);
+ } else {
+ if (lockstate != OSI_RWLOCK_WRITEHELD)
+ lock_ObtainRead(&cm_scacheLock);
+ else
+ lock_ObtainWrite(&cm_scacheLock);
+ }
+ lock_ReleaseWrite(&scp->rw);
}
}
osi_Log2(afsd_logp,"cm_ReleaseSCache scp 0x%p ref %d",scp, refCount);
afsi_log("%s:%d cm_ReleaseSCache scp 0x%p ref %d", file, line, scp, refCount);
#endif
+ lock_ReleaseRead(&cm_scacheLock);
if (scp->flags & CM_SCACHEFLAG_DELETED) {
int deleted = 0;
lock_ObtainWrite(&scp->rw);
if (scp->flags & CM_SCACHEFLAG_DELETED)
deleted = 1;
- lock_ReleaseWrite(&scp->rw);
if (deleted) {
- lock_ConvertRToW(&cm_scacheLock);
+ lock_ObtainWrite(&cm_scacheLock);
cm_RecycleSCache(scp, 0);
- lock_ConvertWToR(&cm_scacheLock);
+ lock_ReleaseWrite(&cm_scacheLock);
}
+ lock_ReleaseWrite(&scp->rw);
}
-
- lock_ReleaseRead(&cm_scacheLock);
}
/* just look for the scp entry to get filetype */
lock_ObtainRead(&cm_serverLock);
for (tsp = cm_allServersp; tsp; tsp = tsp->allNextp) {
cm_GetServerNoLock(tsp);
+ lock_ReleaseRead(&cm_serverLock);
cm_ForceNewConnections(tsp);
+ lock_ObtainRead(&cm_serverLock);
cm_PutServerNoLock(tsp);
}
lock_ReleaseRead(&cm_serverLock);
tsp->flags |= CM_SERVERFLAG_DOWN;
tsp->downTime = time(NULL);
}
- if (code != VRESTARTING)
+ if (code != VRESTARTING) {
+ lock_ReleaseMutex(&tsp->mx);
cm_ForceNewConnections(tsp);
-
+ lock_ObtainMutex(&tsp->mx);
+ }
osi_Log3(afsd_logp, "cm_PingServer server %s (%s) is down with caps 0x%x",
osi_LogSaveString(afsd_logp, hoststr),
tsp->type == CM_SERVER_VLDB ? "vldb" : "file",
tsp->flags |= CM_SERVERFLAG_DOWN;
tsp->downTime = time(NULL);
}
- if (code != VRESTARTING)
+ if (code != VRESTARTING) {
+ lock_ReleaseMutex(&tsp->mx);
cm_ForceNewConnections(tsp);
-
+ lock_ObtainMutex(&tsp->mx);
+ }
afs_inet_ntoa_r(tsp->addr.sin_addr.S_un.S_addr, hoststr);
osi_Log3(afsd_logp, "cm_MultiPingServer server %s (%s) is down with caps 0x%x",
osi_LogSaveString(afsd_logp, hoststr),
tsp->flags |= CM_SERVERFLAG_DOWN;
tsp->downTime = time(NULL);
}
- if (code != VRESTARTING)
+ if (code != VRESTARTING) {
+ lock_ReleaseMutex(&tsp->mx);
cm_ForceNewConnections(tsp);
-
+ lock_ObtainMutex(&tsp->mx);
+ }
afs_inet_ntoa_r(tsp->addr.sin_addr.S_un.S_addr, hoststr);
osi_Log3(afsd_logp, "cm_MultiPingServer server %s (%s) is down with caps 0x%x",
osi_LogSaveString(afsd_logp, hoststr),
tsp->flags |= CM_SERVERFLAG_DOWN;
tsp->downTime = time(NULL);
}
- if (code != VRESTARTING)
+ if (code != VRESTARTING) {
+ lock_ReleaseMutex(&tsp->mx);
cm_ForceNewConnections(tsp);
-
+ lock_ObtainMutex(&tsp->mx);
+ }
afs_inet_ntoa_r(tsp->addr.sin_addr.S_un.S_addr, hoststr);
osi_Log3(afsd_logp, "cm_MultiPingServer server %s (%s) is down with caps 0x%x",
osi_LogSaveString(afsd_logp, hoststr),
static osi_once_t once;
if (osi_Once(&once)) {
- lock_InitializeRWLock(&cm_serverLock, "cm_serverLock");
- lock_InitializeRWLock(&cm_syscfgLock, "cm_syscfgLock");
+ lock_InitializeRWLock(&cm_serverLock, "cm_serverLock", LOCK_HIERARCHY_SERVER_GLOBAL);
+ lock_InitializeRWLock(&cm_syscfgLock, "cm_syscfgLock", LOCK_HIERARCHY_SYSCFG_GLOBAL);
osi_EndOnce(&once);
}
}
tsp->type = type;
tsp->cellp = cellp;
tsp->refCount = 1;
- lock_InitializeMutex(&tsp->mx, "cm_server_t mutex");
+ lock_InitializeMutex(&tsp->mx, "cm_server_t mutex", LOCK_HIERARCHY_SERVER);
tsp->addr = *socketp;
cm_SetServerPrefs(tsp);
static osi_once_t once;
if (osi_Once(&once)) {
- lock_InitializeRWLock(&cm_userLock, "cm_userLock");
+ lock_InitializeRWLock(&cm_userLock, "cm_userLock", LOCK_HIERARCHY_USER_GLOBAL);
osi_EndOnce(&once);
}
userp = malloc(sizeof(*userp));
memset(userp, 0, sizeof(*userp));
userp->refCount = 1;
- lock_InitializeMutex(&userp->mx, "cm_user_t");
+ lock_InitializeMutex(&userp->mx, "cm_user_t", LOCK_HIERARCHY_USER);
return userp;
}
cm_space_t *tsp;
if (osi_Once(&cm_utilsOnce)) {
- lock_InitializeRWLock(&cm_utilsLock, "cm_utilsLock");
+ lock_InitializeRWLock(&cm_utilsLock, "cm_utilsLock", LOCK_HIERARCHY_UTILS_GLOBAL);
osi_EndOnce(&cm_utilsOnce);
}
size_t vnLength;
int targetType;
+ *outScpp = NULL;
+
if (scp->mountRootFid.cell != 0 && scp->mountRootGen >= cm_data.mountRootGen) {
tfid = scp->mountRootFid;
lock_ReleaseWrite(&scp->rw);
}
long cm_LookupInternal(cm_scache_t *dscp, clientchar_t *cnamep, long flags, cm_user_t *userp,
- cm_req_t *reqp, cm_scache_t **outpScpp)
+ cm_req_t *reqp, cm_scache_t **outScpp)
{
long code;
int dnlcHit = 1; /* did we hit in the dnlc? yes, we did */
normchar_t *nnamep = NULL;
fschar_t *fnamep = NULL;
+ *outScpp = NULL;
+
memset(&rock, 0, sizeof(rock));
if (dscp->fid.vnode == 1 && dscp->fid.unique == 1
}
/* copy back pointer */
- *outpScpp = tscp;
+ *outScpp = tscp;
/* insert scache in dnlc */
if ( !dnlcHit && !(flags & CM_FLAG_NOMOUNTCHASE) && rock.ExactFound ) {
}
long cm_EvaluateVolumeReference(clientchar_t * namep, long flags, cm_user_t * userp,
- cm_req_t *reqp, cm_scache_t ** outpScpp)
+ cm_req_t *reqp, cm_scache_t ** outScpp)
{
afs_uint32 code = 0;
fschar_t cellName[CELL_MAXNAMELEN];
cm_SetFid(&fid, cellp->cellID, volume, 1, 1);
- code = cm_GetSCache(&fid, outpScpp, userp, reqp);
+ code = cm_GetSCache(&fid, outScpp, userp, reqp);
_exit_cleanup:
if (fnamep)
#ifdef DEBUG_REFCOUNT
long cm_LookupDbg(cm_scache_t *dscp, clientchar_t *namep, long flags, cm_user_t *userp,
- cm_req_t *reqp, cm_scache_t **outpScpp, char * file, long line)
+ cm_req_t *reqp, cm_scache_t **outScpp, char * file, long line)
#else
long cm_Lookup(cm_scache_t *dscp, clientchar_t *namep, long flags, cm_user_t *userp,
- cm_req_t *reqp, cm_scache_t **outpScpp)
+ cm_req_t *reqp, cm_scache_t **outScpp)
#endif
{
long code;
if (dscp == cm_data.rootSCachep &&
cm_ClientStrCmpNI(namep, _C(CM_PREFIX_VOL), CM_PREFIX_VOL_CCH) == 0) {
- return cm_EvaluateVolumeReference(namep, flags, userp, reqp, outpScpp);
+ return cm_EvaluateVolumeReference(namep, flags, userp, reqp, outScpp);
}
if (cm_ExpandSysName(namep, NULL, 0, 0) > 0) {
#endif
if (code == 0) {
- *outpScpp = scp;
+ *outScpp = scp;
return 0;
}
if (scp) {
afsi_log("%s:%d cm_LookupInternal (2) code 0x%x dscp 0x%p ref %d scp 0x%p ref %d", file, line, code, dscp, dscp->refCount, scp, scp ? scp->refCount : 0);
osi_Log3(afsd_logp, "cm_LookupInternal (2) code 0x%x dscp 0x%p scp 0x%p", code, dscp, scp);
#endif
- *outpScpp = scp;
+ *outScpp = scp;
return code;
}
}
afsi_log("%s:%d cm_LookupInternal (2) code 0x%x dscp 0x%p ref %d scp 0x%p ref %d", file, line, code, dscp, dscp->refCount, scp, scp ? scp->refCount : 0);
osi_Log3(afsd_logp, "cm_LookupInternal (2) code 0x%x dscp 0x%p scp 0x%p", code, dscp, scp);
#endif
- *outpScpp = scp;
+ *outScpp = scp;
return code;
}
int fid_count = 0; /* number of fids processed in this path walk */
int i;
+ *outScpp = NULL;
+
#ifdef DEBUG_REFCOUNT
afsi_log("%s:%d cm_NameI rootscp 0x%p ref %d", file, line, rootSCachep, rootSCachep->refCount);
osi_Log4(afsd_logp,"cm_NameI rootscp 0x%p path %S tidpath %S flags 0x%x",
- rootSCachep, pathp ? pathp : "<NULL>", tidPathp ? tidPathp : "<NULL>",
+ rootSCachep, pathp ? pathp : L"<NULL>", tidPathp ? tidPathp : L"<NULL>",
flags);
#endif
cm_ReleaseSCache(tscp);
#ifdef DEBUG_REFCOUNT
- afsi_log("%s:%d cm_NameI code 0x%x outScpp 0x%p ref %d", file, line, code, *outScpp, (*outScpp)->refCount);
+ afsi_log("%s:%d cm_NameI code 0x%x outScpp 0x%p ref %d", file, line, code, *outScpp, (*outScpp) ? (*outScpp)->refCount : 0);
#endif
osi_Log2(afsd_logp,"cm_NameI code 0x%x outScpp 0x%p", code, *outScpp);
return code;
cm_space_t *spacep;
cm_scache_t *newRootScp;
+ *outScpp = NULL;
+
osi_Log1(afsd_logp, "Evaluating symlink scp 0x%p", linkScp);
code = cm_AssembleLink(linkScp, "", &newRootScp, &spacep, userp, reqp);
static osi_once_t once;
if (osi_Once(&once)) {
- lock_InitializeRWLock(&cm_volumeLock, "cm global volume lock");
+ lock_InitializeRWLock(&cm_volumeLock, "cm global volume lock", LOCK_HIERARCHY_VOLUME_GLOBAL);
if ( newFile ) {
cm_data.allVolumesp = NULL;
for (volp = cm_data.allVolumesp; volp; volp=volp->allNextp) {
afs_uint32 volType;
- lock_InitializeRWLock(&volp->rw, "cm_volume_t rwlock");
+ lock_InitializeRWLock(&volp->rw, "cm_volume_t rwlock", LOCK_HIERARCHY_VOLUME);
volp->flags |= CM_VOLUMEFLAG_RESET;
volp->flags &= ~CM_VOLUMEFLAG_UPDATING_VL;
for (volType = RWVOL; volType < NUM_VOL_TYPES; volType++) {
volp->magic = CM_VOLUME_MAGIC;
volp->allNextp = cm_data.allVolumesp;
cm_data.allVolumesp = volp;
- lock_InitializeRWLock(&volp->rw, "cm_volume_t rwlock");
+ lock_InitializeRWLock(&volp->rw, "cm_volume_t rwlock", LOCK_HIERARCHY_VOLUME);
lock_ReleaseWrite(&cm_volumeLock);
lock_ObtainWrite(&volp->rw);
lock_ObtainWrite(&cm_volumeLock);
vcp->uidCounter = 1; /* UID 0 is reserved for blank user */
vcp->nextp = smb_allVCsp;
smb_allVCsp = vcp;
- lock_InitializeMutex(&vcp->mx, "vc_t mutex");
+ lock_InitializeMutex(&vcp->mx, "vc_t mutex", LOCK_HIERARCHY_SMB_VC);
vcp->lsn = lsn;
vcp->lana = lana;
vcp->secCtx = NULL;
tidp->vcp = vcp;
smb_HoldVCNoLock(vcp);
vcp->tidsp = tidp;
- lock_InitializeMutex(&tidp->mx, "tid_t mutex");
+ lock_InitializeMutex(&tidp->mx, "tid_t mutex", LOCK_HIERARCHY_SMB_TID);
tidp->tid = tid;
}
#ifdef DEBUG_SMB_REFCOUNT
uidp->vcp = vcp;
smb_HoldVCNoLock(vcp);
vcp->usersp = uidp;
- lock_InitializeMutex(&uidp->mx, "user_t mutex");
+ lock_InitializeMutex(&uidp->mx, "user_t mutex", LOCK_HIERARCHY_SMB_UID);
uidp->userID = uid;
osi_Log3(smb_logp, "smb_FindUID vcp[0x%p] new-uid[%d] name[%S]",
vcp, uidp->userID,
unp->name = cm_ClientStrDup(usern);
unp->machine = cm_ClientStrDup(machine);
usernamesp = unp;
- lock_InitializeMutex(&unp->mx, "username_t mutex");
+ lock_InitializeMutex(&unp->mx, "username_t mutex", LOCK_HIERARCHY_SMB_USERNAME);
if (flags & SMB_FLAG_AFSLOGON)
unp->flags = SMB_USERNAMEFLAG_AFSLOGON;
}
fidp->refCount = 1;
fidp->vcp = vcp;
smb_HoldVCNoLock(vcp);
- lock_InitializeMutex(&fidp->mx, "fid_t mutex");
+ lock_InitializeMutex(&fidp->mx, "fid_t mutex", LOCK_HIERARCHY_SMB_FID);
fidp->fid = fid;
fidp->curr_chunk = fidp->prev_chunk = -2;
fidp->raw_write_event = event;
dsp->cookie = smb_dirSearchCounter;
++smb_dirSearchCounter;
dsp->refCount = 1;
- lock_InitializeMutex(&dsp->mx, "cm_dirSearch_t");
+ lock_InitializeMutex(&dsp->mx, "cm_dirSearch_t", LOCK_HIERARCHY_SMB_DIRSEARCH);
dsp->lastTime = osi_Time();
osi_QAdd((osi_queue_t **) &smb_firstDirSearchp, &dsp->q);
if (!smb_lastDirSearchp)
#endif
cb = sizeof(pktp->data);
}
- return smb_ParseStringBuf(pktp->data, &pktp->stringsp, inp, &cb, chainpp, flags);
+ return smb_ParseStringBuf(pktp->data, &pktp->stringsp, inp, &cb, chainpp,
+ flags | SMB_STRF_SRCNULTERM);
}
clientchar_t *smb_ParseStringCb(smb_packet_t * pktp, unsigned char * inp,
*stringspp = spacep;
cchdest = lengthof(spacep->wdata);
- cm_Utf8ToUtf16(inp, (int)*pcb_max, spacep->wdata, cchdest);
+ cm_Utf8ToUtf16(inp, (int)((flags & SMB_STRF_SRCNULTERM)? -1 : *pcb_max),
+ spacep->wdata, cchdest);
return spacep->wdata;
#ifdef SMB_UNICODE
/* if the call worked, stop doing the search now, since we
* really only want to rename one file.
*/
+ if (code)
+ osi_Log0(smb_logp, "cm_Rename failure");
osi_Log1(smb_logp, "cm_Rename returns %ld", code);
} else if (code == 0) {
code = CM_ERROR_NOSUCHFILE;
smb_logp = logp;
/* and the global lock */
- lock_InitializeRWLock(&smb_globalLock, "smb global lock");
- lock_InitializeRWLock(&smb_rctLock, "smb refct and tree struct lock");
+ lock_InitializeRWLock(&smb_globalLock, "smb global lock", LOCK_HIERARCHY_SMB_GLOBAL);
+ lock_InitializeRWLock(&smb_rctLock, "smb refct and tree struct lock", LOCK_HIERARCHY_SMB_RCT_GLOBAL);
/* Raw I/O data structures */
- lock_InitializeMutex(&smb_RawBufLock, "smb raw buffer lock");
+ lock_InitializeMutex(&smb_RawBufLock, "smb raw buffer lock", LOCK_HIERARCHY_SMB_RAWBUF);
- lock_InitializeMutex(&smb_ListenerLock, "smb listener lock");
- lock_InitializeMutex(&smb_StartedLock, "smb started lock");
+ lock_InitializeMutex(&smb_ListenerLock, "smb listener lock", LOCK_HIERARCHY_SMB_LISTENER);
+ lock_InitializeMutex(&smb_StartedLock, "smb started lock", LOCK_HIERARCHY_SMB_STARTED);
/* 4 Raw I/O buffers */
smb_RawBufs = calloc(65536,1);
#define SMB_STRF_FORCEASCII (1<<0)
#define SMB_STRF_ANSIPATH (1<<1)
#define SMB_STRF_IGNORENUL (1<<2)
+#define SMB_STRF_SRCNULTERM (1<<3)
extern clientchar_t *smb_ParseASCIIBlock(smb_packet_t * pktp, unsigned char *inp,
char **chainpp, int flags);
qpi.u.QPfileBasicInfo.reserved = 0;
}
else if (infoLevel == SMB_QUERY_FILE_STANDARD_INFO) {
- smb_fid_t *fidp = smb_FindFIDByScache(vcp, scp);
+ smb_fid_t * fidp;
+
+ lock_ReleaseRead(&scp->rw);
+ scp_rw_held = 0;
+ fidp = smb_FindFIDByScache(vcp, scp);
qpi.u.QPfileStandardInfo.allocationSize = scp->length;
qpi.u.QPfileStandardInfo.endOfFile = scp->length;
qpi.u.QPfileStandardInfo.reserved = 0;
if (fidp) {
- lock_ReleaseRead(&scp->rw);
- scp_rw_held = 0;
lock_ObtainMutex(&fidp->mx);
delonclose = fidp->flags & SMB_FID_DELONCLOSE;
lock_ReleaseMutex(&fidp->mx);
unsigned int extendedRespRequired;
int realDirFlag;
unsigned int desiredAccess;
-#ifdef DEBUG_VERBOSE
unsigned int allocSize;
-#endif
unsigned int shareAccess;
unsigned int extAttributes;
unsigned int createDisp;
-#ifdef DEBUG_VERBOSE
unsigned int sdLen;
-#endif
+ unsigned int eaLen;
+ unsigned int impLevel;
+ unsigned int secFlags;
unsigned int createOptions;
int initialModeBits;
unsigned short baseFid;
return CM_ERROR_INVAL;
baseFid = (unsigned short)lparmp[1];
desiredAccess = lparmp[2];
-#ifdef DEBUG_VERBOSE
allocSize = lparmp[3];
-#endif /* DEBUG_VERSOSE */
extAttributes = lparmp[5];
shareAccess = lparmp[6];
createDisp = lparmp[7];
createOptions = lparmp[8];
-#ifdef DEBUG_VERBOSE
sdLen = lparmp[9];
-#endif
- nameLength = lparmp[11];
-
-#ifdef DEBUG_VERBOSE
- osi_Log4(smb_logp,"NTTranCreate with da[%x],ea[%x],sa[%x],cd[%x]",desiredAccess,extAttributes,shareAccess,createDisp);
- osi_Log3(smb_logp,"... co[%x],sdl[%x],as[%x]",createOptions,sdLen,allocSize);
- osi_Log1(smb_logp,"... flags[%x]",flags);
-#endif
+ eaLen = lparmp[10];
+ nameLength = lparmp[11]; /* spec says chars but appears to be bytes */
+ impLevel = lparmp[12];
+ secFlags = lparmp[13];
/* mustBeDir is never set; createOptions directory bit seems to be
* more important
if (extAttributes & SMB_ATTR_READONLY)
initialModeBits &= ~0222;
- pathp = smb_ParseStringCch(inp, (parmp + (13 * sizeof(ULONG)) + sizeof(UCHAR)),
+ pathp = smb_ParseStringCb(inp, (parmp + (13 * sizeof(ULONG)) + sizeof(UCHAR)),
nameLength, NULL, SMB_STRF_ANSIPATH);
- /* Sometimes path is not null-terminated, so we make a copy. */
- realPathp = malloc((nameLength+1) * sizeof(clientchar_t));
- memcpy(realPathp, pathp, nameLength * sizeof(clientchar_t));
- realPathp[nameLength] = 0;
+ /* Sometimes path is not nul-terminated, so we make a copy. */
+ realPathp = malloc(nameLength+sizeof(clientchar_t));
+ memcpy(realPathp, pathp, nameLength);
+ realPathp[nameLength/sizeof(clientchar_t)] = 0;
spacep = cm_GetSpace();
smb_StripLastComponent(spacep->wdata, &lastNamep, realPathp);
+ osi_Log1(smb_logp,"NTTranCreate %S",osi_LogSaveStringW(smb_logp,realPathp));
+ osi_Log4(smb_logp,"... da[%x],ea[%x],sa[%x],cd[%x]",desiredAccess,extAttributes,shareAccess,createDisp);
+ osi_Log4(smb_logp,"... co[%x],sdl[%x],eal[%x],as[%x],flags[%x]",createOptions,sdLen,eaLen,allocSize);
+ osi_Log3(smb_logp,"... imp[%x],sec[%x],flags[%x]", impLevel, secFlags, flags);
+
/*
* Nothing here to handle SMB_IOCTL_FILENAME.
* Will add it if necessary.
(!isDirectParent && !wtree))
{
osi_Log1(smb_logp," skipping fidp->scp[%x]", fidp->scp);
- smb_ReleaseFID(fidp);
lastWatch = watch;
watch = watch->nextp;
+ lock_ReleaseMutex(&smb_Dir_Watch_Lock);
+ smb_ReleaseFID(fidp);
+ lock_ObtainMutex(&smb_Dir_Watch_Lock);
continue;
}
- smb_ReleaseFID(fidp);
osi_Log4(smb_logp,
"Sending Change Notification for fid %d filter 0x%x wtree %d file %S",
else
lastWatch->nextp = nextWatch;
+ /* The watch is off the list, its ours now, safe to drop the lock */
+ lock_ReleaseMutex(&smb_Dir_Watch_Lock);
+
/* Turn off WATCHED flag in dscp */
lock_ObtainWrite(&dscp->rw);
if (wtree)
smb_SendPacket(watch->vcp, watch);
smb_FreePacket(watch);
+
+ smb_ReleaseFID(fidp);
+ lock_ObtainMutex(&smb_Dir_Watch_Lock);
watch = nextWatch;
}
lock_ReleaseMutex(&smb_Dir_Watch_Lock);
osi_Log3(smb_logp, "NTRename for [%S]->[%S] type [%s]",
osi_LogSaveClientString(smb_logp, oldPathp),
osi_LogSaveClientString(smb_logp, newPathp),
- ((rename_type==RENAME_FLAG_RENAME)?"rename":"hardlink"));
+ ((rename_type==RENAME_FLAG_RENAME)?"rename":(rename_type==RENAME_FLAG_HARD_LINK)?"hardlink":"other"));
if (rename_type == RENAME_FLAG_RENAME) {
code = smb_Rename(vcp,inp,oldPathp,newPathp,attrs);
- } else { /* RENAME_FLAG_HARD_LINK */
+ } else if (rename_type == RENAME_FLAG_HARD_LINK) { /* RENAME_FLAG_HARD_LINK */
code = smb_Link(vcp,inp,oldPathp,newPathp);
- }
+ } else
+ code = CM_ERROR_BADOP;
return code;
}
void smb3_Init()
{
- lock_InitializeMutex(&smb_Dir_Watch_Lock, "Directory Watch List Lock");
+ lock_InitializeMutex(&smb_Dir_Watch_Lock, "Directory Watch List Lock", LOCK_HIERARCHY_SMB_DIRWATCH);
}
cm_user_t *smb_FindCMUserByName(clientchar_t *usern, clientchar_t *machine, afs_uint32 flags)
InitCommonControls();
RegisterCheckListClass();
osi_Init();
- lock_InitializeMutex(&g.expirationCheckLock, "expiration check lock");
- lock_InitializeMutex(&g.credsLock, "global creds lock");
+ lock_InitializeMutex(&g.expirationCheckLock, "expiration check lock", 0);
+ lock_InitializeMutex(&g.credsLock, "global creds lock", 0);
KFW_AFS_wait_for_service_start();
/* misc definitions */
/* large int */
-#ifndef DJGPP
#include <rpc.h>
#if !defined(_MSC_VER) || (_MSC_VER < 1300)
#include "largeint.h"
#endif
#include "osithrdnt.h"
-#else /* DJGPP */
-#include "largeint95.h"
-#endif /* !DJGPP */
typedef LARGE_INTEGER osi_hyper_t;
#if _MSC_VER >= 1300
#define LargeIntegerNotEqualToZero(a) ((a).HighPart || (a).LowPart)
#endif
-#ifndef DJGPP
typedef GUID osi_uid_t;
-#else /* DJGPP */
-typedef int osi_uid_t;
-#endif /* !DJGPP */
-
typedef int int32;
-#ifndef DJGPP
/* basic util functions */
#include "osiutils.h"
/* lock type definitions */
#include "osiltype.h"
-#endif /* !DJGPP */
/* basic sleep operations */
#include "osisleep.h"
-#ifndef DJGPP
/* base lock definitions */
#include "osibasel.h"
/* RPC debug stuff */
#include "osidb.h"
-#else /* DJGPP */
-#include "osithrd95.h"
-#endif /* !DJGPP */
/* log stuff */
#include "osilog.h"
#include <windows.h>
#include "osi.h"
#include <assert.h>
+#include <stdio.h>
/* atomicity-providing critical sections */
CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE];
static long atomicIndexCounter = 0;
+/* Thread local storage index for lock tracking */
+static DWORD tls_LockRefH = 0;
+static DWORD tls_LockRefT = 0;
+
void osi_BaseInit(void)
{
- int i;
+ int i;
- for(i=0; i<OSI_MUTEXHASHSIZE; i++)
- InitializeCriticalSection(&osi_baseAtomicCS[i]);
-}
+ for(i=0; i<OSI_MUTEXHASHSIZE; i++)
+ InitializeCriticalSection(&osi_baseAtomicCS[i]);
-void lock_ObtainWrite(osi_rwlock_t *lockp)
-{
- long i;
- CRITICAL_SECTION *csp;
-
- if ((i=lockp->type) != 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->ObtainWriteProc)(lockp);
- return;
- }
-
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
- /* here we have the fast lock, so see if we can obtain the real lock */
- if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
- || (lockp->readers > 0)) {
- lockp->waiters++;
- osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
- lockp->waiters--;
- osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
- }
- else {
- /* if we're here, all clear to set the lock */
- lockp->flags |= OSI_LOCKFLAG_EXCL;
- }
+ if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES)
+ osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__);
- lockp->tid = thrd_Current();
+ if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
+ osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);
+}
- LeaveCriticalSection(csp);
+osi_lock_ref_t *lock_GetLockRef(void * lockp, char type)
+{
+ osi_lock_ref_t * lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));
+
+ memset(lockRefp, 0, sizeof(osi_lock_ref_t));
+ lockRefp->type = type;
+ switch (type) {
+ case OSI_LOCK_MUTEX:
+ lockRefp->mx = lockp;
+ break;
+ case OSI_LOCK_RW:
+ lockRefp->rw = lockp;
+ break;
+ default:
+ osi_panic("Invalid Lock Type", __FILE__, __LINE__);
+ }
+
+ return lockRefp;
}
-void lock_ObtainRead(osi_rwlock_t *lockp)
+void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
{
- long i;
- CRITICAL_SECTION *csp;
-
- if ((i=lockp->type) != 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->ObtainReadProc)(lockp);
- return;
- }
-
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
- /* here we have the fast lock, so see if we can obtain the real lock */
- if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
- lockp->waiters++;
- osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, csp);
- lockp->waiters--;
- osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
- }
- else {
- /* if we're here, all clear to set the lock */
- lockp->readers++;
- }
+ char msg[512];
+ osi_lock_ref_t * lockRefp;
+
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_RW) {
+ if (lockRefp->rw == lockp) {
+ sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level);
+ osi_panic(msg, __FILE__, __LINE__);
+ }
+ if (lockRefp->rw->level > lockp->level) {
+ sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
+ lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
+ osi_panic(msg, __FILE__, __LINE__);
+ }
+ } else {
+ if (lockRefp->mx->level > lockp->level) {
+ sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
+ lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
+ osi_panic(msg, __FILE__, __LINE__);
+ }
+ osi_assertx(lockRefp->mx->level <= lockp->level, "Lock hierarchy violation");
+ }
+ }
+}
- LeaveCriticalSection(csp);
+void lock_VerifyOrderMX(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_mutex_t *lockp)
+{
+ char msg[512];
+ osi_lock_ref_t * lockRefp;
+
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_MUTEX) {
+ if (lockRefp->mx == lockp) {
+ sprintf(msg, "MX Lock 0x%p level %d already held", lockp, lockp->level);
+ osi_panic(msg, __FILE__, __LINE__);
+ }
+ if (lockRefp->mx->level > lockp->level) {
+ sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
+ lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
+ osi_panic(msg, __FILE__, __LINE__);
+ }
+ } else {
+ if (lockRefp->rw->level > lockp->level) {
+ sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
+ lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
+ osi_panic(msg, __FILE__, __LINE__);
+ }
+ }
+ }
}
-void lock_ReleaseRead(osi_rwlock_t *lockp)
+void lock_ObtainWrite(osi_rwlock_t *lockp)
{
- long i;
- CRITICAL_SECTION *csp;
+ long i;
+ CRITICAL_SECTION *csp;
+ osi_queue_t * lockRefH, *lockRefT;
+ osi_lock_ref_t *lockRefp;
+
+ if ((i=lockp->type) != 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->ObtainWriteProc)(lockp);
+ return;
+ }
+
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+
+ if (lockp->level != 0)
+ lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ /* here we have the fast lock, so see if we can obtain the real lock */
+ if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
+ (lockp->readers > 0)) {
+ lockp->waiters++;
+ osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
+ lockp->waiters--;
+ osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
+ }
+ else {
+ /* if we're here, all clear to set the lock */
+ lockp->flags |= OSI_LOCKFLAG_EXCL;
+ }
+
+ lockp->tid = thrd_Current();
+
+ LeaveCriticalSection(csp);
+
+ lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
+ osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
+ TlsSetValue(tls_LockRefH, lockRefH);
+ TlsSetValue(tls_LockRefT, lockRefT);
+}
- if ((i = lockp->type) != 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->ReleaseReadProc)(lockp);
- return;
- }
+void lock_ObtainRead(osi_rwlock_t *lockp)
+{
+ long i;
+ CRITICAL_SECTION *csp;
+ osi_queue_t * lockRefH, *lockRefT;
+ osi_lock_ref_t *lockRefp;
+
+ if ((i=lockp->type) != 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->ObtainReadProc)(lockp);
+ return;
+ }
+
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+
+ if (lockp->level != 0)
+ lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ /* here we have the fast lock, so see if we can obtain the real lock */
+ if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
+ lockp->waiters++;
+ osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, csp);
+ lockp->waiters--;
+ osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
+ }
+ else {
+ /* if we're here, all clear to set the lock */
+ lockp->readers++;
+ }
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
+ LeaveCriticalSection(csp);
- osi_assertx(lockp->readers > 0, "read lock not held");
-
- /* releasing a read lock can allow readers or writers */
- if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
- osi_TSignalForMLs(&lockp->d.turn, 0, csp);
- }
- else {
- /* and finally release the big lock */
- LeaveCriticalSection(csp);
- }
+ lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
+ osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
+ TlsSetValue(tls_LockRefH, lockRefH);
+ TlsSetValue(tls_LockRefT, lockRefT);
}
-void lock_ReleaseWrite(osi_rwlock_t *lockp)
+void lock_ReleaseRead(osi_rwlock_t *lockp)
{
- long i;
- CRITICAL_SECTION *csp;
-
- if ((i = lockp->type) != 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->ReleaseWriteProc)(lockp);
- return;
- }
-
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
- osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
-
- lockp->tid = 0;
-
- lockp->flags &= ~OSI_LOCKFLAG_EXCL;
- if (!osi_TEmpty(&lockp->d.turn)) {
- osi_TSignalForMLs(&lockp->d.turn, 0, csp);
- }
- else {
- /* and finally release the big lock */
- LeaveCriticalSection(csp);
- }
+ long i;
+ CRITICAL_SECTION *csp;
+ osi_queue_t * lockRefH, *lockRefT;
+ osi_lock_ref_t *lockRefp;
+
+ if ((i = lockp->type) != 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->ReleaseReadProc)(lockp);
+ return;
+ }
+
+ if (lockp->level != 0) {
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
+ osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
+ free(lockRefp);
+ break;
+ }
+ }
+
+ TlsSetValue(tls_LockRefH, lockRefH);
+ TlsSetValue(tls_LockRefT, lockRefT);
+ }
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ osi_assertx(lockp->readers > 0, "read lock not held");
+
+ /* releasing a read lock can allow readers or writers */
+ if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
+ osi_TSignalForMLs(&lockp->d.turn, 0, csp);
+ }
+ else {
+ /* and finally release the big lock */
+ LeaveCriticalSection(csp);
+ }
}
+void lock_ReleaseWrite(osi_rwlock_t *lockp)
+{
+ long i;
+ CRITICAL_SECTION *csp;
+ osi_queue_t * lockRefH, *lockRefT;
+ osi_lock_ref_t *lockRefp;
+
+ if ((i = lockp->type) != 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->ReleaseWriteProc)(lockp);
+ return;
+ }
+
+ if (lockp->level != 0) {
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
+ osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
+ free(lockRefp);
+ break;
+ }
+ }
+
+ TlsSetValue(tls_LockRefH, lockRefH);
+ TlsSetValue(tls_LockRefT, lockRefT);
+ }
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
+
+ lockp->tid = 0;
+
+ lockp->flags &= ~OSI_LOCKFLAG_EXCL;
+ if (!osi_TEmpty(&lockp->d.turn)) {
+ osi_TSignalForMLs(&lockp->d.turn, 0, csp);
+ }
+ else {
+ /* and finally release the big lock */
+ LeaveCriticalSection(csp);
+ }
+}
+
void lock_ConvertWToR(osi_rwlock_t *lockp)
{
- long i;
- CRITICAL_SECTION *csp;
+ long i;
+ CRITICAL_SECTION *csp;
- if ((i = lockp->type) != 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->ConvertWToRProc)(lockp);
- return;
- }
+ if ((i = lockp->type) != 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->ConvertWToRProc)(lockp);
+ return;
+ }
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
- osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
-
- /* convert write lock to read lock */
- lockp->flags &= ~OSI_LOCKFLAG_EXCL;
- lockp->readers++;
+ osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
+
+ /* convert write lock to read lock */
+ lockp->flags &= ~OSI_LOCKFLAG_EXCL;
+ lockp->readers++;
- lockp->tid = 0;
+ lockp->tid = 0;
- if (!osi_TEmpty(&lockp->d.turn)) {
- osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
- }
- else {
- /* and finally release the big lock */
- LeaveCriticalSection(csp);
- }
+ if (!osi_TEmpty(&lockp->d.turn)) {
+ osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
+ }
+ else {
+ /* and finally release the big lock */
+ LeaveCriticalSection(csp);
+ }
}
void lock_ConvertRToW(osi_rwlock_t *lockp)
{
- long i;
- CRITICAL_SECTION *csp;
-
- if ((i = lockp->type) != 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->ConvertRToWProc)(lockp);
- return;
- }
-
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
- osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
- osi_assertx(lockp->readers > 0, "read lock not held");
-
- if (--lockp->readers == 0) {
- /* convert read lock to write lock */
- lockp->flags |= OSI_LOCKFLAG_EXCL;
- } else {
- lockp->waiters++;
- osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
- lockp->waiters--;
- osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
- }
-
- lockp->tid = thrd_Current();
- LeaveCriticalSection(csp);
-}
+ long i;
+ CRITICAL_SECTION *csp;
+
+ if ((i = lockp->type) != 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->ConvertRToWProc)(lockp);
+ return;
+ }
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
+ osi_assertx(lockp->readers > 0, "read lock not held");
+
+ if (--lockp->readers == 0) {
+ /* convert read lock to write lock */
+ lockp->flags |= OSI_LOCKFLAG_EXCL;
+ } else {
+ lockp->waiters++;
+ osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
+ lockp->waiters--;
+ osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
+ }
+
+ lockp->tid = thrd_Current();
+ LeaveCriticalSection(csp);
+}
void lock_ObtainMutex(struct osi_mutex *lockp)
{
- long i;
- CRITICAL_SECTION *csp;
-
- if ((i=lockp->type) != 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->ObtainMutexProc)(lockp);
- return;
- }
-
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
- /* here we have the fast lock, so see if we can obtain the real lock */
- if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
- lockp->waiters++;
- osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
- lockp->waiters--;
- osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
- }
- else {
- /* if we're here, all clear to set the lock */
- lockp->flags |= OSI_LOCKFLAG_EXCL;
- }
- lockp->tid = thrd_Current();
- LeaveCriticalSection(csp);
+ long i;
+ CRITICAL_SECTION *csp;
+ osi_queue_t * lockRefH, *lockRefT;
+ osi_lock_ref_t *lockRefp;
+
+ if ((i=lockp->type) != 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->ObtainMutexProc)(lockp);
+ return;
+ }
+
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+
+ if (lockp->level != 0)
+ lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ /* here we have the fast lock, so see if we can obtain the real lock */
+ if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
+ lockp->waiters++;
+ osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
+ lockp->waiters--;
+ osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
+ }
+ else {
+ /* if we're here, all clear to set the lock */
+ lockp->flags |= OSI_LOCKFLAG_EXCL;
+ }
+ lockp->tid = thrd_Current();
+ LeaveCriticalSection(csp);
+
+ lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
+ osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
+ TlsSetValue(tls_LockRefH, lockRefH);
+ TlsSetValue(tls_LockRefT, lockRefT);
}
void lock_ReleaseMutex(struct osi_mutex *lockp)
{
- long i;
- CRITICAL_SECTION *csp;
+ long i;
+ CRITICAL_SECTION *csp;
+ osi_queue_t * lockRefH, *lockRefT;
+ osi_lock_ref_t *lockRefp;
+
+ if ((i = lockp->type) != 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->ReleaseMutexProc)(lockp);
+ return;
+ }
+
+ if (lockp->level != 0) {
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
+ osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
+ free(lockRefp);
+ break;
+ }
+ }
+
+ TlsSetValue(tls_LockRefH, lockRefH);
+ TlsSetValue(tls_LockRefT, lockRefT);
+ }
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
+
+ lockp->flags &= ~OSI_LOCKFLAG_EXCL;
+ lockp->tid = 0;
+ if (!osi_TEmpty(&lockp->d.turn)) {
+ osi_TSignalForMLs(&lockp->d.turn, 0, csp);
+ }
+ else {
+ /* and finally release the big lock */
+ LeaveCriticalSection(csp);
+ }
+}
- if ((i = lockp->type) != 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->ReleaseMutexProc)(lockp);
- return;
- }
+int lock_TryRead(struct osi_rwlock *lockp)
+{
+ long i;
+ CRITICAL_SECTION *csp;
+ osi_queue_t * lockRefH, *lockRefT;
+ osi_lock_ref_t *lockRefp;
+
+ if ((i=lockp->type) != 0)
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ return (osi_lockOps[i]->TryReadProc)(lockp);
+
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+
+ if (lockp->level != 0) {
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_RW) {
+ osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
+ }
+ }
+ }
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ /* here we have the fast lock, so see if we can obtain the real lock */
+ if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
+ i = 0;
+ }
+ else {
+ /* if we're here, all clear to set the lock */
+ lockp->readers++;
+ i = 1;
+ }
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
+ LeaveCriticalSection(csp);
- osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
-
- lockp->flags &= ~OSI_LOCKFLAG_EXCL;
- lockp->tid = 0;
- if (!osi_TEmpty(&lockp->d.turn)) {
- osi_TSignalForMLs(&lockp->d.turn, 0, csp);
- }
- else {
- /* and finally release the big lock */
- LeaveCriticalSection(csp);
- }
-}
+    /* only record the ref for hierarchy-tracked locks (level != 0);
+     * the release path removes refs only when level != 0, so adding
+     * unconditionally leaks the ref for level-0 locks. */
+    if (i && lockp->level != 0) {
+        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
+        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
+        TlsSetValue(tls_LockRefH, lockRefH);
+        TlsSetValue(tls_LockRefT, lockRefT);
+    }
-int lock_TryRead(struct osi_rwlock *lockp)
-{
- long i;
- CRITICAL_SECTION *csp;
-
- if ((i=lockp->type) != 0)
- if (i >= 0 && i < OSI_NLOCKTYPES)
- return (osi_lockOps[i]->TryReadProc)(lockp);
-
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
- /* here we have the fast lock, so see if we can obtain the real lock */
- if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
- i = 0;
- }
- else {
- /* if we're here, all clear to set the lock */
- lockp->readers++;
- i = 1;
- }
-
- LeaveCriticalSection(csp);
-
- return i;
-}
+ return i;
+}
int lock_TryWrite(struct osi_rwlock *lockp)
{
- long i;
- CRITICAL_SECTION *csp;
-
- if ((i=lockp->type) != 0)
- if (i >= 0 && i < OSI_NLOCKTYPES)
- return (osi_lockOps[i]->TryWriteProc)(lockp);
-
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
- /* here we have the fast lock, so see if we can obtain the real lock */
- if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
- || (lockp->readers > 0)) {
- i = 0;
- }
- else {
- /* if we're here, all clear to set the lock */
- lockp->flags |= OSI_LOCKFLAG_EXCL;
- i = 1;
- }
-
- if (i)
- lockp->tid = thrd_Current();
+ long i;
+ CRITICAL_SECTION *csp;
+ osi_queue_t * lockRefH, *lockRefT;
+ osi_lock_ref_t *lockRefp;
+
+ if ((i=lockp->type) != 0)
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ return (osi_lockOps[i]->TryWriteProc)(lockp);
+
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+
+ if (lockp->level != 0) {
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_RW) {
+ osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
+ }
+ }
+ }
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ /* here we have the fast lock, so see if we can obtain the real lock */
+ if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
+ || (lockp->readers > 0)) {
+ i = 0;
+ }
+ else {
+ /* if we're here, all clear to set the lock */
+ lockp->flags |= OSI_LOCKFLAG_EXCL;
+ i = 1;
+ }
+
+ if (i)
+ lockp->tid = thrd_Current();
- LeaveCriticalSection(csp);
+ LeaveCriticalSection(csp);
- return i;
+    /* only record the ref for hierarchy-tracked locks (level != 0);
+     * release removes refs only when level != 0, so adding
+     * unconditionally leaks the ref for level-0 locks. */
+    if (i && lockp->level != 0) {
+        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
+        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
+        TlsSetValue(tls_LockRefH, lockRefH);
+        TlsSetValue(tls_LockRefT, lockRefT);
+    }
+
+ return i;
}
int lock_TryMutex(struct osi_mutex *lockp) {
- long i;
- CRITICAL_SECTION *csp;
-
- if ((i=lockp->type) != 0)
- if (i >= 0 && i < OSI_NLOCKTYPES)
- return (osi_lockOps[i]->TryMutexProc)(lockp);
-
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
- /* here we have the fast lock, so see if we can obtain the real lock */
- if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
- i = 0;
- }
- else {
- /* if we're here, all clear to set the lock */
- lockp->flags |= OSI_LOCKFLAG_EXCL;
- i = 1;
- }
-
- if (i)
- lockp->tid = thrd_Current();
+ long i;
+ CRITICAL_SECTION *csp;
+ osi_queue_t * lockRefH, *lockRefT;
+ osi_lock_ref_t *lockRefp;
+
+ if ((i=lockp->type) != 0)
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ return (osi_lockOps[i]->TryMutexProc)(lockp);
+
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+
+ if (lockp->level != 0) {
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_MUTEX) {
+ osi_assertx(lockRefp->mx != lockp, "Mutex already held");
+ }
+ }
+ }
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ /* here we have the fast lock, so see if we can obtain the real lock */
+ if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
+ i = 0;
+ }
+ else {
+ /* if we're here, all clear to set the lock */
+ lockp->flags |= OSI_LOCKFLAG_EXCL;
+ i = 1;
+ }
+
+ if (i)
+ lockp->tid = thrd_Current();
- LeaveCriticalSection(csp);
+ LeaveCriticalSection(csp);
- return i;
+    /* only record the ref for hierarchy-tracked locks (level != 0);
+     * lock_ReleaseMutex removes refs only when level != 0, so adding
+     * unconditionally leaks the ref for level-0 locks. */
+    if (i && lockp->level != 0) {
+        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
+        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
+        TlsSetValue(tls_LockRefH, lockRefH);
+        TlsSetValue(tls_LockRefT, lockRefT);
+    }
+ return i;
}
void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
- long i;
- CRITICAL_SECTION *csp;
-
- if ((i = lockp->type) != 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
- return;
- }
-
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
- osi_assertx(lockp->readers > 0, "osi_SleepR: not held");
-
- /* XXX better to get the list of things to wakeup from TSignalForMLs, and
- * then do the wakeup after SleepSpin releases the low-level mutex.
- */
- if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
- osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
- }
-
- /* now call into scheduler to sleep atomically with releasing spin lock */
- osi_SleepSpin(sleepVal, csp);
-}
+ long i;
+ CRITICAL_SECTION *csp;
+ osi_queue_t * lockRefH, *lockRefT;
+ osi_lock_ref_t *lockRefp;
+
+ if ((i = lockp->type) != 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
+ return;
+ }
+
+ if (lockp->level != 0) {
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
+ osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
+ free(lockRefp);
+ break;
+ }
+ }
+
+ TlsSetValue(tls_LockRefH, lockRefH);
+ TlsSetValue(tls_LockRefT, lockRefT);
+ }
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ osi_assertx(lockp->readers > 0, "osi_SleepR: not held");
+
+ /* XXX better to get the list of things to wakeup from TSignalForMLs, and
+ * then do the wakeup after SleepSpin releases the low-level mutex.
+ */
+ if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
+ osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
+ }
+
+ /* now call into scheduler to sleep atomically with releasing spin lock */
+ osi_SleepSpin(sleepVal, csp);
+}
void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
- long i;
- CRITICAL_SECTION *csp;
-
- if ((i = lockp->type) != 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
- return;
- }
-
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
- osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");
-
- lockp->flags &= ~OSI_LOCKFLAG_EXCL;
- if (!osi_TEmpty(&lockp->d.turn)) {
- osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
- }
-
- /* and finally release the big lock */
- osi_SleepSpin(sleepVal, csp);
+ long i;
+ CRITICAL_SECTION *csp;
+ osi_queue_t * lockRefH, *lockRefT;
+ osi_lock_ref_t *lockRefp;
+
+ if ((i = lockp->type) != 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
+ return;
+ }
+
+ if (lockp->level != 0) {
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
+ osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
+ free(lockRefp);
+ break;
+ }
+ }
+
+ TlsSetValue(tls_LockRefH, lockRefH);
+ TlsSetValue(tls_LockRefT, lockRefT);
+ }
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");
+
+ lockp->flags &= ~OSI_LOCKFLAG_EXCL;
+ if (!osi_TEmpty(&lockp->d.turn)) {
+ osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
+ }
+
+ /* and finally release the big lock */
+ osi_SleepSpin(sleepVal, csp);
}
void osi_SleepM(LONG_PTR sleepVal, struct osi_mutex *lockp)
{
- long i;
- CRITICAL_SECTION *csp;
-
- if ((i = lockp->type) != 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
- return;
- }
-
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lockp->atomicIndex];
- EnterCriticalSection(csp);
-
- osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");
+ long i;
+ CRITICAL_SECTION *csp;
+ osi_queue_t * lockRefH, *lockRefT;
+ osi_lock_ref_t *lockRefp;
+
+ if ((i = lockp->type) != 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
+ return;
+ }
+
+ if (lockp->level != 0) {
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
+ osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
+ free(lockRefp);
+ break;
+ }
+ }
+
+ TlsSetValue(tls_LockRefH, lockRefH);
+ TlsSetValue(tls_LockRefT, lockRefT);
+ }
+
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lockp->atomicIndex];
+ EnterCriticalSection(csp);
+
+ osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");
- lockp->flags &= ~OSI_LOCKFLAG_EXCL;
- if (!osi_TEmpty(&lockp->d.turn)) {
- osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
- }
+ lockp->flags &= ~OSI_LOCKFLAG_EXCL;
+ if (!osi_TEmpty(&lockp->d.turn)) {
+ osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
+ }
- /* and finally release the big lock */
- osi_SleepSpin(sleepVal, csp);
+ /* and finally release the big lock */
+ osi_SleepSpin(sleepVal, csp);
}
void lock_FinalizeRWLock(osi_rwlock_t *lockp)
{
- long i;
+ long i;
- if ((i=lockp->type) != 0)
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
-}
+ if ((i=lockp->type) != 0)
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
+}
void lock_FinalizeMutex(osi_mutex_t *lockp)
-{
- long i;
+{
+ long i;
- if ((i=lockp->type) != 0)
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->FinalizeMutexProc)(lockp);
-}
+ if ((i=lockp->type) != 0)
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->FinalizeMutexProc)(lockp);
+}
-void lock_InitializeMutex(osi_mutex_t *mp, char *namep)
+void lock_InitializeMutex(osi_mutex_t *mp, char *namep, unsigned short level)
{
- int i;
-
- if ((i = osi_lockTypeDefault) > 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->InitializeMutexProc)(mp, namep);
- return;
- }
-
- /* otherwise we have the base case, which requires no special
- * initialization.
- */
- mp->type = 0;
- mp->flags = 0;
- mp->tid = 0;
- mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
- osi_TInit(&mp->d.turn);
- return;
+ int i;
+
+ if ((i = osi_lockTypeDefault) > 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->InitializeMutexProc)(mp, namep, level);
+ return;
+ }
+
+ /* otherwise we have the base case, which requires no special
+ * initialization.
+ */
+ mp->type = 0;
+ mp->flags = 0;
+ mp->tid = 0;
+ mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
+ mp->level = level;
+ osi_TInit(&mp->d.turn);
+ return;
}
-void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep)
+void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep, unsigned short level)
{
- int i;
+ int i;
- if ((i = osi_lockTypeDefault) > 0) {
- if (i >= 0 && i < OSI_NLOCKTYPES)
- (osi_lockOps[i]->InitializeRWLockProc)(mp, namep);
- return;
- }
+ if ((i = osi_lockTypeDefault) > 0) {
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ (osi_lockOps[i]->InitializeRWLockProc)(mp, namep, level);
+ return;
+ }
- /* otherwise we have the base case, which requires no special
- * initialization.
- */
- mp->type = 0;
- mp->flags = 0;
- mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
- mp->readers = 0;
- mp->tid = 0;
- osi_TInit(&mp->d.turn);
- return;
+ /* otherwise we have the base case, which requires no special
+ * initialization.
+ */
+ mp->type = 0;
+ mp->flags = 0;
+ mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
+ mp->readers = 0;
+ mp->tid = 0;
+ mp->level = level;
+ osi_TInit(&mp->d.turn);
+ return;
}
int lock_GetRWLockState(osi_rwlock_t *lp)
{
- long i;
- CRITICAL_SECTION *csp;
+ long i;
+ CRITICAL_SECTION *csp;
- if ((i=lp->type) != 0)
- if (i >= 0 && i < OSI_NLOCKTYPES)
- return (osi_lockOps[i]->GetRWLockState)(lp);
+ if ((i=lp->type) != 0)
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ return (osi_lockOps[i]->GetRWLockState)(lp);
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[lp->atomicIndex];
- EnterCriticalSection(csp);
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[lp->atomicIndex];
+ EnterCriticalSection(csp);
- /* here we have the fast lock, so see if we can obtain the real lock */
- if (lp->flags & OSI_LOCKFLAG_EXCL) i = OSI_RWLOCK_WRITEHELD;
- else i = 0;
- if (lp->readers > 0) i |= OSI_RWLOCK_READHELD;
+ /* here we have the fast lock, so see if we can obtain the real lock */
+ if (lp->flags & OSI_LOCKFLAG_EXCL)
+ i = OSI_RWLOCK_WRITEHELD;
+ else
+ i = 0;
+ if (lp->readers > 0)
+ i |= OSI_RWLOCK_READHELD;
- LeaveCriticalSection(csp);
+ LeaveCriticalSection(csp);
- return i;
+ return i;
}
-int lock_GetMutexState(struct osi_mutex *mp) {
- long i;
- CRITICAL_SECTION *csp;
+int lock_GetMutexState(struct osi_mutex *mp)
+{
+ long i;
+ CRITICAL_SECTION *csp;
- if ((i=mp->type) != 0)
- if (i >= 0 && i < OSI_NLOCKTYPES)
- return (osi_lockOps[i]->GetMutexState)(mp);
+ if ((i=mp->type) != 0)
+ if (i >= 0 && i < OSI_NLOCKTYPES)
+ return (osi_lockOps[i]->GetMutexState)(mp);
- /* otherwise we're the fast base type */
- csp = &osi_baseAtomicCS[mp->atomicIndex];
- EnterCriticalSection(csp);
+ /* otherwise we're the fast base type */
+ csp = &osi_baseAtomicCS[mp->atomicIndex];
+ EnterCriticalSection(csp);
- if (mp->flags & OSI_LOCKFLAG_EXCL)
- i = OSI_MUTEX_HELD;
- else
- i = 0;
+ if (mp->flags & OSI_LOCKFLAG_EXCL)
+ i = OSI_MUTEX_HELD;
+ else
+ i = 0;
- LeaveCriticalSection(csp);
+ LeaveCriticalSection(csp);
- return i;
+ return i;
}
* lock using an atomic increment operation.
*/
typedef struct osi_mutex {
- char type; /* for all types; type 0 uses atomic count */
- char flags; /* flags for base type */
- unsigned short atomicIndex; /* index of lock for low-level sync */
- DWORD tid; /* tid of thread that owns the lock */
- unsigned short waiters; /* waiters */
- unsigned short pad;
- union {
- void *privateDatap; /* data pointer for non-zero types */
- osi_turnstile_t turn; /* turnstile */
- } d;
+ char type; /* for all types; type 0 uses atomic count */
+ char flags; /* flags for base type */
+ unsigned short atomicIndex; /* index of lock for low-level sync */
+ DWORD tid; /* tid of thread that owns the lock */
+ unsigned short waiters; /* waiters */
+ unsigned short pad;
+ union {
+ void *privateDatap; /* data pointer for non-zero types */
+ osi_turnstile_t turn; /* turnstile */
+ } d;
+ unsigned short level; /* locking hierarchy level */
} osi_mutex_t;
/* a read/write lock. This structure has two forms. In the
* This type of lock has N readers or one writer.
*/
typedef struct osi_rwlock {
- char type; /* for all types; type 0 uses atomic count */
- char flags; /* flags for base type */
- unsigned short atomicIndex; /* index into hash table for low-level sync */
- DWORD tid; /* writer's tid */
- unsigned short waiters; /* waiters */
- unsigned short readers; /* readers */
- union {
- void *privateDatap; /* data pointer for non-zero types */
- osi_turnstile_t turn; /* turnstile */
- } d;
+ char type; /* for all types; type 0 uses atomic count */
+ char flags; /* flags for base type */
+ unsigned short atomicIndex; /* index into hash table for low-level sync */
+ DWORD tid; /* writer's tid */
+ unsigned short waiters; /* waiters */
+ unsigned short readers; /* readers */
+ union {
+ void *privateDatap; /* data pointer for non-zero types */
+ osi_turnstile_t turn; /* turnstile */
+ } d;
+ unsigned short level; /* locking hierarchy level */
} osi_rwlock_t;
+
+/*
+ * a lock reference is a queue object that maintains a reference to a
+ * mutex or read/write lock object. Its intended purpose is for
+ * maintaining lists of lock objects on a per thread basis.
+ */
+typedef struct osi_lock_ref {
+ osi_queue_t q;
+ char type;
+ union {
+ osi_rwlock_t *rw;
+ osi_mutex_t *mx;
+ };
+} osi_lock_ref_t;
+
+#define OSI_LOCK_MUTEX 1
+#define OSI_LOCK_RW 2
+
extern void lock_ObtainRead (struct osi_rwlock *);
extern void lock_ObtainWrite (struct osi_rwlock *);
/* and define the functions that create basic locks and mutexes */
-extern void lock_InitializeRWLock(struct osi_rwlock *, char *);
+extern void lock_InitializeRWLock(struct osi_rwlock *, char *, unsigned short level);
-extern void lock_InitializeMutex(struct osi_mutex *, char *);
+extern void lock_InitializeMutex(struct osi_mutex *, char *, unsigned short level);
extern void osi_Init (void);
/* and friendly macros */
+#define lock_AssertNone(x) osi_assertx(lock_GetRWLockState(x) == 0, "(OSI_RWLOCK_READHELD | OSI_RWLOCK_WRITEHELD)")
+
#define lock_AssertRead(x) osi_assertx(lock_GetRWLockState(x) & OSI_RWLOCK_READHELD, "!OSI_RWLOCK_READHELD")
#define lock_AssertWrite(x) osi_assertx(lock_GetRWLockState(x) & OSI_RWLOCK_WRITEHELD, "!OSI_RWLOCK_WRITEHELD")
#include <afs/param.h>
#include <afs/stds.h>
-#ifndef DJGPP
#include <windows.h>
#include <rpc.h>
#include "dbrpc.h"
-#endif /* !DJGPP */
#include <malloc.h>
#include "osi.h"
#include <assert.h>
osi_fdOps_t osi_TypeFDOps = {
osi_FDTypeCreate,
-#ifndef DJGPP
osi_FDTypeGetInfo,
-#endif
osi_FDTypeClose
};
}
-#ifndef DJGPP
long osi_FDTypeGetInfo(osi_fd_t *ifdp, osi_remGetInfoParms_t *outp)
{
osi_typeFD_t *fdp;
return OSI_DBRPC_EOF;
}
}
-#endif /* !DJGPP */
long osi_FDTypeClose(osi_fd_t *ifdp)
{
#include <afs/param.h>
#include <afs/stds.h>
-#ifndef DJGPP
#include <windows.h>
#include <rpc.h>
-#endif /* !DJGPP */
#include <malloc.h>
#include "osi.h"
-#ifndef DJGPP
#include "dbrpc.h"
-#endif /* !DJGPP */
#include <stdio.h>
#include <assert.h>
#include <WINNT\afsreg.h>
osi_fdOps_t osi_logFDOps = {
osi_LogFDCreate,
-#ifndef DJGPP
osi_LogFDGetInfo,
-#endif
osi_LogFDClose
};
LARGE_INTEGER bigTemp;
LARGE_INTEGER bigJunk;
-#ifndef DJGPP
if (osi_Once(&osi_logOnce)) {
QueryPerformanceFrequency(&bigFreq);
if (bigFreq.LowPart == 0 && bigFreq.HighPart == 0)
/* done with init */
osi_EndOnce(&osi_logOnce);
}
-#endif /* !DJGPP */
logp = malloc(sizeof(osi_log_t));
memset(logp, 0, sizeof(osi_log_t));
StringCbCopyA(tbuffer, sizeof(tbuffer), "log:");
StringCbCatA(tbuffer, sizeof(tbuffer), namep);
typep = osi_RegisterFDType(tbuffer, &osi_logFDOps, logp);
-#ifndef DJGPP
if (typep) {
/* add formatting info */
osi_AddFDFormatInfo(typep, OSI_DBRPC_REGIONINT, 0,
osi_AddFDFormatInfo(typep, OSI_DBRPC_REGIONSTRING, 1,
"Time (mics)", 0);
}
-#endif
return logp;
}
-/* we just panic'd. Turn off all logging adding special log record
- * to all enabled logs. Be careful not to wait for a lock.
+/* we just panic'd. Log the error to all enabled log files.
+ * Be careful not to wait for a lock.
*/
-void osi_LogPanic(char *filep, size_t lineNumber)
+void osi_LogPanic(char *msgp, char *filep, size_t lineNumber)
{
osi_log_t *tlp;
/* otherwise, proceed */
if (filep)
- osi_LogAdd(tlp, "**PANIC** (file %s:%d)", (size_t) filep, lineNumber, 0, 0, 0);
+ osi_LogAdd(tlp, "**PANIC** \"%s\" (file %s:%d)", (size_t) msgp, (size_t) filep, lineNumber, 0, 0);
else
- osi_LogAdd(tlp, "**PANIC**", 0, 0, 0, 0, 0);
+ osi_LogAdd(tlp, "**PANIC** \"%s\"", (size_t)msgp, 0, 0, 0, 0);
/* should grab lock for this, but we're in panic, and better safe than
* sorry.
lep->tid = thrd_Current();
/* get the time, using the high res timer if available */
-#ifndef DJGPP
if (osi_logFreq) {
QueryPerformanceCounter(&bigTime);
lep->micros = (bigTime.LowPart / osi_logFreq) * osi_logTixToMicros;
}
else lep->micros = GetCurrentTime() * 1000;
-#else
- lep->micros = gettime_us();
-#endif /* !DJGPP */
lep->formatp = formatp;
lep->parms[0] = p0;
extern void osi_LogDisable(osi_log_t *);
-extern void osi_LogPanic(char *filep, size_t line);
+extern void osi_LogPanic(char *msgp, char *filep, size_t line);
extern void osi_LogPrint(osi_log_t *logp, FILE_HANDLE handle);
void (*SleepRProc)(LONG_PTR, struct osi_rwlock *);
void (*SleepWProc)(LONG_PTR, struct osi_rwlock *);
void (*SleepMProc)(LONG_PTR, struct osi_mutex *);
- void (*InitializeMutexProc)(struct osi_mutex *, char *);
- void (*InitializeRWLockProc)(struct osi_rwlock *, char *);
+ void (*InitializeMutexProc)(struct osi_mutex *, char *, unsigned short);
+ void (*InitializeRWLockProc)(struct osi_rwlock *, char *, unsigned short);
void (*FinalizeMutexProc)(struct osi_mutex *);
void (*FinalizeRWLockProc)(struct osi_rwlock *);
void (*ConvertWToRProc)(struct osi_rwlock *);
#include <afs/param.h>
#include <afs/stds.h>
-#ifndef DJGPP
#include <windows.h>
-#endif /* !DJGPP */
#include "osi.h"
#include <stdlib.h>
void osi_panic(char *msgp, char *filep, long line)
{
- osi_LogPanic(filep, line);
+ if (notifFunc)
+ (*notifFunc)(msgp, filep, line);
- if (notifFunc)
- (*notifFunc)(msgp, filep, line);
+ osi_LogPanic(msgp, filep, line);
}
/* get time in seconds since some relatively recent time */