From 64a1b3101fde534f10054f9f4890243c95e2c728 Mon Sep 17 00:00:00 2001
From: Derrick Brashear
Date: Thu, 4 Jun 2009 00:15:05 +0000
Subject: [PATCH] unroll-experimental-rx-20090603

LICENSE IPL10

reconverge Rx with that in 1.5.x in prep for git
---
 src/libafsrpc/Makefile.in |    2 +-
 src/rx/Makefile.in        |   24 +-
 src/rx/NTMakefile         |    5 +-
 src/rx/rx.c               |  626 ++++++++++++++++++--------------------
 src/rx/rx.h               |  258 ++++++++--------
 src/rx/rx_clock.c         |    2 -
 src/rx/rx_conncache.c     |    2 -
 src/rx/rx_event.c         |    2 -
 src/rx/rx_getaddr.c       |    2 -
 src/rx/rx_globals.c       |    5 -
 src/rx/rx_globals.h       |   17 +-
 src/rx/rx_internal.h      |  181 -----------
 src/rx/rx_kcommon.c       |   11 +-
 src/rx/rx_kcommon.h       |    1 -
 src/rx/rx_lwp.c           |    5 +-
 src/rx/rx_misc.c          |    1 -
 src/rx/rx_multi.c         |    4 -
 src/rx/rx_null.c          |    2 -
 src/rx/rx_packet.c        |  194 ++++++------
 src/rx/rx_pthread.c       |    1 -
 src/rx/rx_rdwr.c          |    2 -
 src/rx/rx_trace.c         |    1 -
 src/rx/rx_user.c          |   17 +-
 src/rx/xdr.c              |    1 -
 src/rx/xdr_array.c        |    1 -
 src/rx/xdr_refernce.c     |    1 -
 src/rx/xdr_rx.c           |    2 -
 src/rxdebug/rxdebug.c     |    2 -
 28 files changed, 551 insertions(+), 821 deletions(-)
 delete mode 100644 src/rx/rx_internal.h

diff --git a/src/libafsrpc/Makefile.in b/src/libafsrpc/Makefile.in
index a0b471d60..fe8643832 100644
--- a/src/libafsrpc/Makefile.in
+++ b/src/libafsrpc/Makefile.in
@@ -185,7 +185,7 @@ rx_trace.o: ${RX}/rx_trace.c
 rx_multi.o: ${RX}/rx_multi.c
 	${CCRULE} ${RX}/rx_multi.c

-${RXOBJS}: ${RX}/rx.h ${RX}/rx_user.h ${RX}/rx_globals.h ${RX}/rx_clock.h ${RX}/rx_queue.h ${RX}/rx_event.h ${RX}/rx_internal.h
+${RXOBJS}: ${RX}/rx.h ${RX}/rx_user.h ${RX}/rx_globals.h ${RX}/rx_clock.h ${RX}/rx_queue.h ${RX}/rx_event.h

 rxkad_client.o: ${RXKAD}/rxkad_client.c
 	${CCRULE} ${RXKAD}/rxkad_client.c
diff --git a/src/rx/Makefile.in b/src/rx/Makefile.in
index 5aad9bc7d..f76a8687e 100644
--- a/src/rx/Makefile.in
+++ b/src/rx/Makefile.in
@@ -49,33 +49,33 @@ RX_component_version_number.c: AFS_component_version_number.c

 RX_component_version_number.o: RX_component_version_number.c

-${RXOBJS}: ${BASICINCLS} rx.h rx_user.h rx_globals.h rx_prototypes.h rx_internal.h
+${RXOBJS}: ${BASICINCLS} rx.h rx_user.h rx_globals.h rx_prototypes.h

-${MULTIOBJS}: rx.h rx_multi.h rx_prototypes.h rx_internal.h
+${MULTIOBJS}: rx.h rx_multi.h rx_prototypes.h

 ${XDROBJS}: xdr.h xdr_prototypes.h

 rxperf.o: rx.h rx_null.h rx_globals.h rx_prototypes.h

-rx_user.o: rx.h rx_user.h rx_prototypes.h rx_internal.h
+rx_user.o: rx.h rx_user.h rx_prototypes.h

-rx_packet.o: rx_packet.c rx_packet.h rx_internal.h rx.h
+rx_packet.o: rx_packet.c rx_packet.h rx.h

-rx_rdwr.o: rx_rdwr.c rx.h rx_prototypes.h rx_internal.h
+rx_rdwr.o: rx_rdwr.c rx.h rx_prototypes.h

-rx.o: rx.h rx_user.h rx_prototypes.h rx_internal.h
+rx.o: rx.h rx_user.h rx_prototypes.h

-rx_conncache.o: rx.h rx_prototypes.h rx_internal.h
+rx_conncache.o: rx.h rx_prototypes.h

-rx_trace.o: rx_trace.h rx_internal.h
+rx_trace.o: rx_trace.h

-rx_getaddr.o: rx.h rx_getaddr.c rx_prototypes.h rx_internal.h
+rx_getaddr.o: rx.h rx_getaddr.c rx_prototypes.h

-rx_globals.o: rx.h rx_user.h rx_globals.h rx_prototypes.h rx_internal.h
+rx_globals.o: rx.h rx_user.h rx_globals.h rx_prototypes.h

-xdr_rx.o: xdr.h rx.h xdr_prototypes.h rx_prototypes.h rx_internal.h
+xdr_rx.o: xdr.h rx.h xdr_prototypes.h rx_prototypes.h

-xdr_refernce.o: xdr_refernce.c xdr.h xdr_prototypes.h rx_internal.h
+xdr_refernce.o: xdr_refernce.c xdr.h xdr_prototypes.h

 rxperf: rxperf.o librx.a
 	${CC} -o $@ rxperf.o ${LIBS}
diff --git a/src/rx/NTMakefile b/src/rx/NTMakefile
index a7fc124da..5707c9391 100644
--- a/src/rx/NTMakefile
+++ b/src/rx/NTMakefile
@@ -55,10 +55,7 @@ INCFILES = \
 	$(INCFILEDIR)\rx_prototypes.h \
 	$(INCFILEDIR)\xdr.h

-LOCAL_HEADERS = \
-	rx_internal.h
-
-$(LIBOBJS): $(INCFILES) $(LOCAL_HEADERS)
+$(LIBOBJS): $(INCFILES)

 $(MULTIOBJS): rx_multi.h
diff --git a/src/rx/rx.c b/src/rx/rx.c
index 2b96bda54..48182a734 100644
--- a/src/rx/rx.c
+++ b/src/rx/rx.c
@@ -69,7 +69,6 @@ RCSID
 #include "rx_kernel.h"
 #include "rx_clock.h"
 #include "rx_queue.h"
-#include "rx_internal.h"
 #include "rx.h"
 #include "rx_globals.h"
 #include "rx_trace.h"
@@ -101,7 +100,6 @@ extern afs_int32 afs_termState;
 # include
 # include
 #endif
-# include "rx_internal.h"
 # include "rx.h"
 # include "rx_user.h"
 # include "rx_clock.h"
@@ -448,7 +446,7 @@ rx_InitHost(u_int host, u_int port)
     if (afs_winsockInit() < 0)
 	return -1;
 #endif
-    
+
 #ifndef KERNEL
     /*
      * Initialize anything necessary to provide a non-premptive threading
@@ -456,10 +454,10 @@ rx_InitHost(u_int host, u_int port)
      */
     rxi_InitializeThreadSupport();
 #endif
-    
+
     /* Allocate and initialize a socket for client and perhaps server
      * connections. */
-    
+
     rx_socket = rxi_GetHostUDPSocket(host, (u_short) port);
     if (rx_socket == OSI_NULLSOCKET) {
 	UNLOCK_RX_INIT;
@@ -555,7 +553,9 @@ rx_InitHost(u_int host, u_int port)
     rx_SetEpoch(tv.tv_sec);	/* Start time of this package, rxkad
 				 * will provide a randomer value. */
 #endif
-    rx_MutexAdd(rxi_dataQuota, rx_extraQuota, rx_quota_mutex); /* + extra pkts caller asked to rsrv */
+    MUTEX_ENTER(&rx_quota_mutex);
+    rxi_dataQuota += rx_extraQuota;	/* + extra pkts caller asked to rsrv */
+    MUTEX_EXIT(&rx_quota_mutex);
     /* *Slightly* random start time for the cid.  This is just to help
      * out with the hashing function at the peer */
     rx_nextCid = ((tv.tv_sec ^ tv.tv_usec) << RX_CIDSHIFT);
@@ -796,84 +796,55 @@ rx_NewConnection(afs_uint32 shost, u_short sport, u_short sservice,
 		 int serviceSecurityIndex)
 {
     int hashindex, i;
-    afs_int32 cix, nclones;
-    struct rx_connection *conn, *tconn, *ptconn;
+    afs_int32 cid;
+    struct rx_connection *conn;

     SPLVAR;

     clock_NewTime();
     dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %x, serviceSecurityIndex %d)\n", ntohl(shost), ntohs(sport), sservice, securityObject, serviceSecurityIndex));

+    /* Vasilsi said: "NETPRI protects Cid and Alloc", but can this be true in
+     * the case of kmem_alloc? */
+    conn = rxi_AllocConnection();
+#ifdef RX_ENABLE_LOCKS
+    MUTEX_INIT(&conn->conn_call_lock, "conn call lock", MUTEX_DEFAULT, 0);
+    MUTEX_INIT(&conn->conn_data_lock, "conn data lock", MUTEX_DEFAULT, 0);
+    CV_INIT(&conn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
+#endif
     NETPRI;
     MUTEX_ENTER(&rx_connHashTable_lock);
-
-    /*
-     * allocate the connection and all of its clones.
-     * clones are flagged as such and have their
-     * parent set to the 0th connection object.
-     */
-    for (nclones = rx_max_clones_per_connection,
-	     conn = tconn = 0,
-	     cix = 0;
-	 cix <= nclones;
-	 ++cix, ptconn = tconn) {
-
-	tconn = rxi_AllocConnection();
-	tconn->cid = (rx_nextCid += RX_MAXCALLS);
-	tconn->type = RX_CLIENT_CONNECTION;
-	tconn->epoch = rx_epoch;
-	tconn->peer = rxi_FindPeer(shost, sport, 0, 1);
-	tconn->serviceId = sservice;
-	tconn->securityObject = securityObject;
-	tconn->securityData = (void *) 0;
-	tconn->securityIndex = serviceSecurityIndex;
-	tconn->ackRate = RX_FAST_ACK_RATE;
-	tconn->nSpecific = 0;
-	tconn->specific = NULL;
-	tconn->challengeEvent = NULL;
-	tconn->delayedAbortEvent = NULL;
-	tconn->abortCount = 0;
-	tconn->error = 0;
-
-	for (i = 0; i < RX_MAXCALLS; i++) {
-	    tconn->twind[i] = rx_initSendWindow;
-	    tconn->rwind[i] = rx_initReceiveWindow;
-	}
-
-	if (cix == 0) {
-	    conn = tconn;
-	    conn->nclones = nclones;
-	    conn->parent = 0;
-	    conn->next_clone = 0;
-	    rx_SetConnDeadTime(conn, rx_connDeadTime);
-	} else {
-	    tconn->nclones = 0;
-	    tconn->flags |= RX_CLONED_CONNECTION;
-	    tconn->parent = conn;
-	    ptconn->next_clone = tconn;
-	    tconn->secondsUntilDead = 0;
-	    tconn->secondsUntilPing = 0;
-	}
-
-	/* generic connection setup */
-#ifdef RX_ENABLE_LOCKS
-	MUTEX_INIT(&tconn->conn_call_lock, "conn call lock", MUTEX_DEFAULT,
-		   0);
-	MUTEX_INIT(&tconn->conn_data_lock, "conn data lock", MUTEX_DEFAULT,
-		   0);
-	CV_INIT(&tconn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
-#endif
-	RXS_NewConnection(securityObject, tconn);
-	hashindex =
-	    CONN_HASH(shost, sport, tconn->cid, tconn->epoch,
-		      RX_CLIENT_CONNECTION);
-	rx_AtomicIncrement_NL(tconn->refCount);	/* no lock required since only this thread knows */
-	tconn->next = rx_connHashTable[hashindex];
-	rx_connHashTable[hashindex] = tconn;
-	if (rx_stats_active)
-	    rx_AtomicIncrement(rx_stats.nClientConns, rx_stats_mutex);
+    cid = (rx_nextCid += RX_MAXCALLS);
+    conn->type = RX_CLIENT_CONNECTION;
+    conn->cid = cid;
+    conn->epoch = rx_epoch;
+    conn->peer = rxi_FindPeer(shost, sport, 0, 1);
+    conn->serviceId = sservice;
+    conn->securityObject = securityObject;
+    conn->securityData = (void *) 0;
+    conn->securityIndex = serviceSecurityIndex;
+    rx_SetConnDeadTime(conn, rx_connDeadTime);
+    conn->ackRate = RX_FAST_ACK_RATE;
+    conn->nSpecific = 0;
+    conn->specific = NULL;
+    conn->challengeEvent = NULL;
+    conn->delayedAbortEvent = NULL;
+    conn->abortCount = 0;
+    conn->error = 0;
+    for (i = 0; i < RX_MAXCALLS; i++) {
+	conn->twind[i] = rx_initSendWindow;
+	conn->rwind[i] = rx_initReceiveWindow;
     }
-    
+
+    RXS_NewConnection(securityObject, conn);
+    hashindex =
+	CONN_HASH(shost, sport, conn->cid, conn->epoch, RX_CLIENT_CONNECTION);
+
+    conn->refCount++;		/* no lock required since only this thread knows... */
+    conn->next = rx_connHashTable[hashindex];
+    rx_connHashTable[hashindex] = conn;
+    if (rx_stats_active)
+	rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
     MUTEX_EXIT(&rx_connHashTable_lock);
     USERPRI;
     return conn;
@@ -884,15 +855,12 @@ rx_SetConnDeadTime(struct rx_connection *conn, int seconds)
 {
     /* The idea is to set the dead time to a value that allows several
      * keepalives to be dropped without timing out the connection. */
-    struct rx_connection *tconn =
-	(rx_IsClonedConn(conn)) ? conn->parent : conn;
-
-    tconn->secondsUntilDead = MAX(seconds, 6);
-    tconn->secondsUntilPing = rx_ConnSecondsUntilDead(tconn) / 6;
+    conn->secondsUntilDead = MAX(seconds, 6);
+    conn->secondsUntilPing = conn->secondsUntilDead / 6;
 }

-rx_atomic_t rxi_lowPeerRefCount = 0;
-rx_atomic_t rxi_lowConnRefCount = 0;
+int rxi_lowPeerRefCount = 0;
+int rxi_lowConnRefCount = 0;

 /*
  * Cleanup a connection that was destroyed in rxi_DestroyConnectioNoLock.
@@ -914,23 +882,26 @@ rxi_CleanupConnection(struct rx_connection *conn)
      * idle (refCount == 0) after rx_idlePeerTime (60 seconds) have passed.
      */
     MUTEX_ENTER(&rx_peerHashTable_lock);
-    if (rx_AtomicDecrement_NL(conn->peer->refCount) < 1) {
+    if (conn->peer->refCount < 2) {
 	conn->peer->idleWhen = clock_Sec();
-	if (rx_AtomicPeek_NL(conn->peer->refCount) < 0) {
-	    rx_AtomicSwap_NL(&conn->peer->refCount, 0);
-	    dpf(("UNDERCOUNT(peer %x)\n", conn->peer));
-	    if (rx_stats_active)
-		rx_AtomicIncrement(rxi_lowPeerRefCount, rx_stats_mutex);
+	if (conn->peer->refCount < 1) {
+	    conn->peer->refCount = 1;
+	    if (rx_stats_active) {
+		MUTEX_ENTER(&rx_stats_mutex);
+		rxi_lowPeerRefCount++;
+		MUTEX_EXIT(&rx_stats_mutex);
+	    }
 	}
     }
+    conn->peer->refCount--;
     MUTEX_EXIT(&rx_peerHashTable_lock);

-    if (rx_stats_active)
+    if (rx_stats_active) {
 	if (conn->type == RX_SERVER_CONNECTION)
-	    rx_AtomicDecrement(rx_stats.nServerConns, rx_stats_mutex);
+	    rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
 	else
-	    rx_AtomicDecrement(rx_stats.nClientConns, rx_stats_mutex);
+	    rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+    }
 #ifndef KERNEL
     if (conn->specific) {
@@ -957,36 +928,7 @@ rxi_CleanupConnection(struct rx_connection *conn)
 void
 rxi_DestroyConnection(struct rx_connection *conn)
 {
-    struct rx_connection *tconn, *dtconn;
-
     MUTEX_ENTER(&rx_connHashTable_lock);
-
-    /* destroy any clones that might exist */
-    if (!rx_IsClonedConn(conn)) {
-	tconn = conn->next_clone;
-	conn->next_clone = 0;	/* once */
-
-	while (tconn) {
-	    dtconn = tconn;
-	    tconn = tconn->next_clone;
-	    rxi_DestroyConnectionNoLock(dtconn);
-	    /*
-	     * if destroyed dtconn will be the head of
-	     * rx_connCleanup_list.  Remove it and clean
-	     * it up now as no one else is holding a
-	     * reference to it.
-	     */
-	    if (dtconn == rx_connCleanup_list) {
-		rx_connCleanup_list = rx_connCleanup_list->next;
-		MUTEX_EXIT(&rx_connHashTable_lock);
-		/* rxi_CleanupConnection will free dtconn */
-		rxi_CleanupConnection(dtconn);
-		MUTEX_ENTER(&rx_connHashTable_lock);
-		(conn->nclones)--;
-	    }
-	}			/* while(tconn) */
-    }
-    /* !rx_IsCloned */
     rxi_DestroyConnectionNoLock(conn);
     /* conn should be at the head of the cleanup list */
     if (conn == rx_connCleanup_list) {
@@ -1014,15 +956,17 @@ rxi_DestroyConnectionNoLock(struct rx_connection *conn)
     NETPRI;

     MUTEX_ENTER(&conn->conn_data_lock);
-    /* This requires the atomic type to be signed */
-    if (rx_AtomicDecrement_NL(conn->refCount) < 0) {
-	dpf(("UNDERCOUNT(conn %x)\n", conn));
+    if (conn->refCount > 0)
+	conn->refCount--;
+    else {
 	if (rx_stats_active) {
-	    rx_AtomicIncrement(rxi_lowConnRefCount, rx_stats_mutex);
+	    MUTEX_ENTER(&rx_stats_mutex);
+	    rxi_lowConnRefCount++;
+	    MUTEX_EXIT(&rx_stats_mutex);
 	}
     }

-    if ((rx_AtomicPeek_NL(conn->refCount) > 0) || (conn->flags & RX_CONN_BUSY)) {
+    if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
 	/* Busy; wait till the last guy before proceeding */
 	MUTEX_EXIT(&conn->conn_data_lock);
 	USERPRI;
@@ -1079,7 +1023,9 @@ rxi_DestroyConnectionNoLock(struct rx_connection *conn)
     if (havecalls) {
 	/* Don't destroy the connection if there are any call
 	 * structures still in use */
-	rx_MutexOr(conn->flags, RX_CONN_DESTROY_ME, conn->conn_data_lock);
+	MUTEX_ENTER(&conn->conn_data_lock);
+	conn->flags |= RX_CONN_DESTROY_ME;
+	MUTEX_EXIT(&conn->conn_data_lock);
 	USERPRI;
 	return;
     }
@@ -1143,7 +1089,9 @@ rx_GetConnection(struct rx_connection *conn)
     SPLVAR;

     NETPRI;
-    rx_AtomicIncrement(conn->refCount, conn->conn_data_lock);
+    MUTEX_ENTER(&conn->conn_data_lock);
+    conn->refCount++;
+    MUTEX_EXIT(&conn->conn_data_lock);
     USERPRI;
 }
@@ -1219,53 +1167,44 @@ rx_NewCall(struct rx_connection *conn)
 #else
 	osi_rxSleep(conn);
 #endif
-	rx_MutexDecrement(conn->makeCallWaiters, conn->conn_data_lock);
-    } else {
-	MUTEX_EXIT(&conn->conn_data_lock);
-    }
+	MUTEX_ENTER(&conn->conn_data_lock);
+	conn->makeCallWaiters--;
+    }
+    MUTEX_EXIT(&conn->conn_data_lock);

-    /* search for next free call on this connection or
-     * its clones, if any */
     for (;;) {
-	struct rx_connection *tconn;
-
-	for (tconn = conn; tconn; tconn = tconn->next_clone) {
-	    for (i = 0; i < RX_MAXCALLS; i++) {
-		call = tconn->call[i];
-		if (call) {
-		    MUTEX_ENTER(&call->lock);
-		    if (call->state == RX_STATE_DALLY) {
-			rxi_ResetCall(call, 0);
-			(*call->callNumber)++;
-			goto have_call;
-		    }
-		    MUTEX_EXIT(&call->lock);
-		} else {
-		    call = rxi_NewCall(tconn, i);
-		    goto have_call;
-		}
-	    }			/* for i < RX_MAXCALLS */
+	for (i = 0; i < RX_MAXCALLS; i++) {
+	    call = conn->call[i];
+	    if (call) {
+		MUTEX_ENTER(&call->lock);
+		if (call->state == RX_STATE_DALLY) {
+		    rxi_ResetCall(call, 0);
+		    (*call->callNumber)++;
+		    break;
+		}
+		MUTEX_EXIT(&call->lock);
+	    } else {
+		call = rxi_NewCall(conn, i);
+		break;
+	    }
+	}
+	if (i < RX_MAXCALLS) {
+	    break;
 	}
-
-	/*
-	 * to be here, all available calls for this connection (and all
-	 * of its clones) must be in use
-	 */
 	MUTEX_ENTER(&conn->conn_data_lock);
 	conn->flags |= RX_CONN_MAKECALL_WAITING;
 	conn->makeCallWaiters++;
 	MUTEX_EXIT(&conn->conn_data_lock);
-
+
 #ifdef RX_ENABLE_LOCKS
 	CV_WAIT(&conn->conn_call_cv, &conn->conn_call_lock);
 #else
 	osi_rxSleep(conn);
 #endif
-	rx_MutexDecrement(conn->makeCallWaiters, conn->conn_data_lock);
-    }				/* for ;; */
-
-  have_call:
+	MUTEX_ENTER(&conn->conn_data_lock);
+	conn->makeCallWaiters--;
+	MUTEX_EXIT(&conn->conn_data_lock);
+    }
     /*
      * Wake up anyone else who might be giving us a chance to
      * run (see code above that avoids resource starvation).
@@ -1280,7 +1219,7 @@ rx_NewCall(struct rx_connection *conn)

     /* Client is initially in send mode */
     call->state = RX_STATE_ACTIVE;
-    call->error = rx_ConnError(conn);
+    call->error = conn->error;
     if (call->error)
 	call->mode = RX_MODE_ERROR;
     else
@@ -1312,7 +1251,7 @@ rx_NewCall(struct rx_connection *conn)
     dpf(("rx_NewCall(call %x)\n", call));

     return call;
-}				/* rx_NewCall */
+}

 int
 rxi_HasActiveCalls(struct rx_connection *aconn)
@@ -1436,7 +1375,7 @@ rx_NewServiceHost(afs_uint32 host, u_short port, u_short serviceId,
 	    if (socket == OSI_NULLSOCKET) {
 		/* If we don't already have a socket (from another
 		 * service on same port) get a new one */
-		socket = rxi_GetHostUDPSocket(htonl(INADDR_ANY), port);
+		socket = rxi_GetHostUDPSocket(host, port);
 		if (socket == OSI_NULLSOCKET) {
 		    USERPRI;
 		    rxi_FreeService(tservice);
@@ -1560,8 +1499,11 @@ rxi_ServerProc(int threadID, struct rx_call *newcall, osi_socket * socketp)
 	    (*tservice->afterProc) (call, code);

 	rx_EndCall(call, code);
-	if (rx_stats_active)
-	    rx_MutexIncrement(rxi_nCalls, rx_stats_mutex);
+	if (rx_stats_active) {
+	    MUTEX_ENTER(&rx_stats_mutex);
+	    rxi_nCalls++;
+	    MUTEX_EXIT(&rx_stats_mutex);
+	}
     }
 }
@@ -1718,7 +1660,9 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)

 	    if (call->flags & RX_CALL_WAIT_PROC) {
 		call->flags &= ~RX_CALL_WAIT_PROC;
-		rx_MutexDecrement(rx_nWaiting, rx_waiting_mutex);
+		MUTEX_ENTER(&rx_waiting_mutex);
+		rx_nWaiting--;
+		MUTEX_EXIT(&rx_waiting_mutex);
 	    }

 	    if (call->state != RX_STATE_PRECALL || call->error) {
@@ -2152,7 +2096,7 @@ rx_Finalize(void)
 		next = conn->next;
 		if (conn->type == RX_CLIENT_CONNECTION) {
 		    /* MUTEX_ENTER(&conn->conn_data_lock); when used in kernel */
-		    rx_AtomicIncrement(conn->refCount, conn->conn_data_lock);
+		    conn->refCount++;
 		    /* MUTEX_EXIT(&conn->conn_data_lock); when used in kernel */
 #ifdef RX_ENABLE_LOCKS
 		    rxi_DestroyConnectionNoLock(conn);
@@ -2269,7 +2213,7 @@ rxi_NewCall(struct rx_connection *conn, int channel)
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
 	queue_Remove(call);
 	if (rx_stats_active)
-	    rx_AtomicDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+	    rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
 	MUTEX_EXIT(&rx_freeCallQueue_lock);
 	MUTEX_ENTER(&call->lock);
 	CLEAR_CALL_QUEUE_LOCK(call);
@@ -2291,7 +2235,7 @@ rxi_NewCall(struct rx_connection *conn, int channel)
 	rx_allCallsp = call;
 	call->call_id =
 #endif /* RXDEBUG_PACKET */
-	rx_AtomicIncrement(rx_stats.nCallStructs, rx_stats_mutex);
+	rx_MutexIncrement(rx_stats.nCallStructs, rx_stats_mutex);

 	MUTEX_EXIT(&rx_freeCallQueue_lock);
 	MUTEX_INIT(&call->lock, "call lock", MUTEX_DEFAULT, NULL);
@@ -2365,7 +2309,7 @@ rxi_FreeCall(struct rx_call *call)
     queue_Append(&rx_freeCallQueue, call);
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
     if (rx_stats_active)
-	rx_AtomicIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+	rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
     MUTEX_EXIT(&rx_freeCallQueue_lock);

     /* Destroy the connection if it was previously slated for
@@ -2381,7 +2325,9 @@ rxi_FreeCall(struct rx_call *call)
      * call lock held or are going through this section of code.
      */
     if (conn->flags & RX_CONN_DESTROY_ME && !(conn->flags & RX_CONN_MAKECALL_WAITING)) {
-	rx_AtomicIncrement(conn->refCount, conn->conn_data_lock);
+	MUTEX_ENTER(&conn->conn_data_lock);
+	conn->refCount++;
+	MUTEX_EXIT(&conn->conn_data_lock);
 #ifdef RX_ENABLE_LOCKS
 	if (haveCTLock)
 	    rxi_DestroyConnectionNoLock(conn);
@@ -2467,7 +2413,7 @@ rxi_SetPeerMtu(afs_uint32 host, afs_uint32 port, int mtu)
  * structure hanging off a connection structure */
 struct rx_peer *
 rxi_FindPeer(afs_uint32 host, u_short port,
-	     struct rx_peer *origPeer, int create)
+             struct rx_peer *origPeer, int create)
 {
     struct rx_peer *pp;
     int hashIndex;
@@ -2475,12 +2421,12 @@ rxi_FindPeer(afs_uint32 host, u_short port,
     MUTEX_ENTER(&rx_peerHashTable_lock);
     for (pp = rx_peerHashTable[hashIndex]; pp; pp = pp->next) {
 	if ((pp->host == host) && (pp->port == port))
-	    break;
+            break;
     }
     if (!pp) {
-	if (create) {
-	    pp = rxi_AllocPeer();	/* This bzero's *pp */
-	    pp->host = host;	/* set here or in InitPeerParams is zero */
+        if (create) {
+            pp = rxi_AllocPeer();       /* This bzero's *pp */
+            pp->host = host;            /* set here or in InitPeerParams is zero */
 	    pp->port = port;
 	    MUTEX_INIT(&pp->peer_lock, "peer_lock", MUTEX_DEFAULT, 0);
 	    queue_Init(&pp->congestionQueue);
@@ -2488,15 +2434,15 @@ rxi_FindPeer(afs_uint32 host, u_short port,
 	    pp->next = rx_peerHashTable[hashIndex];
 	    rx_peerHashTable[hashIndex] = pp;
 	    rxi_InitPeerParams(pp);
-	    if (rx_stats_active)
-		rx_AtomicIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
 	}
     }
     if (pp && create) {
-	rx_AtomicIncrement_NL(pp->refCount);
+	pp->refCount++;
     }
     if (origPeer)
-	rx_AtomicDecrement_NL(origPeer->refCount);
+	origPeer->refCount--;
     MUTEX_EXIT(&rx_peerHashTable_lock);
     return pp;
 }
@@ -2600,10 +2546,12 @@ rxi_FindConnection(osi_socket socket, afs_int32 host,
 	if (service->newConnProc)
 	    (*service->newConnProc) (conn);
 	if (rx_stats_active)
-	    rx_AtomicIncrement(rx_stats.nServerConns, rx_stats_mutex);
+	    rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
     }

-    rx_AtomicIncrement(conn->refCount, conn->conn_data_lock);
+    MUTEX_ENTER(&conn->conn_data_lock);
+    conn->refCount++;
+    MUTEX_EXIT(&conn->conn_data_lock);

     rxLastConn = conn;		/* store this connection as the last conn used */
     MUTEX_EXIT(&rx_connHashTable_lock);
@@ -2709,12 +2657,12 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,

     /* If the connection is in an error state, send an abort packet and ignore
      * the incoming packet */
-    if (rx_ConnError(conn)) {
+    if (conn->error) {
 	/* Don't respond to an abort packet--we don't want loops! */
 	MUTEX_ENTER(&conn->conn_data_lock);
 	if (np->header.type != RX_PACKET_TYPE_ABORT)
 	    np = rxi_SendConnectionAbort(conn, np, 1, 0);
-	rx_AtomicDecrement_NL(conn->refCount);
+	conn->refCount--;
 	MUTEX_EXIT(&conn->conn_data_lock);
 	return np;
     }
@@ -2727,22 +2675,30 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
 	    afs_int32 errcode = ntohl(rx_GetInt32(np, 0));
 	    dpf(("rxi_ReceivePacket ABORT rx_GetInt32 = %d", errcode));
 	    rxi_ConnectionError(conn, errcode);
-	    rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+	    MUTEX_ENTER(&conn->conn_data_lock);
+	    conn->refCount--;
+	    MUTEX_EXIT(&conn->conn_data_lock);
 	    return np;
 	}
 	case RX_PACKET_TYPE_CHALLENGE:
 	    tnp = rxi_ReceiveChallengePacket(conn, np, 1);
-	    rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+	    MUTEX_ENTER(&conn->conn_data_lock);
+	    conn->refCount--;
+	    MUTEX_EXIT(&conn->conn_data_lock);
 	    return tnp;
 	case RX_PACKET_TYPE_RESPONSE:
 	    tnp = rxi_ReceiveResponsePacket(conn, np, 1);
-	    rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+	    MUTEX_ENTER(&conn->conn_data_lock);
+	    conn->refCount--;
+	    MUTEX_EXIT(&conn->conn_data_lock);
 	    return tnp;
 	case RX_PACKET_TYPE_PARAMS:
 	case RX_PACKET_TYPE_PARAMS + 1:
 	case RX_PACKET_TYPE_PARAMS + 2:
 	    /* ignore these packet types for now */
-	    rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+	    MUTEX_ENTER(&conn->conn_data_lock);
+	    conn->refCount--;
+	    MUTEX_EXIT(&conn->conn_data_lock);
 	    return np;

@@ -2752,7 +2708,7 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
 	    rxi_ConnectionError(conn, RX_PROTOCOL_ERROR);
 	    MUTEX_ENTER(&conn->conn_data_lock);
 	    tnp = rxi_SendConnectionAbort(conn, np, 1, 0);
-	    rx_AtomicDecrement_NL(conn->refCount);
+	    conn->refCount--;
 	    MUTEX_EXIT(&conn->conn_data_lock);
 	    return tnp;
 	}
@@ -2788,9 +2744,11 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
 	     * then, since this is a client connection we're getting data for
 	     * it must be for the previous call.
 	     */
-	    if (rx_stats_active)
-		rx_AtomicIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
-	    rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+	    MUTEX_ENTER(&conn->conn_data_lock);
+	    conn->refCount--;
+	    MUTEX_EXIT(&conn->conn_data_lock);
 	    return np;
 	}
     }
@@ -2799,13 +2757,15 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
     if (type == RX_SERVER_CONNECTION) {	/* We're the server */
 	if (np->header.callNumber < currentCallNumber) {
-	    if (rx_stats_active)
-		rx_AtomicIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
 #ifdef RX_ENABLE_LOCKS
 	    if (call)
 		MUTEX_EXIT(&call->lock);
 #endif
-	    rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+	    MUTEX_ENTER(&conn->conn_data_lock);
+	    conn->refCount--;
+	    MUTEX_EXIT(&conn->conn_data_lock);
 	    return np;
 	}
 	if (!call) {
@@ -2831,9 +2791,11 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
 		rxi_CallError(call, rx_BusyError);
 		tp = rxi_SendCallAbort(call, np, 1, 0);
 		MUTEX_EXIT(&call->lock);
-		rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+		MUTEX_ENTER(&conn->conn_data_lock);
+		conn->refCount--;
+		MUTEX_EXIT(&conn->conn_data_lock);
 		if (rx_stats_active)
-		    rx_AtomicIncrement(rx_stats.nBusies, rx_stats_mutex);
+		    rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
 		return tp;
 	    }
 	    rxi_KeepAliveOn(call);
@@ -2869,7 +2831,9 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
 		tp = rxi_SendSpecial(call, conn, np, RX_PACKET_TYPE_BUSY,
 				     NULL, 0, 1);
 		MUTEX_EXIT(&call->lock);
-		rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+		MUTEX_ENTER(&conn->conn_data_lock);
+		conn->refCount--;
+		MUTEX_EXIT(&conn->conn_data_lock);
 		return tp;
 	    }
 	    rxi_ResetCall(call, 0);
@@ -2892,9 +2856,11 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
 		rxi_CallError(call, rx_BusyError);
 		tp = rxi_SendCallAbort(call, np, 1, 0);
 		MUTEX_EXIT(&call->lock);
-		rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+		MUTEX_ENTER(&conn->conn_data_lock);
+		conn->refCount--;
+		MUTEX_EXIT(&conn->conn_data_lock);
 		if (rx_stats_active)
-		    rx_AtomicIncrement(rx_stats.nBusies, rx_stats_mutex);
+		    rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
 		return tp;
 	    }
 	    rxi_KeepAliveOn(call);
@@ -2905,28 +2871,32 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
     /* Ignore all incoming acknowledgements for calls in DALLY state */
     if (call && (call->state == RX_STATE_DALLY)
 	&& (np->header.type == RX_PACKET_TYPE_ACK)) {
-	if (rx_stats_active)
-	    rx_AtomicIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
 #ifdef  RX_ENABLE_LOCKS
 	if (call) {
 	    MUTEX_EXIT(&call->lock);
 	}
 #endif
-	rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+	MUTEX_ENTER(&conn->conn_data_lock);
+	conn->refCount--;
+	MUTEX_EXIT(&conn->conn_data_lock);
 	return np;
     }

     /* Ignore anything that's not relevant to the current call.  If there
      * isn't a current call, then no packet is relevant. */
     if (!call || (np->header.callNumber != currentCallNumber)) {
-	if (rx_stats_active)
-	    rx_AtomicIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
 #ifdef RX_ENABLE_LOCKS
 	if (call) {
 	    MUTEX_EXIT(&call->lock);
 	}
 #endif
-	rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+	MUTEX_ENTER(&conn->conn_data_lock);
+	conn->refCount--;
+	MUTEX_EXIT(&conn->conn_data_lock);
 	return np;
     }
     /* If the service security object index stamped in the packet does not
@@ -2935,7 +2905,9 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
 #ifdef RX_ENABLE_LOCKS
 	MUTEX_EXIT(&call->lock);
 #endif
-	rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+	MUTEX_ENTER(&conn->conn_data_lock);
+	conn->refCount--;
+	MUTEX_EXIT(&conn->conn_data_lock);
 	return np;
     }
@@ -2982,9 +2954,11 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
 	     * XXX code in receiveackpacket.  */
 	    if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
 		if (rx_stats_active)
-		    rx_AtomicIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+		    rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
 		MUTEX_EXIT(&call->lock);
-		rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+		MUTEX_ENTER(&conn->conn_data_lock);
+		conn->refCount--;
+		MUTEX_EXIT(&conn->conn_data_lock);
 		return np;
 	    }
 	}
@@ -3046,7 +3020,9 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
 	    dpf(("rxi_ReceivePacket ABORT rx_DataOf = %d", errdata));
 	    rxi_CallError(call, errdata);
 	    MUTEX_EXIT(&call->lock);
-	    rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+	    MUTEX_ENTER(&conn->conn_data_lock);
+	    conn->refCount--;
+	    MUTEX_EXIT(&conn->conn_data_lock);
 	    return np;		/* xmitting; drop packet */
 	}
     case RX_PACKET_TYPE_BUSY:
@@ -3071,7 +3047,9 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
 	    break;
 #else /* RX_ENABLE_LOCKS */
 	    MUTEX_EXIT(&call->lock);
-	    rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+	    MUTEX_ENTER(&conn->conn_data_lock);
+	    conn->refCount--;
+	    MUTEX_EXIT(&conn->conn_data_lock);
 	    return np;		/* xmitting; drop packet */
 #endif /* RX_ENABLE_LOCKS */
 	}
@@ -3092,7 +3070,9 @@ rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
      * (if not, then the time won't actually be re-evaluated here). */
     call->lastReceiveTime = clock_Sec();
     MUTEX_EXIT(&call->lock);
-    rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+    MUTEX_ENTER(&conn->conn_data_lock);
+    conn->refCount--;
+    MUTEX_EXIT(&conn->conn_data_lock);
     return np;
 }
@@ -3159,7 +3139,7 @@ rxi_CheckReachEvent(struct rxevent *event, void *arg1, void *arg2)
     conn->checkReachEvent = NULL;
     waiting = conn->flags & RX_CONN_ATTACHWAIT;
     if (event)
-	rx_AtomicDecrement_NL(conn->refCount);
+	conn->refCount--;
     MUTEX_EXIT(&conn->conn_data_lock);

     if (waiting) {
@@ -3196,7 +3176,7 @@ rxi_CheckReachEvent(struct rxevent *event, void *arg1, void *arg2)
 	    when.sec += RX_CHECKREACH_TIMEOUT;
 	    MUTEX_ENTER(&conn->conn_data_lock);
 	    if (!conn->checkReachEvent) {
-		rx_AtomicIncrement_NL(conn->refCount);
+		conn->refCount++;
 		conn->checkReachEvent =
 		    rxevent_PostNow(&when, &now, rxi_CheckReachEvent, conn,
 				    NULL);
@@ -3279,7 +3259,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
     struct rx_packet *tnp;
     struct clock when, now;
     if (rx_stats_active)
-	rx_AtomicIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
+	rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);

 #ifdef KERNEL
     /* If there are no packet buffers, drop this new packet, unless we can find
@@ -3290,7 +3270,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
 	rxi_NeedMorePackets = TRUE;
 	MUTEX_EXIT(&rx_freePktQ_lock);
 	if (rx_stats_active)
-	    rx_AtomicIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
+	    rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
 	call->rprev = np->header.serial;
 	rxi_calltrace(RX_TRACE_DROP, call);
 	dpf(("packet %x dropped on receipt - quota problems", np));
@@ -3356,7 +3336,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
 	    if (queue_IsNotEmpty(&call->rq)
 		&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
 		if (rx_stats_active)
-		    rx_AtomicIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+		    rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
 		dpf(("packet %x dropped on receipt - duplicate", np));
 		rxevent_Cancel(call->delayedAckEvent, call,
 			       RX_CALL_REFCOUNT_DELAY);
@@ -3445,7 +3425,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
 	     * application already, then this is a duplicate */
 	    if (seq < call->rnext) {
 		if (rx_stats_active)
-		    rx_AtomicIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+		    rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
 		rxevent_Cancel(call->delayedAckEvent, call,
 			       RX_CALL_REFCOUNT_DELAY);
 		np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
@@ -3473,7 +3453,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
 		/*Check for duplicate packet */
 		if (seq == tp->header.seq) {
 		    if (rx_stats_active)
-			rx_AtomicIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+			rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
 		    rxevent_Cancel(call->delayedAckEvent, call,
 				   RX_CALL_REFCOUNT_DELAY);
 		    np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
@@ -3718,7 +3698,7 @@ rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
     int maxDgramPackets = 0;	/* Set if peer supports AFS 3.5 jumbo datagrams */

     if (rx_stats_active)
-	rx_AtomicIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
+	rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
     ap = (struct rx_ackPacket *)rx_DataOf(np);

     nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
     if (nbytes < 0)
@@ -4010,9 +3990,9 @@ rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
 			sizeof(afs_int32), &tSize);
 	    maxDgramPackets = (afs_uint32) ntohl(tSize);
 	    maxDgramPackets = MIN(maxDgramPackets, rxi_nDgramPackets);
-	    maxDgramPackets = MIN(maxDgramPackets, peer->ifDgramPackets);
-	    if (peer->natMTU < peer->ifMTU)
-		maxDgramPackets = MIN(maxDgramPackets, rxi_AdjustDgramPackets(1, peer->natMTU));
+	    maxDgramPackets =
+		MIN(maxDgramPackets, (int)(peer->ifDgramPackets));
+	    maxDgramPackets = MIN(maxDgramPackets, tSize);
 	    if (maxDgramPackets > 1) {
 		peer->maxDgramPackets = maxDgramPackets;
 		call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
@@ -4546,7 +4526,7 @@ rxi_SendConnectionAbort(struct rx_connection *conn,
     afs_int32 error;
     struct clock when, now;

-    if (!rx_ConnError(conn))
+    if (!conn->error)
 	return packet;

     /* Clients should never delay abort messages */
@@ -4558,7 +4538,7 @@ rxi_SendConnectionAbort(struct rx_connection *conn,
 	if (conn->delayedAbortEvent) {
 	    rxevent_Cancel(conn->delayedAbortEvent, (struct rx_call *)0, 0);
 	}
-	error = htonl(rx_ConnError(conn));
+	error = htonl(conn->error);
 	conn->abortCount++;
 	MUTEX_EXIT(&conn->conn_data_lock);
 	packet =
@@ -4576,20 +4556,17 @@ rxi_SendConnectionAbort(struct rx_connection *conn,
     return packet;
 }

-/*
- * Associate an error all of the calls owned by a connection.  Called
+/* Associate an error all of the calls owned by a connection.  Called
  * with error non-zero.  This is only for really fatal things, like
  * bad authentication responses.  The connection itself is set in
  * error at this point, so that future packets received will be
- * rejected.
- */
+ * rejected. */
 void
 rxi_ConnectionError(struct rx_connection *conn,
 		    afs_int32 error)
 {
     if (error) {
 	int i;
-	struct rx_connection *tconn;

 	dpf(("rxi_ConnectionError conn %x error %d", conn, error));

@@ -4600,25 +4577,20 @@ rxi_ConnectionError(struct rx_connection *conn,
 	    rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
 	    conn->checkReachEvent = 0;
 	    conn->flags &= ~RX_CONN_ATTACHWAIT;
-	    rx_AtomicDecrement_NL(conn->refCount);
+	    conn->refCount--;
 	}
 	MUTEX_EXIT(&conn->conn_data_lock);
-
-	for ( tconn = rx_IsClonedConn(conn) ? conn->parent : conn;
-	      tconn;
-	      tconn = tconn->next_clone) {
-	    for (i = 0; i < RX_MAXCALLS; i++) {
-		struct rx_call *call = tconn->call[i];
-		if (call) {
-		    MUTEX_ENTER(&call->lock);
-		    rxi_CallError(call, error);
-		    MUTEX_EXIT(&call->lock);
-		}
-	    }
+	for (i = 0; i < RX_MAXCALLS; i++) {
+	    struct rx_call *call = conn->call[i];
+	    if (call) {
+		MUTEX_ENTER(&call->lock);
+		rxi_CallError(call, error);
+		MUTEX_EXIT(&call->lock);
+	    }
 	}
-	rx_SetConnError(conn, error);
+	conn->error = error;
 	if (rx_stats_active)
-	    rx_AtomicIncrement(rx_stats.fatalErrors, rx_stats_mutex);
+	    rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
     }
 }
@@ -5057,7 +5029,7 @@ rxi_SendAck(struct rx_call *call,
 	}
     }
     if (rx_stats_active)
-	rx_AtomicIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
+	rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
 #ifndef RX_ENABLE_TSFPQ
     if (!optionalPacket)
 	rxi_FreePacket(p);
@@ -5082,7 +5054,7 @@ rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
     if (resending)
 	peer->reSends += len;
     if (rx_stats_active)
-	rx_AtomicIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
+	rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
     MUTEX_EXIT(&peer->peer_lock);

     if (list[len - 1]->header.flags & RX_LAST_PACKET) {
@@ -5118,7 +5090,7 @@ rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
 	if (list[i]->header.serial) {
 	    requestAck = 1;
 	    if (rx_stats_active)
-		rx_AtomicIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
+		rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
 	} else {
 	    /* improved RTO calculation- not Karn */
 	    list[i]->firstSent = *now;
@@ -5134,7 +5106,7 @@ rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
 	if (resending)
 	    peer->reSends++;
 	if (rx_stats_active)
-	    rx_AtomicIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
+	    rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
 	MUTEX_EXIT(&peer->peer_lock);

 	/* Tag this packet as not being the last in this group,
@@ -5435,7 +5407,7 @@ rxi_Start(struct rxevent *event,
 		    /* Since we may block, don't trust this */
 		    usenow.sec = usenow.usec = 0;
 		    if (rx_stats_active)
-			rx_AtomicIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
+			rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
 		    continue;	/* Ignore this packet if it has been acknowledged */
 		}
@@ -5684,7 +5656,7 @@ rxi_CheckCall(struct rx_call *call)
 #endif
     /* dead time + RTT + 8*MDEV, rounded up to next second. */
     deadTime =
-	(((afs_uint32) rx_ConnSecondsUntilDead(conn) << 10) +
+	(((afs_uint32) conn->secondsUntilDead << 10) +
 	 ((afs_uint32) conn->peer->rtt >> 3) +
 	 ((afs_uint32) conn->peer->rtt_dev << 1) + 1023) >> 10;
     now = clock_Sec();
@@ -5744,24 +5716,23 @@ rxi_CheckCall(struct rx_call *call)
 	 * attached process can die reasonably gracefully. */
     }
     /* see if we have a non-activity timeout */
-    if (call->startWait && rx_ConnIdleDeadTime(conn)
-	&& ((call->startWait + rx_ConnIdleDeadTime(conn)) < now)) {
+    if (call->startWait && conn->idleDeadTime
+	&& ((call->startWait + conn->idleDeadTime) < now)) {
 	if (call->state == RX_STATE_ACTIVE) {
 	    rxi_CallError(call, RX_CALL_TIMEOUT);
 	    return -1;
 	}
     }
-    if (call->lastSendData && rx_ConnIdleDeadTime(conn)
-	&& (rx_ConnIdleDeadErr(conn) != 0)
-	&& ((call->lastSendData + rx_ConnIdleDeadTime(conn)) < now)) {
+    if (call->lastSendData && conn->idleDeadTime && (conn->idleDeadErr != 0)
+	&& ((call->lastSendData + conn->idleDeadTime) < now)) {
 	if (call->state == RX_STATE_ACTIVE) {
 	    rxi_CallError(call, conn->idleDeadErr);
 	    return -1;
 	}
     }
     /* see if we have a hard timeout */
-    if (rx_ConnHardDeadTime(conn)
-	&& (now > (rx_ConnHardDeadTime(conn) + call->startTime.sec))) {
+    if (conn->hardDeadTime
+	&& (now > (conn->hardDeadTime + call->startTime.sec))) {
 	if (call->state == RX_STATE_ACTIVE)
 	    rxi_CallError(call, RX_CALL_TIMEOUT);
 	return -1;
@@ -5806,7 +5777,7 @@ rxi_KeepAliveEvent(struct rxevent *event, void *arg1, void *dummy)
     }

     conn = call->conn;
-    if ((now - call->lastSendTime) > rx_ConnSecondsUntilPing(conn)) {
+    if ((now - call->lastSendTime) > conn->secondsUntilPing) {
 	/* Don't try to send keepalives if there is unacknowledged data */
 	/* the rexmit code should be good enough, this little hack
 	 * doesn't quite work XXX */
@@ -5824,7 +5795,7 @@ rxi_ScheduleKeepAliveEvent(struct rx_call *call)
 	struct clock when, now;
 	clock_GetTime(&now);
 	when = now;
-	when.sec += rx_ConnSecondsUntilPing(call->conn);
+	when.sec += call->conn->secondsUntilPing;
 	CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
 	call->keepAliveEvent =
 	    rxevent_PostNow(&when, &now, rxi_KeepAliveEvent, call, 0);
@@ -5857,7 +5828,7 @@ rxi_SendDelayedConnAbort(struct rxevent *event,

     MUTEX_ENTER(&conn->conn_data_lock);
     conn->delayedAbortEvent = NULL;
-    error = htonl(rx_ConnError(conn));
+    error = htonl(conn->error);
     conn->abortCount++;
     MUTEX_EXIT(&conn->conn_data_lock);
     packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
@@ -6000,7 +5971,7 @@ rxi_ComputeRoundTripTime(struct rx_packet *p,
 	    rx_stats.maxRtt = *rttp;
 	}
 	clock_Add(&rx_stats.totalRtt, rttp);
-	rx_AtomicIncrement_NL(rx_stats.nRttSamples);
+	rx_stats.nRttSamples++;
 	MUTEX_EXIT(&rx_stats_mutex);
     }
@@ -6118,10 +6089,10 @@ rxi_ReapConnections(struct rxevent *unused, void *unused1, void *unused2)
 		    /* This only actually destroys the connection if
 		     * there are no outstanding calls */
 		    MUTEX_ENTER(&conn->conn_data_lock);
-		    if (!havecalls && (rx_AtomicPeek_NL(conn->refCount) == 0)
+		    if (!havecalls && !conn->refCount
 			&& ((conn->lastSendTime + rx_idleConnectionTime) <
 			    now.sec)) {
-			rx_AtomicIncrement_NL(conn->refCount);	/* it will be decr in rx_DestroyConn */
+			conn->refCount++;	/* it will be decr in rx_DestroyConn */
 			MUTEX_EXIT(&conn->conn_data_lock);
 #ifdef RX_ENABLE_LOCKS
 			rxi_DestroyConnectionNoLock(conn);
@@ -6164,7 +6135,7 @@ rxi_ReapConnections(struct rxevent *unused, void *unused1, void *unused2)
 	    for (prev = peer = *peer_ptr; peer; peer = next) {
 		next = peer->next;
 		code = MUTEX_TRYENTER(&peer->peer_lock);
-		if ((code) && (rx_AtomicPeek_NL(peer->refCount) == 0)
+		if ((code) && (peer->refCount == 0)
 		    && ((peer->idleWhen + rx_idlePeerTime) < now.sec)) {
 		    rx_interface_stat_p rpc_stat, nrpc_stat;
 		    size_t space;
@@ -6189,7 +6160,7 @@ rxi_ReapConnections(struct rxevent *unused, void *unused1, void *unused2)
 		    }
 		    rxi_FreePeer(peer);
 		    if (rx_stats_active)
-			rx_AtomicDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+			rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
 		    if (peer == *peer_ptr) {
 			*peer_ptr = next;
 			prev = next;
@@ -6504,70 +6475,57 @@ rx_PrintTheseStats(FILE * file, struct rx_statistics *s, int size,
     }

     fprintf(file, "rx stats: free packets %d, allocs %d, ", (int)freePackets,
-	    rx_AtomicPeek_NL(s->packetRequests));
+	    s->packetRequests);

     if (version >= RX_DEBUGI_VERSION_W_NEWPACKETTYPES) {
 	fprintf(file,
 		"alloc-failures(rcv %u/%u,send %u/%u,ack %u)\n",
-		rx_AtomicPeek_NL(s->receivePktAllocFailures),
-		rx_AtomicPeek_NL(s->receiveCbufPktAllocFailures),
-		rx_AtomicPeek_NL(s->sendPktAllocFailures),
-		rx_AtomicPeek_NL(s->sendCbufPktAllocFailures),
-		rx_AtomicPeek_NL(s->specialPktAllocFailures));
+		s->receivePktAllocFailures, s->receiveCbufPktAllocFailures,
+		s->sendPktAllocFailures, s->sendCbufPktAllocFailures,
+		s->specialPktAllocFailures);
     } else {
 	fprintf(file,
 		"alloc-failures(rcv %u,send %u,ack %u)\n",
-		rx_AtomicPeek_NL(s->receivePktAllocFailures),
-		rx_AtomicPeek_NL(s->sendPktAllocFailures),
-		rx_AtomicPeek_NL(s->specialPktAllocFailures));
+		s->receivePktAllocFailures, s->sendPktAllocFailures,
+		s->specialPktAllocFailures);
     }

     fprintf(file,
 	    "   greedy %u, " "bogusReads %u (last from host %x), "
 	    "noPackets %u, " "noBuffers %u, " "selects %u, "
-	    "sendSelects %u\n",
-	    rx_AtomicPeek_NL(s->socketGreedy),
-	    rx_AtomicPeek_NL(s->bogusPacketOnRead),
-	    rx_AtomicPeek_NL(s->bogusHost),
-	    rx_AtomicPeek_NL(s->noPacketOnRead),
-	    rx_AtomicPeek_NL(s->noPacketBuffersOnRead),
-	    rx_AtomicPeek_NL(s->selects),
-	    rx_AtomicPeek_NL(s->sendSelects));
+	    "sendSelects %u\n", s->socketGreedy, s->bogusPacketOnRead,
+	    s->bogusHost, s->noPacketOnRead, s->noPacketBuffersOnRead,
+	    s->selects, s->sendSelects);
     fprintf(file, "   packets read: ");
     for (i = 0; i < RX_N_PACKET_TYPES; i++) {
-	fprintf(file, "%s %u ", rx_packetTypes[i], rx_AtomicPeek_NL(s->packetsRead[i]));
+	fprintf(file, "%s %u ", rx_packetTypes[i], s->packetsRead[i]);
     }
     fprintf(file, "\n");
     fprintf(file,
 	    "   other read counters: data %u, " "ack %u, " "dup %u "
-	    "spurious %u " "dally %u\n", rx_AtomicPeek_NL(s->dataPacketsRead),
-	    rx_AtomicPeek_NL(s->ackPacketsRead),
-	    rx_AtomicPeek_NL(s->dupPacketsRead),
-	    rx_AtomicPeek_NL(s->spuriousPacketsRead),
-	    rx_AtomicPeek_NL(s->ignorePacketDally));
+	    "spurious %u " "dally %u\n", s->dataPacketsRead,
+	    s->ackPacketsRead, s->dupPacketsRead, s->spuriousPacketsRead,
+	    s->ignorePacketDally);
     fprintf(file, "   packets sent: ");
     for (i = 0; i < RX_N_PACKET_TYPES; i++) {
-	fprintf(file, "%s %u ", rx_packetTypes[i], rx_AtomicPeek_NL(s->packetsSent[i]));
+	fprintf(file, "%s %u ", rx_packetTypes[i], s->packetsSent[i]);
     }
     fprintf(file, "\n");
     fprintf(file,
 	    "   other send counters: ack %u, " "data %u (not resends), "
 	    "resends %u, " "pushed %u, " "acked&ignored %u\n",
-	    rx_AtomicPeek_NL(s->ackPacketsSent),
-	    rx_AtomicPeek_NL(s->dataPacketsSent),
-	    rx_AtomicPeek_NL(s->dataPacketsReSent),
-	    rx_AtomicPeek_NL(s->dataPacketsPushed),
-	    rx_AtomicPeek_NL(s->ignoreAckedPacket));
+	    s->ackPacketsSent, s->dataPacketsSent, s->dataPacketsReSent,
+	    s->dataPacketsPushed, s->ignoreAckedPacket);
     fprintf(file,
 	    "   \t(these should be small) sendFailed %u, " "fatalErrors %u\n",
-	    rx_AtomicPeek_NL(s->netSendFailures), rx_AtomicPeek_NL(s->fatalErrors));
+	    s->netSendFailures, (int)s->fatalErrors);

-    if (rx_AtomicPeek_NL(s->nRttSamples)) {
+    if (s->nRttSamples) {
 	fprintf(file, "   Average rtt is %0.3f, with %d samples\n",
-		clock_Float(&s->totalRtt) / rx_AtomicPeek_NL(s->nRttSamples), rx_AtomicPeek_NL(s->nRttSamples));
+		clock_Float(&s->totalRtt) / s->nRttSamples, s->nRttSamples);

 	fprintf(file, "   Minimum rtt is %0.3f, maximum is %0.3f\n",
 		clock_Float(&s->minRtt), clock_Float(&s->maxRtt));
@@ -6576,11 +6534,8 @@ rx_PrintTheseStats(FILE * file, struct rx_statistics *s, int size,
     fprintf(file,
 	    "   %d server connections, " "%d client connections, "
 	    "%d peer structs, " "%d call structs, " "%d free call structs\n",
-	    rx_AtomicPeek_NL(s->nServerConns),
-	    rx_AtomicPeek_NL(s->nClientConns),
-	    rx_AtomicPeek_NL(s->nPeerStructs),
-	    rx_AtomicPeek_NL(s->nCallStructs),
-	    rx_AtomicPeek_NL(s->nFreeCallStructs));
+	    s->nServerConns, s->nClientConns, s->nPeerStructs,
+	    s->nCallStructs, s->nFreeCallStructs);

 #if !defined(AFS_PTHREAD_ENV) && !defined(AFS_USE_GETTIMEOFDAY)
     fprintf(file, "   %d clock updates\n", clock_nUpdates);
@@ -6746,10 +6701,11 @@ rx_GetServerDebug(osi_socket socket, afs_uint32 remoteAddr,
 		  afs_uint32 * supportedValues)
 {
 #ifndef RXDEBUG
-    afs_int32 rc = -1;
+     afs_int32 rc = -1;
 #else
     afs_int32 rc = 0;
     struct rx_debugIn in;
+    afs_int32 *lp = (afs_int32 *) stat;

     *supportedValues = 0;
     in.type = htonl(RX_DEBUGI_GETSTATS);
@@ -6810,12 +6766,12 @@ rx_GetServerStats(osi_socket socket, afs_uint32 remoteAddr,
 		  afs_uint32 * supportedValues)
 {
 #ifndef RXDEBUG
-    afs_int32 rc = -1;
+     afs_int32 rc = -1;
 #else
     afs_int32 rc = 0;
     struct rx_debugIn in;
-    int i;
     afs_int32 *lp = (afs_int32 *) stat;
+    int i;

     /*
      * supportedValues is currently unused, but added to allow future
@@ -6931,7 +6887,7 @@ rx_GetServerConnections(osi_socket socket, afs_uint32 remoteAddr,
 	for (i = 0; i < RX_MAXCALLS; i++) {
 	    conn->callNumber[i] = ntohl(conn->callNumber[i]);
 	}
-	rx_SetConnError(conn, ntohl(rx_ConnError(conn)));
+	conn->error = ntohl(conn->error);
 	conn->secStats.flags = ntohl(conn->secStats.flags);
 	conn->secStats.expires = ntohl(conn->secStats.expires);
 	conn->secStats.packetsReceived =
@@ -7079,12 +7035,14 @@ shutdown_rx(void)
 			sizeof(rx_function_entry_v1_t);

 		    rxi_Free(rpc_stat, space);
-		    rx_MutexAdd(rxi_rpc_peer_stat_cnt, -num_funcs, rx_rpc_stats);
+		    MUTEX_ENTER(&rx_rpc_stats);
+		    rxi_rpc_peer_stat_cnt -= num_funcs;
+		    MUTEX_EXIT(&rx_rpc_stats);
 		}
 		next = peer->next;
 		rxi_FreePeer(peer);
 		if (rx_stats_active)
-		    rx_AtomicDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+		    rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
 	    }
 	}
     }
@@ -7173,44 +7131,38 @@ void
 rx_SetSpecific(struct rx_connection *conn, int key, void *ptr)
 {
     int i;
-    struct rx_connection *tconn =
-	(rx_IsClonedConn(conn)) ? conn->parent : conn;
-
-    MUTEX_ENTER(&tconn->conn_data_lock);
-    if (!tconn->specific) {
-	tconn->specific = (void **)malloc((key + 1) * sizeof(void *));
+    MUTEX_ENTER(&conn->conn_data_lock);
+    if (!conn->specific) {
+	conn->specific = (void **)malloc((key + 1) * sizeof(void *));
 	for (i = 0; i < key; i++)
-	    tconn->specific[i] = NULL;
-	tconn->nSpecific = key + 1;
-	tconn->specific[key] = ptr;
-    } else if (key >= tconn->nSpecific) {
-	tconn->specific = (void **)
-	    realloc(tconn->specific, (key + 1) * sizeof(void *));
-	for (i = tconn->nSpecific; i < key; i++)
-	    tconn->specific[i] = NULL;
-	tconn->nSpecific = key + 1;
-	tconn->specific[key] = ptr;
+	    conn->specific[i] = NULL;
+	conn->nSpecific = key + 1;
+	conn->specific[key] = ptr;
+    } else if (key >= conn->nSpecific) {
+	conn->specific = (void **)
+	    realloc(conn->specific, (key + 1) * sizeof(void *));
+	for (i = conn->nSpecific; i < key; i++)
+	    conn->specific[i] = NULL;
+	conn->nSpecific = key + 1;
+	conn->specific[key] = ptr;
     } else {
-	if (tconn->specific[key] && rxi_keyCreate_destructor[key])
+	if (conn->specific[key] && rxi_keyCreate_destructor[key])
 	    (*rxi_keyCreate_destructor[key]) (conn->specific[key]);
-	tconn->specific[key] = ptr;
+	conn->specific[key] = ptr;
     }
-    MUTEX_EXIT(&tconn->conn_data_lock);
+    MUTEX_EXIT(&conn->conn_data_lock);
 }

 void *
 rx_GetSpecific(struct rx_connection *conn, int key)
 {
     void *ptr;
-    struct rx_connection *tconn =
-	(rx_IsClonedConn(conn)) ? conn->parent : conn;
-
-    MUTEX_ENTER(&tconn->conn_data_lock);
-    if (key >= tconn->nSpecific)
+    MUTEX_ENTER(&conn->conn_data_lock);
+    if (key >= conn->nSpecific)
 	ptr = NULL;
     else
-	ptr = tconn->specific[key];
-    MUTEX_EXIT(&tconn->conn_data_lock);
+	ptr = conn->specific[key];
+    MUTEX_EXIT(&conn->conn_data_lock);
     return ptr;
 }
diff --git a/src/rx/rx.h b/src/rx/rx.h
index 687b11be1..ba9537f16 100644
--- a/src/rx/rx.h
+++ b/src/rx/rx.h
@@ -111,24 +111,16 @@ int ntoh_syserr_conv(int error);
 #define	rx_GetLocalStatus(call, status) ((call)->localStatus)
 #define	rx_GetRemoteStatus(call) ((call)->remoteStatus)
 #define	rx_Error(call) ((call)->error)
+#define	rx_ConnError(conn) ((conn)->error)
 #define	rx_IsServerConn(conn) ((conn)->type == RX_SERVER_CONNECTION)
 #define	rx_IsClientConn(conn) ((conn)->type == RX_CLIENT_CONNECTION)
 /* Don't use these; use the IsServerConn style */
 #define	rx_ServerConn(conn) ((conn)->type == RX_SERVER_CONNECTION)
 #define	rx_ClientConn(conn) ((conn)->type == RX_CLIENT_CONNECTION)
 #define rx_IsUsingPktCksum(conn) ((conn)->flags & RX_CONN_USING_PACKET_CKSUM)
-#define rx_IsClonedConn(conn) ((conn)->flags & RX_CLONED_CONNECTION)
 #define	rx_ServiceIdOf(conn) ((conn)->serviceId)
 #define	rx_SecurityClassOf(conn) ((conn)->securityIndex)
 #define rx_SecurityObjectOf(conn) ((conn)->securityObject)
-#define rx_ConnError(conn) (rx_IsClonedConn((conn)) ? (conn)->parent->error : (conn)->error)
-#define rx_SetConnError(conn, err) (rx_IsClonedConn((conn)) ? ((conn)->parent->error = err): ((conn)->error = err))
-#define rx_ConnHardDeadTime(conn) (rx_IsClonedConn((conn)) ? (conn)->parent->hardDeadTime : (conn)->hardDeadTime)
-#define rx_ConnIdleDeadTime(conn) (rx_IsClonedConn((conn)) ? (conn)->parent->idleDeadTime : (conn)->idleDeadTime)
-#define rx_ConnIdleDeadErr(conn) (rx_IsClonedConn((conn)) ? (conn)->parent->idleDeadErr : (conn)->idleDeadErr)
-#define rx_ConnSecondsUntilDead(conn) (rx_IsClonedConn((conn)) ? (conn)->parent->secondsUntilDead : (conn)->secondsUntilDead)
-#define rx_ConnSecondsUntilPing(conn) (rx_IsClonedConn((conn)) ? (conn)->parent->secondsUntilPing : (conn)->secondsUntilPing)
-

 /*******************
  * Macros callable by the user to further define attributes of a
@@ -176,30 +168,9 @@ int ntoh_syserr_conv(int error);
 #define rx_SetCheckReach(service, x) ((service)->checkReach = (x))

 /* Set connection hard and idle timeouts for a connection */
-#define rx_SetConnHardDeadTime(conn, seconds)\
-    {\
-	if (rx_IsClonedConn(conn)) \
-	    (conn)->parent->hardDeadTime = (seconds); \
-	else \
-	    (conn)->hardDeadTime = (seconds); \
-    }
-
-#define rx_SetConnIdleDeadTime(conn, seconds)\
-    {\
-	if (rx_IsClonedConn(conn)) \
-	    (conn)->parent->idleDeadTime = (seconds); \
-	else \
-	    (conn)->idleDeadTime = (seconds); \
-    }
-
-#define rx_SetServerConnIdleDeadErr(conn, err)\
-    {\
-	if (rx_IsClonedConn(conn)) \
-	    (conn)->parent->idleDeadErr = (err); \
-	else \
-	    (conn)->idleDeadErr = (err); \
-    }
-
+#define rx_SetConnHardDeadTime(conn, seconds) ((conn)->hardDeadTime = (seconds))
+#define rx_SetConnIdleDeadTime(conn, seconds) ((conn)->idleDeadTime = (seconds))
+#define rx_SetServerConnIdleDeadErr(conn,err) ((conn)->idleDeadErr = (err))

 /* Set the overload threshold and the overload error */
 #define	rx_SetBusyThreshold(threshold, code) (rx_BusyThreshold=(threshold),rx_BusyError=(code))
@@ -241,22 +212,6 @@ returned with an error code of RX_CALL_DEAD ( transient error ) */
 #define rx_EnableHotThread()		(rx_enable_hot_thread = 1)
 #define rx_DisableHotThread()		(rx_enable_hot_thread = 0)

-/* Macros to set max connection clones (each allows RX_MAXCALLS
- * outstanding calls */
-
-#define rx_SetMaxCalls(v) \
-do {\
-    rx_SetCloneMax(v/4); \
-} while(0);
-
-#define rx_SetCloneMax(v) \
-do {\
-    if(v < RX_HARD_MAX_CLONES) \
-	rx_max_clones_per_connection = v; \
-} while(0);
-
-typedef afs_int32 rx_atomic_t;
-
 #define rx_PutConnection(conn) rx_DestroyConnection(conn)

 /* A connection is an authenticated communication path, allowing
@@ -267,7 +222,7 @@ struct rx_connection_rx_lock {
     struct rx_peer_rx_lock *peer;
 #else
 struct rx_connection {
-    struct rx_connection *next;	/* on hash chain _or_ free list */
+    struct rx_connection *next;	/*  on hash chain _or_ free list */
     struct rx_peer *peer;
 #endif
 #ifdef RX_ENABLE_LOCKS
@@ -298,7 +253,7 @@ struct rx_connection {
 				 * client-- to retransmit the challenge */
     struct rx_service *service;	/* used by servers only */
     u_short serviceId;		/* To stamp on requests (clients only) */
-    rx_atomic_t refCount;	/* Reference count */
+    afs_uint32 refCount;	/* Reference count */
     u_char flags;		/* Defined below */
     u_char type;		/* Type of connection, defined below */
     u_char secondsUntilPing;	/* how often to ping for each active call */
@@ -319,9 +274,6 @@ struct rx_connection {
     afs_int32 idleDeadErr;
     int nSpecific;		/* number entries in specific data */
     void **specific;		/* pointer to connection specific data */
-    struct rx_connection *parent;	/* primary connection, if this is a clone */
-    struct rx_connection *next_clone;	/* next in list of clones */
-    afs_uint32 nclones;		/* count of clone connections (if not a clone) */
 };

@@ -419,8 +371,7 @@ struct rx_peer {

     /* For garbage collection */
     afs_uint32 idleWhen;	/* When the refcountwent to zero */
-    rx_atomic_t refCount;	/* Reference count */
-
+    afs_uint32 refCount;	/* Reference count for this structure */

     /* Congestion control parameters */
     u_char burstSize;		/* Reinitialization size for the burst parameter */
@@ -478,7 +429,6 @@ struct rx_peer {
 #define	RX_CONN_RESET		   16	/* connection is reset, remove */
 #define	RX_CONN_BUSY               32	/* connection is busy; don't delete */
 #define RX_CONN_ATTACHWAIT	   64	/* attach waiting for peer->lastReach */
-#define RX_CLONED_CONNECTION	  128	/* connection is a clone */

 /* Type of connection, client or server */
 #define	RX_CLIENT_CONNECTION	0
@@ -864,47 +814,47 @@ struct rx_securityClass {
  * must equal sizeof(afs_int32). */

 struct rx_statistics {		/* General rx statistics */
-    rx_atomic_t packetRequests;	/* Number of packet allocation requests */
-    rx_atomic_t receivePktAllocFailures;
-    rx_atomic_t sendPktAllocFailures;
-    rx_atomic_t specialPktAllocFailures;
-    rx_atomic_t socketGreedy;	/* Whether SO_GREEDY succeeded */
-    rx_atomic_t bogusPacketOnRead;	/* Number of inappropriately short packets received */
-    rx_atomic_t bogusHost;	/* Host address from bogus packets */
-    rx_atomic_t noPacketOnRead;	/* Number of read packets attempted when there was actually no packet to read off the wire */
-    rx_atomic_t noPacketBuffersOnRead;	/* Number of dropped data packets due to lack of packet buffers */
-    rx_atomic_t selects;	/* Number of selects waiting for packet or timeout */
-    rx_atomic_t sendSelects;	/* Number of selects forced when sending packet */
-    rx_atomic_t packetsRead[RX_N_PACKET_TYPES];	/* Total number of packets read, per type */
-    rx_atomic_t dataPacketsRead;	/* Number of unique data packets read off the wire */
-    rx_atomic_t ackPacketsRead;	/* Number of ack packets read */
-    rx_atomic_t dupPacketsRead;	/* Number of duplicate data packets read */
-    rx_atomic_t spuriousPacketsRead;	/* Number of inappropriate data packets */
-    rx_atomic_t packetsSent[RX_N_PACKET_TYPES];	/* Number of rxi_Sends: packets sent over the wire, per type */
-    rx_atomic_t ackPacketsSent;	/* Number of acks sent */
-    rx_atomic_t pingPacketsSent;	/* Total number of ping packets sent */
-    rx_atomic_t abortPacketsSent;	/* Total number of aborts */
-    rx_atomic_t busyPacketsSent;	/* Total number of busies sent received */
-    rx_atomic_t dataPacketsSent;	/* Number of unique data packets sent */
-    rx_atomic_t dataPacketsReSent;	/* Number of retransmissions */
-    rx_atomic_t dataPacketsPushed;	/* Number of retransmissions pushed early by a NACK */
-    rx_atomic_t ignoreAckedPacket;	/* Number of packets with acked flag, on rxi_Start */
+    int packetRequests;		/* Number of packet allocation requests */
+    int receivePktAllocFailures;
+    int sendPktAllocFailures;
+    int specialPktAllocFailures;
+    int socketGreedy;		/* Whether SO_GREEDY succeeded */
+    int bogusPacketOnRead;	/* Number of inappropriately short packets received */
+    int bogusHost;		/* Host address from bogus packets */
+    int noPacketOnRead;		/* Number of read packets attempted when there was actually no packet to read off the wire */
+    int noPacketBuffersOnRead;	/* Number of dropped data packets due to lack of packet buffers */
+    int selects;		/* Number of selects waiting for packet or timeout */
+    int sendSelects;		/* Number of selects forced when sending packet */
+    int packetsRead[RX_N_PACKET_TYPES];	/* Total number of packets read, per type */
+    int dataPacketsRead;	/* Number of unique data packets read off the wire */
+    int ackPacketsRead;		/* Number of ack packets read */
+    int dupPacketsRead;		/* Number of duplicate data packets read */
+    int spuriousPacketsRead;	/* Number of inappropriate data packets */
+    int packetsSent[RX_N_PACKET_TYPES];	/* Number of rxi_Sends: packets sent over the wire, per type */
+    int ackPacketsSent;		/* Number of acks sent */
+    int pingPacketsSent;	/* Total number of ping packets sent */
+    int abortPacketsSent;	/* Total number of aborts */
+    int busyPacketsSent;	/* Total number of busies sent received */
+    int dataPacketsSent;	/* Number of unique data packets sent */
+    int dataPacketsReSent;	/* Number of retransmissions */
+    int dataPacketsPushed;	/* Number of retransmissions pushed early by a NACK */
+    int ignoreAckedPacket;	/* Number of packets with acked flag, on rxi_Start */
     struct clock totalRtt;	/* Total round trip time measured (use to compute average) */
     struct clock minRtt;	/* Minimum round trip time measured */
     struct clock maxRtt;	/* Maximum round trip time measured */
-    rx_atomic_t nRttSamples;	/* Total number of round trip samples */
-    rx_atomic_t nServerConns;	/* Total number of server connections */
-    rx_atomic_t nClientConns;	/* Total number of client connections */
-    rx_atomic_t nPeerStructs;	/* Total number of peer structures */
-    rx_atomic_t nCallStructs;	/* Total number of call structures allocated */
-    rx_atomic_t nFreeCallStructs;	/* Total number of previously allocated free call structures */
-    rx_atomic_t netSendFailures;
-    rx_atomic_t fatalErrors;
-    rx_atomic_t ignorePacketDally;	/* packets dropped because call is in dally state */
-    rx_atomic_t receiveCbufPktAllocFailures;
-    rx_atomic_t sendCbufPktAllocFailures;
-    rx_atomic_t nBusies;
-    rx_atomic_t spares[4];
+    int nRttSamples;		/* Total number of round trip samples */
+    int nServerConns;		/* Total number of server connections */
+    int nClientConns;		/* Total number of client connections */
+    int nPeerStructs;		/* Total number of peer structures */
+    int nCallStructs;		/* Total number of call structures allocated */
+    int nFreeCallStructs;	/* Total number of previously allocated free call structures */
+    int netSendFailures;
+    afs_int32 fatalErrors;
+    int ignorePacketDally;	/* packets dropped because call is in dally state */
+    int receiveCbufPktAllocFailures;
+    int sendCbufPktAllocFailures;
+    int nBusies;
+    int spares[4];
 };

 /* structures for debug input and output packets */
@@ -956,46 +906,44 @@ struct rx_debugStats {
 };

 struct rx_debugConn_vL {
-    afs_uint32 host;
-    afs_uint32 cid;
-    struct rx_debugConn_vL *parent;	/* primary connection, if this is a clone */
-    afs_uint32 serial;
-    afs_uint32 callNumber[RX_MAXCALLS];
-    afs_uint32 error;
-    u_short port;
-    u_char flags;
-    u_char type;
-    u_char securityIndex;
-    u_char callState[RX_MAXCALLS];
-    u_char callMode[RX_MAXCALLS];
-    u_char callFlags[RX_MAXCALLS];
-    u_char callOther[RX_MAXCALLS];
+    afs_int32 host;
+    afs_int32 cid;
+    afs_int32 serial;
+    afs_int32 callNumber[RX_MAXCALLS];
+    afs_int32 error;
+    short port;
+    char flags;
+    char type;
+    char securityIndex;
+    char callState[RX_MAXCALLS];
+    char callMode[RX_MAXCALLS];
+    char callFlags[RX_MAXCALLS];
+    char callOther[RX_MAXCALLS];
     /* old style getconn stops here */
     struct rx_securityObjectStats secStats;
-    afs_uint32 sparel[10];
+    afs_int32 sparel[10];
 };

 struct rx_debugConn {
-    afs_uint32 host;
-    afs_uint32 cid;
-    struct rx_debugConn *parent;	/* primary connection, if this is a clone */
-    afs_uint32 serial;
-    afs_uint32 callNumber[RX_MAXCALLS];
-    afs_uint32 error;
-    u_short port;
-    u_char flags;
-    u_char type;
-    u_char securityIndex;
-    u_char sparec[3];		/* force correct alignment */
-    u_char callState[RX_MAXCALLS];
-    u_char callMode[RX_MAXCALLS];
-    u_char callFlags[RX_MAXCALLS];
-    u_char callOther[RX_MAXCALLS];
+    afs_int32 host;
+    afs_int32 cid;
+    afs_int32 serial;
+    afs_int32 callNumber[RX_MAXCALLS];
+    afs_int32 error;
+    short port;
+    char flags;
+    char type;
+    char securityIndex;
+    char sparec[3];		/* force correct alignment */
+    char callState[RX_MAXCALLS];
+    char callMode[RX_MAXCALLS];
+    char callFlags[RX_MAXCALLS];
+    char callOther[RX_MAXCALLS];
     /* old style getconn stops here */
     struct rx_securityObjectStats secStats;
-    afs_uint32 epoch;
-    afs_uint32 natMTU;
-    afs_uint32 sparel[9];
+    afs_int32 epoch;
+    afs_int32 natMTU;
+    afs_int32 sparel[9];
 };

 struct rx_debugPeer {
@@ -1137,7 +1085,59 @@ typedef struct rx_interface_stat {

 #ifdef AFS_NT40_ENV
 extern int rx_DumpCalls(FILE *outputFile, char *cookie);
-#endif /* AFS_NT40_ENV */
+
+#define rx_MutexIncrement(object, mutex) InterlockedIncrement(&object)
+#define rx_MutexAdd(object, addend, mutex) InterlockedAdd(&object, addend)
+#define rx_MutexDecrement(object, mutex) InterlockedDecrement(&object)
+#define rx_MutexAdd1Increment2(object1, addend, object2, mutex) \
+    do { \
+        MUTEX_ENTER(&mutex); \
+        object1 += addend; \
+        InterlockedIncrement(&object2); \
+        MUTEX_EXIT(&mutex); \
+    } while (0)
+#define rx_MutexAdd1Decrement2(object1, addend, object2, mutex) \
+    do { \
+        MUTEX_ENTER(&mutex); \
+        object1 += addend; \
+        InterlockedDecrement(&object2); \
+        MUTEX_EXIT(&mutex); \
+    } while (0)
+#else
+#define rx_MutexIncrement(object, mutex) \
+    do { \
+        MUTEX_ENTER(&mutex); \
+        object++; \
+        MUTEX_EXIT(&mutex); \
+    } while(0)
+#define rx_MutexAdd(object, addend, mutex) \
+    do { \
+        MUTEX_ENTER(&mutex); \
+        object += addend; \
+        MUTEX_EXIT(&mutex); \
+    } while(0)
+#define rx_MutexAdd1Increment2(object1, addend, object2, mutex) \
+    do { \
+        MUTEX_ENTER(&mutex); \
+        object1 += addend; \
+        object2++; \
+        MUTEX_EXIT(&mutex); \
+    } while(0)
+#define rx_MutexAdd1Decrement2(object1, addend, object2, mutex) \
+    do { \
+        MUTEX_ENTER(&mutex); \
+        object1 += addend; \
+        object2--; \
+        MUTEX_EXIT(&mutex); \
+    } while(0)
+#define rx_MutexDecrement(object, mutex) \
+    do { \
+        MUTEX_ENTER(&mutex); \
+        object--; \
+        MUTEX_EXIT(&mutex); \
+    } while(0)
+#endif
+
 #endif /* _RX_   End of rx.h */

 #ifdef KERNEL
diff --git a/src/rx/rx_clock.c b/src/rx/rx_clock.c
index 348cfc5f0..9d7ca3073 100644
--- a/src/rx/rx_clock.c
+++ b/src/rx/rx_clock.c
@@ -32,7 +32,6 @@ RCSID
 #else /* !UKERNEL */
 #include "afs/sysincludes.h"
 #include "afsincludes.h"
-#include "rx/rx_internal.h"
 #include "rx/rx.h"
 #include "rx/rx_clock.h"
 #endif /* !UKERNEL */
@@ -44,7 +43,6 @@ RCSID
 #include
 #include
 #include
-#include "rx_internal.h"
 #include "rx.h"
 #include "rx_clock.h"
 #endif
diff --git a/src/rx/rx_conncache.c b/src/rx/rx_conncache.c
index 407bff692..368d22b80 100644
--- a/src/rx/rx_conncache.c
+++ b/src/rx/rx_conncache.c
@@ -24,13 +24,11 @@ RCSID
 #ifdef UKERNEL
 #include "afs/sysincludes.h"
 #include "afsincludes.h"
-#include "rx/rx_internal.h"
 #include "rx/rx.h"
 #else /* ! UKERNEL */
 #include
 #include
 #include
-#include "rx_internal.h"
 #include "rx.h"
 #endif /* UKERNEL */
diff --git a/src/rx/rx_event.c b/src/rx/rx_event.c
index b6669df2c..91d2899d9 100644
--- a/src/rx/rx_event.c
+++ b/src/rx/rx_event.c
@@ -34,7 +34,6 @@ RCSID
 #include "rx/rx_kernel.h"
 #include "rx_kmutex.h"
 #ifdef RX_ENABLE_LOCKS
-#include "rx/rx_internal.h"
 #include "rx/rx.h"
 #endif /* RX_ENABLE_LOCKS */
 #include "rx/rx_globals.h"
@@ -62,7 +61,6 @@ extern void *osi_Alloc();
 #include "rx_lwp.h"
 #endif
 #ifdef RX_ENABLE_LOCKS
-#include "rx_internal.h"
 #include "rx.h"
 #endif /* RX_ENABLE_LOCKS */
 #include "rx_globals.h"
diff --git a/src/rx/rx_getaddr.c b/src/rx/rx_getaddr.c
index 5420c7bef..7abde201e 100644
--- a/src/rx/rx_getaddr.c
+++ b/src/rx/rx_getaddr.c
@@ -32,7 +32,6 @@ RCSID
  * By including this, we get any system dependencies. In particular,
  * the pthreads for solaris requires the socket call to be mapped.
  */
-#include "rx_internal.h"
 #include "rx.h"
 #include "rx_globals.h"
 #endif /* AFS_NT40_ENV */
@@ -40,7 +39,6 @@ RCSID
 #ifdef UKERNEL
 #include "rx/rx_kcommon.h"
 #else /* UKERNEL */
-#include "rx/rx_internal.h"
 #include "rx/rx.h"
 #endif /* UKERNEL */
 #endif /* KERNEL */
diff --git a/src/rx/rx_globals.c b/src/rx/rx_globals.c
index d46564c45..8f08ab109 100644
--- a/src/rx/rx_globals.c
+++ b/src/rx/rx_globals.c
@@ -66,9 +66,4 @@ void rx_SetUdpBufSize(int x)
 	rx_UdpBufSize = x;
 }

-void rx_SetMaxClonesPerConn(int x)
-{
-    rx_max_clones_per_connection = x;
-}
-
 #endif /* AFS_NT40_ENV */
diff --git a/src/rx/rx_globals.h b/src/rx/rx_globals.h
index 3ea679b61..2ebeea3a6 100644
--- a/src/rx/rx_globals.h
+++ b/src/rx/rx_globals.h
@@ -102,11 +102,9 @@ EXT int rx_UdpBufSize GLOBALSINIT(64 * 1024);
 #ifdef AFS_NT40_ENV
 int rx_GetMinUdpBufSize(void);
 void rx_SetUdpBufSize(int x);
-void rx_SetMaxClonesPerConn(int x);
 #else
 #define rx_GetMinUdpBufSize()   (64*1024)
 #define rx_SetUdpBufSize(x)     (((x)>rx_GetMinUdpBufSize()) ? (rx_UdpBufSize = (x)):0)
-#define rx_SetMaxClonesPerConn(x) (rx_max_clones_per_connection = x)
 #endif
 /*
  * Variables to control RX overload management. When the number of calls
@@ -195,7 +193,7 @@ typedef struct rx_ts_info_t {
     } _FPQ;
     struct rx_packet * local_special_packet;
 } rx_ts_info_t;
-struct rx_ts_info_t * rx_ts_info_init(void);   /* init function for thread-specific data struct */
+EXT struct rx_ts_info_t * rx_ts_info_init(void);   /* init function for thread-specific data struct */
 #define RX_TS_INFO_GET(ts_info_p) \
     do { \
         ts_info_p = (struct rx_ts_info_t*)pthread_getspecific(rx_ts_info_key); \
@@ -253,9 +251,9 @@ EXT afs_kmutex_t rx_freePktQ_lock;
 EXT int rx_TSFPQGlobSize GLOBALSINIT(3); /* number of packets to transfer between global and local queues in one op */
 EXT int rx_TSFPQLocalMax GLOBALSINIT(15); /* max number of packets on local FPQ before returning a glob to the global pool */
 EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
-void rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local); /* more flexible packet alloc function */
-void rxi_AdjustLocalPacketsTSFPQ(int num_keep_local, int allow_overcommit); /* adjust thread-local queue length, for places where we know how many packets we will need a priori */
-void rxi_FlushLocalPacketsTSFPQ(void); /* flush all thread-local packets to global queue */
+EXT void rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local); /* more flexible packet alloc function */
+EXT void rxi_AdjustLocalPacketsTSFPQ(int num_keep_local, int allow_overcommit); /* adjust thread-local queue length, for places where we know how many packets we will need a priori */
+EXT void rxi_FlushLocalPacketsTSFPQ(void); /* flush all thread-local packets to global queue */
 #define RX_TS_FPQ_FLUSH_GLOBAL 1
 #define RX_TS_FPQ_PULL_GLOBAL 1
 #define RX_TS_FPQ_ALLOW_OVERCOMMIT 1
@@ -628,12 +626,5 @@ EXT2 int rx_enable_stats GLOBALSINIT(0);
  */
 EXT int rx_enable_hot_thread GLOBALSINIT(0);

-/*
- * Set rx_max_clones_per_connection to a value > 0 to enable connection clone
- * workaround to RX_MAXCALLS limit.
- */
-#define RX_HARD_MAX_CLONES 10
-EXT int rx_max_clones_per_connection GLOBALSINIT(2);
-
 EXT int RX_IPUDP_SIZE GLOBALSINIT(_RX_IPUDP_SIZE);
 #endif /* AFS_RX_GLOBALS_H */
diff --git a/src/rx/rx_internal.h b/src/rx/rx_internal.h
deleted file mode 100644
index 960f99a49..000000000
--- a/src/rx/rx_internal.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Copyright (c) 2008, Your File System, Inc.
- * All rights reserved.
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the company nor the names of its contributors may - * be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _RX_INTERNAL_H_ -#define _RX_INTERNAL_H_ - -#ifdef AFS_DARWIN80_ENV -#include -#endif -#ifdef AFS_SUN58_ENV -#include -#endif - -#ifdef AFS_NT40_ENV -#ifndef _WIN64 -#ifndef __cplusplus -#include -#pragma intrinsic(_InterlockedOr) -#pragma intrinsic(_InterlockedAnd) -#define rx_AtomicOr(object, operand, mutex) _InterlockedOr(&object, operand) -#define rx_AtomicAnd(object, operand, mutex) _InterlockedAnd(&object, operand) -#endif /* __cplusplus */ -#else /* !WIN64 */ -#define rx_AtomicOr(object, operand, mutex) InterlockedOr(&object, operand) -#define rx_AtomicAnd(object, operand, mutex) InterlockedAnd(&object, operand) -#endif /* WIN64 */ -#define rx_AtomicIncrement_NL(object) InterlockedIncrement(&object) -#define rx_AtomicIncrement(object, mutex) InterlockedIncrement(&object) -#define rx_AtomicXor(object, operand, mutex) InterlockedXor(&object, operand) -#define rx_AtomicAdd_NL(object, addend) InterlockedExchangeAdd(&object, addend) -#define rx_AtomicAdd(object, addend, mutex) InterlockedExchangeAdd(&object, addend) -#define rx_AtomicDecrement_NL(object) InterlockedDecrement(&object) -#define rx_AtomicDecrement(object, mutex) InterlockedDecrement(&object) -#define rx_AtomicSwap_NL(object1, object2) InterlockedExchange ((volatile LONG *) object1, object2); -#define rx_AtomicSwap(object1, object2, mutex) InterlockedExchange ((volatile LONG *) object1, object2); -#elif defined(AFS_DARWIN80_ENV) -#define rx_AtomicIncrement_NL(object) OSAtomicIncrement32(&object) -#define rx_AtomicIncrement(object, mutex) OSAtomicIncrement32(&object) -#define rx_AtomicOr(object, operand, mutex) OSAtomicOr32(operand, &object) -#define rx_AtomicAnd(object, operand, mutex) OSAtomicAnd32(operand, &object) -#define rx_AtomicXor(object, operand, mutex) OSAtomicXor32(operand, &object) -#define rx_AtomicAdd_NL(object, addend) OSAtomicAdd32(addend, &object) -#define rx_AtomicAdd(object, addend, mutex) OSAtomicAdd32(addend, &object) -#define rx_AtomicDecrement_NL(object) OSAtomicDecrement32(&object) -#define 
rx_AtomicDecrement(object, mutex) OSAtomicDecrement32(&object) -#define rx_AtomicSwap_NL(oldval, newval) rx_AtomicSwap_int(oldval, newval) -#define rx_AtomicSwap(oldval, newval, mutex) rx_AtomicSwap_int(oldval, newval) -static inline afs_int32 rx_AtomicSwap_int(afs_int32 *oldval, afs_int32 newval) { - afs_int32 ret = *oldval; - OSAtomicCompareAndSwap32 ((afs_int32) *oldval,(afs_int32) newval, - (afs_int32*) oldval); - return ret; -} -#elif defined(AFS_SUN58_ENV) -#define rx_AtomicIncrement_NL(object) atomic_inc_32(&object) -#define rx_AtomicIncrement(object, mutex) atomic_inc_32(&object) -#define rx_AtomicOr(object, operand, mutex) atomic_or_32(&object, operand) -#define rx_AtomicAnd(object, operand, mutex) atomic_and_32(&object, operand) -#define rx_AtomicAdd_NL(object, addend) atomic_add_32(&object, addend) -#define rx_AtomicAdd(object, addend, mutex) atomic_add_32(&object, addend) -#define rx_AtomicDecrement_NL(object) atomic_dec_32(&object) -#define rx_AtomicDecrement(object, mutex) atomic_dec_32(&object) -#define rx_AtomicSwap_NL(oldval, newval) rx_AtomicSwap_int(oldval, newval) -#define rx_AtomicSwap(oldval, newval, mutex) rx_AtomicSwap_int(oldval, newval) -static inline afs_int32 rx_AtomicSwap_int(afs_int32 *oldval, afs_int32 newval) { - afs_int32 ret = *oldval; - atomic_cas_32((afs_int32) *oldval,(afs_int32) newval, - (afs_int32*) oldval); - return ret; -} -#else -#define rx_AtomicIncrement_NL(object) (object)++ -#define rx_AtomicIncrement(object, mutex) rx_MutexIncrement(object, mutex) -#define rx_AtomicOr(object, operand, mutex) rx_MutexOr(object, operand, mutex) -#define rx_AtomicAnd(object, operand, mutex) rx_MutexAnd(object, operand, mutex) -#define rx_AtomicAdd_NL(object, addend) (object += addend) -#define rx_AtomicAdd(object, addend, mutex) rx_MutexAdd(object, addand, mutex) -#define rx_AtomicDecrement_NL(object) (object)-- -#define rx_AtomicDecrement(object, mutex) rx_MutexDecrement(object, mutex) -#define rx_AtomicSwap_NL(oldval, newval) rx_AtomicSwap_int(oldval, newval) -#define rx_AtomicSwap(oldval, newval, mutex) rx_AtomicSwap_int(oldval, newval) -static_inline afs_int32 rx_AtomicSwap_int(afs_int32 *oldval, afs_int32 newval) { - afs_int32 ret = *oldval; - *oldval = newval; - return ret; -} -#endif -#define rx_AtomicPeek_NL(object) rx_AtomicAdd_NL(object, 0) -#define rx_AtomicPeek(object, mutex) rx_AtomicAdd(object, 0, mutex) -#define rx_MutexIncrement(object, mutex) \ - do { \ - MUTEX_ENTER(&mutex); \ - object++; \ - MUTEX_EXIT(&mutex); \ - } while(0) -#define rx_MutexOr(object, operand, mutex) \ - do { \ - MUTEX_ENTER(&mutex); \ - object |= operand; \ - MUTEX_EXIT(&mutex); \ - } while(0) -#define rx_MutexAnd(object, operand, mutex) \ - do { \ - MUTEX_ENTER(&mutex); \ - object &= operand; \ - MUTEX_EXIT(&mutex); \ - } while(0) -#define rx_MutexXor(object, operand, mutex) \ - do { \ - MUTEX_ENTER(&mutex); \ - object ^= operand; \ - MUTEX_EXIT(&mutex); \ - } while(0) -#define rx_MutexAdd(object, addend, mutex) \ - do { \ - MUTEX_ENTER(&mutex); \ - object += addend; \ - MUTEX_EXIT(&mutex); \ - } while(0) -#define rx_MutexDecrement(object, mutex) \ - do { \ - MUTEX_ENTER(&mutex); \ - object--; \ - MUTEX_EXIT(&mutex); \ - } while(0) -#define rx_MutexAdd1Increment2(object1, addend, object2, mutex) \ - do { \ - MUTEX_ENTER(&mutex); \ - object1 += addend; \ - object2++; \ - MUTEX_EXIT(&mutex); \ - } while(0) -#define rx_MutexAdd1Decrement2(object1, addend, object2, mutex) \ - do { \ - MUTEX_ENTER(&mutex); \ - object1 += addend; \ - object2--; \ - MUTEX_EXIT(&mutex); \ - } 
while(0) - -#define rx_MutexAdd1AtomicIncrement2(object1, addend, object2, mutex) \ - do { \ - MUTEX_ENTER(&mutex); \ - object1 += addend; \ - rx_AtomicIncrement(&object2); \ - MUTEX_EXIT(&mutex); \ - } while (0) -#define rx_MutexAdd1AtomicDecrement2(object1, addend, object2, mutex) \ - do { \ - MUTEX_ENTER(&mutex); \ - object1 += addend; \ - rx_AtomicDecrement(&object2); \ - MUTEX_EXIT(&mutex); \ - } while (0) -#endif /* _RX_INTERNAL_H */ diff --git a/src/rx/rx_kcommon.c b/src/rx/rx_kcommon.c index 9d554740a..64b2524db 100644 --- a/src/rx/rx_kcommon.c +++ b/src/rx/rx_kcommon.c @@ -1197,8 +1197,8 @@ rxk_ReadPacket(osi_socket so, struct rx_packet *p, int *host, int *port) if (nbytes <= 0) { if (rx_stats_active) { MUTEX_ENTER(&rx_stats_mutex); - rx_AtomicIncrement_NL(rx_stats.bogusPacketOnRead); - rx_AtomicSwap_NL(&rx_stats.bogusHost, from.sin_addr.s_addr); + rx_stats.bogusPacketOnRead++; + rx_stats.bogusHost = from.sin_addr.s_addr; MUTEX_EXIT(&rx_stats_mutex); } dpf(("B: bogus packet from [%x,%d] nb=%d", @@ -1212,8 +1212,11 @@ rxk_ReadPacket(osi_socket so, struct rx_packet *p, int *host, int *port) *host = from.sin_addr.s_addr; *port = from.sin_port; if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) { - if (rx_stats_active) - rx_AtomicIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex); + if (rx_stats_active) { + MUTEX_ENTER(&rx_stats_mutex); + rx_stats.packetsRead[p->header.type - 1]++; + MUTEX_EXIT(&rx_stats_mutex); + } } #ifdef RX_TRIMDATABUFS diff --git a/src/rx/rx_kcommon.h b/src/rx/rx_kcommon.h index 51f035213..d9a46b9d8 100644 --- a/src/rx/rx_kcommon.h +++ b/src/rx/rx_kcommon.h @@ -145,7 +145,6 @@ typedef unsigned short etap_event_t; #ifdef KERNEL #include "afs/sysincludes.h" #include "afsincludes.h" -#include "rx/rx_internal.h" #endif #if defined(AFS_OBSD_ENV) #include "afs/sysincludes.h" diff --git a/src/rx/rx_lwp.c b/src/rx/rx_lwp.c index 2e8fc43f4..caae5bcdb 100644 --- a/src/rx/rx_lwp.c +++ b/src/rx/rx_lwp.c @@ -44,7 +44,6 @@ RCSID # include # include #endif -# include "rx_internal.h" # include "rx.h" # include "rx_globals.h" # include @@ -212,7 +211,7 @@ rxi_ListenerProc(fd_set * rfds, int *tnop, struct rx_call **newcallp) tv.tv_usec = cv.usec; tvp = &tv; } - rx_AtomicIncrement(rx_stats.selects, rx_stats_mutex); + rx_stats.selects++; *rfds = rx_selectMask; @@ -435,7 +434,7 @@ rxi_Sendmsg(osi_socket socket, struct msghdr *msg_p, int flags) fd_set *sfds = (fd_set *) 0; while (sendmsg(socket, msg_p, flags) == -1) { int err; - rx_AtomicIncrement(rx_stats.sendSelects, rx_stats_mutex); + rx_stats.sendSelects++; if (!sfds) { if (!(sfds = IOMGR_AllocFDSet())) { diff --git a/src/rx/rx_misc.c b/src/rx/rx_misc.c index 0c29c4553..36cf883c1 100644 --- a/src/rx/rx_misc.c +++ b/src/rx/rx_misc.c @@ -27,7 +27,6 @@ RCSID #include #include "xdr.h" #ifdef AFS_PTHREAD_ENV -#include "rx_internal.h" #include "rx.h" #endif /* AFS_PTHREAD_ENV */ #include diff --git a/src/rx/rx_multi.c b/src/rx/rx_multi.c index 7992a2086..83dddb293 100644 --- a/src/rx/rx_multi.c +++ b/src/rx/rx_multi.c @@ -15,13 +15,9 @@ RCSID #ifdef KERNEL #include "afs/sysincludes.h" -#include "rx/rx_internal.h" #include "rx/rx.h" -#include "rx/rx_globals.h" #else /* KERNEL */ -# include "rx_internal.h" # include "rx.h" -# include "rx_globals.h" #endif /* KERNEL */ /* diff --git a/src/rx/rx_null.c b/src/rx/rx_null.c index 2add44fa2..d8ff2c119 100644 --- a/src/rx/rx_null.c +++ b/src/rx/rx_null.c @@ -23,10 +23,8 @@ RCSID #else /* !UKERNEL */ #include "afs/sysincludes.h" #endif /* !UKERNEL */ -#include 
"rx/rx_internal.h" #include "rx/rx.h" #else /* KERNEL */ -#include "rx_internal.h" #include "rx.h" #endif /* KERNEL */ diff --git a/src/rx/rx_packet.c b/src/rx/rx_packet.c index f189fab6d..e476df923 100644 --- a/src/rx/rx_packet.c +++ b/src/rx/rx_packet.c @@ -18,77 +18,75 @@ RCSID ("$Header$"); #ifdef KERNEL -# if defined(UKERNEL) -# include "afs/sysincludes.h" -# include "afsincludes.h" -# include "rx/rx_kcommon.h" -# include "rx/rx_clock.h" -# include "rx/rx_queue.h" -# include "rx/rx_packet.h" -# else /* defined(UKERNEL) */ -# ifdef RX_KERNEL_TRACE -# include "../rx/rx_kcommon.h" -# endif -# include "h/types.h" -# ifndef AFS_LINUX20_ENV -# include "h/systm.h" -# endif -# if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV) -# include "afs/sysincludes.h" -# endif -# if defined(AFS_OBSD_ENV) -# include "h/proc.h" -# endif -# include "h/socket.h" -# if !defined(AFS_SUN5_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV) -# if !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV) -# include "sys/mount.h" /* it gets pulled in by something later anyway */ -# endif -# include "h/mbuf.h" -# endif -# include "netinet/in.h" -# include "afs/afs_osi.h" -# include "rx_kmutex.h" -# include "rx/rx_clock.h" -# include "rx/rx_queue.h" -# ifdef AFS_SUN5_ENV -# include -# endif -# include "rx/rx_packet.h" -# endif /* defined(UKERNEL) */ -# include "rx/rx_internal.h" -# include "rx/rx_globals.h" +#if defined(UKERNEL) +#include "afs/sysincludes.h" +#include "afsincludes.h" +#include "rx/rx_kcommon.h" +#include "rx/rx_clock.h" +#include "rx/rx_queue.h" +#include "rx/rx_packet.h" +#else /* defined(UKERNEL) */ +#ifdef RX_KERNEL_TRACE +#include "../rx/rx_kcommon.h" +#endif +#include "h/types.h" +#ifndef AFS_LINUX20_ENV +#include "h/systm.h" +#endif +#if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV) +#include "afs/sysincludes.h" +#endif +#if defined(AFS_OBSD_ENV) +#include "h/proc.h" +#endif +#include "h/socket.h" +#if !defined(AFS_SUN5_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV) +#if !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV) +#include "sys/mount.h" /* it gets pulled in by something later anyway */ +#endif +#include "h/mbuf.h" +#endif +#include "netinet/in.h" +#include "afs/afs_osi.h" +#include "rx_kmutex.h" +#include "rx/rx_clock.h" +#include "rx/rx_queue.h" +#ifdef AFS_SUN5_ENV +#include +#endif +#include "rx/rx_packet.h" +#endif /* defined(UKERNEL) */ +#include "rx/rx_globals.h" #else /* KERNEL */ -# include "sys/types.h" -# include -# include -# if defined(AFS_NT40_ENV) -# include -# ifndef EWOULDBLOCK -# define EWOULDBLOCK WSAEWOULDBLOCK -# endif -# include "rx_user.h" -# include "rx_xmit_nt.h" -# include -# else -# include -# include -# endif -# include "rx_clock.h" -# include "rx_internal.h" -# include "rx.h" -# include "rx_queue.h" -# ifdef AFS_SUN5_ENV -# include -# endif -# include "rx_packet.h" -# include "rx_globals.h" -# include -# include -# include -# ifdef HAVE_UNISTD_H -# include -# endif +#include "sys/types.h" +#include +#include +#if defined(AFS_NT40_ENV) +#include +#ifndef EWOULDBLOCK +#define EWOULDBLOCK WSAEWOULDBLOCK +#endif +#include "rx_user.h" +#include "rx_xmit_nt.h" +#include +#else +#include +#include +#endif +#include "rx_clock.h" +#include "rx.h" +#include "rx_queue.h" +#ifdef AFS_SUN5_ENV +#include +#endif +#include "rx_packet.h" +#include "rx_globals.h" +#include +#include +#include +#ifdef HAVE_UNISTD_H +#include +#endif #endif /* KERNEL */ #ifdef RX_LOCKS_DB @@ -316,19 +314,19 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q) 
if (rx_stats_active) { switch (class) { case RX_PACKET_CLASS_RECEIVE: - rx_AtomicIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex); break; case RX_PACKET_CLASS_SEND: - rx_AtomicIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex); break; case RX_PACKET_CLASS_SPECIAL: - rx_AtomicIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex); break; case RX_PACKET_CLASS_RECV_CBUF: - rx_AtomicIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex); break; case RX_PACKET_CLASS_SEND_CBUF: - rx_AtomicIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex); break; } } @@ -1127,19 +1125,19 @@ rxi_AllocPacketNoLock(int class) if (rx_stats_active) { switch (class) { case RX_PACKET_CLASS_RECEIVE: - rx_AtomicIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex); break; case RX_PACKET_CLASS_SEND: - rx_AtomicIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex); break; case RX_PACKET_CLASS_SPECIAL: - rx_AtomicIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex); break; case RX_PACKET_CLASS_RECV_CBUF: - rx_AtomicIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex); break; case RX_PACKET_CLASS_SEND_CBUF: - rx_AtomicIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex); break; } } @@ -1148,7 +1146,7 @@ rxi_AllocPacketNoLock(int class) #endif /* KERNEL */ if (rx_stats_active) - rx_AtomicIncrement(rx_stats.packetRequests, rx_stats_mutex); + rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex); if (queue_IsEmpty(&rx_ts_info->_FPQ)) { #ifdef KERNEL @@ -1187,19 +1185,19 @@ rxi_AllocPacketNoLock(int class) if (rx_stats_active) { switch (class) { case RX_PACKET_CLASS_RECEIVE: - rx_AtomicIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex); break; case RX_PACKET_CLASS_SEND: - rx_AtomicIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex); break; case RX_PACKET_CLASS_SPECIAL: - rx_AtomicIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex); break; case RX_PACKET_CLASS_RECV_CBUF: - rx_AtomicIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex); break; case RX_PACKET_CLASS_SEND_CBUF: - rx_AtomicIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex); break; } } @@ -1208,7 +1206,7 @@ rxi_AllocPacketNoLock(int class) #endif /* KERNEL */ if (rx_stats_active) - rx_AtomicIncrement(rx_stats.packetRequests, rx_stats_mutex); + rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex); #ifdef KERNEL if (queue_IsEmpty(&rx_freePacketQueue)) @@ -1245,7 +1243,7 @@ 
rxi_AllocPacketTSFPQ(int class, int pull_global) RX_TS_INFO_GET(rx_ts_info); if (rx_stats_active) - rx_AtomicIncrement(rx_stats.packetRequests, rx_stats_mutex); + rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex); if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) { MUTEX_ENTER(&rx_freePktQ_lock); @@ -1466,12 +1464,12 @@ rxi_ReadPacket(osi_socket socket, struct rx_packet *p, afs_uint32 * host, if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */ if (nbytes < 0 && errno == EWOULDBLOCK) { if (rx_stats_active) - rx_AtomicIncrement(rx_stats.noPacketOnRead, rx_stats_mutex); + rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex); } else if (nbytes <= 0) { if (rx_stats_active) { MUTEX_ENTER(&rx_stats_mutex); - rx_AtomicIncrement_NL(rx_stats.bogusPacketOnRead); - rx_AtomicSwap(&rx_stats.bogusHost, from.sin_addr.s_addr, rx_stats_mutex); + rx_stats.bogusPacketOnRead++; + rx_stats.bogusHost = from.sin_addr.s_addr; MUTEX_EXIT(&rx_stats_mutex); } dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr), @@ -1506,7 +1504,7 @@ rxi_ReadPacket(osi_socket socket, struct rx_packet *p, afs_uint32 * host, if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) { struct rx_peer *peer; if (rx_stats_active) - rx_AtomicIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex); + rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex); /* * Try to look up this peer structure. If it doesn't exist, * don't create a new one - @@ -1523,7 +1521,7 @@ rxi_ReadPacket(osi_socket socket, struct rx_packet *p, afs_uint32 * host, * it may have no refCount, meaning we could race with * ReapConnections */ - if (peer && (rx_AtomicPeek_NL(peer->refCount) > 0)) { + if (peer && (peer->refCount > 0)) { MUTEX_ENTER(&peer->peer_lock); hadd32(peer->bytesReceived, p->length); MUTEX_EXIT(&peer->peer_lock); @@ -1987,7 +1985,7 @@ rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket, tpeer.port = tp->port; tpeer.ifMTU = htons(tp->ifMTU); tpeer.idleWhen = htonl(tp->idleWhen); - tpeer.refCount = htons(rx_AtomicPeek_NL(tp->refCount)); + tpeer.refCount = htons(tp->refCount); tpeer.burstSize = tp->burstSize; tpeer.burst = tp->burst; tpeer.burstWait.sec = htonl(tp->burstWait.sec); @@ -2276,7 +2274,7 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn, p->length + RX_HEADER_SIZE, istack)) != 0) { /* send failed, so let's hurry up the resend, eh? 
*/ if (rx_stats_active) - rx_AtomicIncrement(rx_stats.netSendFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex); p->retryTime = p->timeSent; /* resend it very soon */ clock_Addmsec(&(p->retryTime), 10 + (((afs_uint32) p->backoff) << 8)); @@ -2317,7 +2315,7 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn, dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length)); #endif if (rx_stats_active) - rx_AtomicIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex); + rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex); MUTEX_ENTER(&peer->peer_lock); hadd32(peer->bytesSent, p->length); MUTEX_EXIT(&peer->peer_lock); @@ -2463,7 +2461,7 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn, istack)) != 0) { /* send failed, so let's hurry up the resend, eh? */ if (rx_stats_active) - rx_AtomicIncrement(rx_stats.netSendFailures, rx_stats_mutex); + rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex); for (i = 0; i < len; i++) { p = list[i]; p->retryTime = p->timeSent; /* resend it very soon */ @@ -2501,7 +2499,7 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn, #endif if (rx_stats_active) - rx_AtomicIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex); + rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex); MUTEX_ENTER(&peer->peer_lock); hadd32(peer->bytesSent, p->length); MUTEX_EXIT(&peer->peer_lock); diff --git a/src/rx/rx_pthread.c b/src/rx/rx_pthread.c index c7804234d..d7d07ed16 100644 --- a/src/rx/rx_pthread.c +++ b/src/rx/rx_pthread.c @@ -35,7 +35,6 @@ RCSID # include #endif #include -#include "rx_internal.h" #include #include #include diff --git a/src/rx/rx_rdwr.c b/src/rx/rx_rdwr.c index 86784bafd..a8a5d80c0 100644 --- a/src/rx/rx_rdwr.c +++ b/src/rx/rx_rdwr.c @@ -58,7 +58,6 @@ RCSID #include "rx/rx_kernel.h" #include "rx/rx_clock.h" #include "rx/rx_queue.h" -#include "rx/rx_internal.h" #include "rx/rx.h" #include "rx/rx_globals.h" #include "afs/lock.h" @@ -89,7 +88,6 @@ RCSID # include "rx_user.h" # include "rx_clock.h" # include "rx_queue.h" -# include "rx_internal.h" # include "rx.h" # include "rx_globals.h" #endif /* KERNEL */ diff --git a/src/rx/rx_trace.c b/src/rx/rx_trace.c index cfe581b0e..c35ab218b 100644 --- a/src/rx/rx_trace.c +++ b/src/rx/rx_trace.c @@ -31,7 +31,6 @@ main(int argc, char **argv) #include #include #endif -#include "rx_internal.h" #include "rx.h" #include "rx_globals.h" #include "rx_trace.h" diff --git a/src/rx/rx_user.c b/src/rx/rx_user.c index 000caf77f..fdffd8526 100644 --- a/src/rx/rx_user.c +++ b/src/rx/rx_user.c @@ -48,7 +48,6 @@ RCSID #ifndef AFS_NT40_ENV # include #endif -# include "rx_internal.h" # include "rx.h" # include "rx_globals.h" @@ -199,8 +198,10 @@ rxi_GetHostUDPSocket(u_int ahost, u_short port) if (!greedy) (osi_Msg "%s*WARNING* Unable to increase buffering on socket\n", name); - if (rx_stats_active) { - rx_AtomicSwap(&rx_stats.socketGreedy, greedy, rx_stats_mutex); + if (rx_stats_active) { + MUTEX_ENTER(&rx_stats_mutex); + rx_stats.socketGreedy = greedy; + MUTEX_EXIT(&rx_stats_mutex); } } @@ -700,6 +701,8 @@ rxi_InitPeerParams(struct rx_peer *pp) struct sockaddr_in addr; 
#endif + + LOCK_IF_INIT; if (!Inited) { UNLOCK_IF_INIT; @@ -716,14 +719,14 @@ rxi_InitPeerParams(struct rx_peer *pp) /* try to second-guess IP, and identify which link is most likely to * be used for traffic to/from this host. */ ppaddr = ntohl(pp->host); - + pp->ifMTU = 0; pp->timeout.sec = 2; - pp->rateFlag = 2; /* start timing after two full packets */ + pp->rateFlag = 2; /* start timing after two full packets */ /* I don't initialize these, because I presume they are bzero'd... * pp->burstSize pp->burst pp->burstWait.sec pp->burstWait.usec * pp->timeout.usec */ - + LOCK_IF; for (ix = 0; ix < rxi_numNetAddrs; ++ix) { if ((rxi_NetAddrs[ix] & myNetMasks[ix]) == (ppaddr & myNetMasks[ix])) { @@ -739,7 +742,7 @@ rxi_InitPeerParams(struct rx_peer *pp) } } UNLOCK_IF; - if (!pp->ifMTU) { /* not local */ + if (!pp->ifMTU) { /* not local */ pp->timeout.sec = 3; pp->ifMTU = MIN(rx_MyMaxSendSize, RX_REMOTE_PACKET_SIZE); } diff --git a/src/rx/xdr.c b/src/rx/xdr.c index 4819b6e48..02495fc8f 100644 --- a/src/rx/xdr.c +++ b/src/rx/xdr.c @@ -59,7 +59,6 @@ RCSID #include #endif #include "xdr.h" -#include "rx_internal.h" #include "rx.h" /* diff --git a/src/rx/xdr_array.c b/src/rx/xdr_array.c index 35a6d7a2c..a37bca4d5 100644 --- a/src/rx/xdr_array.c +++ b/src/rx/xdr_array.c @@ -28,7 +28,6 @@ */ #include #include -#include "rx_internal.h" #include "rx.h" RCSID diff --git a/src/rx/xdr_refernce.c b/src/rx/xdr_refernce.c index 8a363f560..e34d94411 100644 --- a/src/rx/xdr_refernce.c +++ b/src/rx/xdr_refernce.c @@ -28,7 +28,6 @@ */ #include #include -#include "rx_internal.h" #include "rx.h" RCSID diff --git a/src/rx/xdr_rx.c b/src/rx/xdr_rx.c index 4e815a154..7016ade1f 100644 --- a/src/rx/xdr_rx.c +++ b/src/rx/xdr_rx.c @@ -53,7 +53,6 @@ RCSID #include "rpc/types.h" #include "rpc/xdr.h" #endif /* !UKERNEL */ -#include "rx/rx_internal.h" #include "rx/rx.h" #else /* KERNEL */ @@ -62,7 +61,6 @@ RCSID #ifndef AFS_NT40_ENV #include #endif -#include "rx_internal.h" #include "rx.h" #include "xdr.h" #endif /* KERNEL */ diff --git a/src/rxdebug/rxdebug.c b/src/rxdebug/rxdebug.c index 5c48a5775..637357b8a 100644 --- a/src/rxdebug/rxdebug.c +++ b/src/rxdebug/rxdebug.c @@ -417,8 +417,6 @@ MainCommand(struct cmd_syndesc *as, void *arock) printf(" busy"); if (tconn.flags & RX_CONN_ATTACHWAIT) printf(" attachWait"); - if (tconn.flags & RX_CLONED_CONNECTION) - printf(" clone"); printf(", "); } printf("security index %d, ", tconn.securityIndex); -- 2.39.5