From: Simon Wilkinson
Date: Sat, 22 Oct 2011 15:37:04 +0000 (+0100)
Subject: rx: Turn the rxevent_Cancel macro into a function
X-Git-Tag: upstream/1.8.0_pre1^2~3049
X-Git-Url: https://git.michaelhowe.org/gitweb/?a=commitdiff_plain;h=a7d569c857234626191514e2780368e1b85a6dae;p=packages%2Fo%2Fopenafs.git

rx: Turn the rxevent_Cancel macro into a function

Turn rxevent_Cancel into a function rather than a macro which modifies
its argument as a side effect. rxevent_Cancel now checks whether the
event being cancelled is already NULL, as well as NULLifying the event
when it is actually cancelled.

Update all of the callers to reflect this new API, so that they no
longer do unnecessary work.

Change-Id: I75b68f1c8f1a3023edd6113600663fe2b60d6097
Reviewed-on: http://gerrit.openafs.org/5840
Tested-by: BuildBot
Reviewed-by: Derrick Brashear
---

diff --git a/src/rx/rx.c b/src/rx/rx.c
index 5fb6864b8..cd1aa75f9 100644
--- a/src/rx/rx.c
+++ b/src/rx/rx.c
@@ -674,10 +674,7 @@ rxi_rto_startTimer(struct rx_call *call, int lastPacket, int istack)
 static_inline void
 rxi_rto_cancel(struct rx_call *call)
 {
-    if (!call->resendEvent)
-        return;
-
-    rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
+    rxevent_Cancel(&call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
 }
 
 /*!
@@ -781,7 +778,7 @@ rxi_PostDelayedAckEvent(struct rx_call *call, struct clock *offset)
 
     if (!call->delayedAckEvent ||
         clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
-        rxevent_Cancel(call->delayedAckEvent, call,
+        rxevent_Cancel(&call->delayedAckEvent, call,
                        RX_CALL_REFCOUNT_DELAY);
         MUTEX_ENTER(&rx_refcnt_mutex);
         CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
@@ -1253,7 +1250,7 @@ rxi_DestroyConnectionNoLock(struct rx_connection *conn)
                 /* Push the final acknowledgment out now--there
                  * won't be a subsequent call to acknowledge the
                  * last reply packets */
-                rxevent_Cancel(call->delayedAckEvent, call,
+                rxevent_Cancel(&call->delayedAckEvent, call,
                                RX_CALL_REFCOUNT_DELAY);
                 if (call->state == RX_STATE_PRECALL
                     || call->state == RX_STATE_ACTIVE) {
@@ -1294,7 +1291,7 @@ rxi_DestroyConnectionNoLock(struct rx_connection *conn)
     }
 
     if (conn->delayedAbortEvent) {
-        rxevent_Cancel(conn->delayedAbortEvent, (struct rx_call *)0, 0);
+        rxevent_Cancel(&conn->delayedAbortEvent, NULL, 0);
         packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
         if (packet) {
             MUTEX_ENTER(&conn->conn_data_lock);
@@ -1322,12 +1319,9 @@ rxi_DestroyConnectionNoLock(struct rx_connection *conn)
 
     /* Make sure the connection is completely reset before deleting it. */
     /* get rid of pending events that could zap us later */
-    if (conn->challengeEvent)
-        rxevent_Cancel(conn->challengeEvent, (struct rx_call *)0, 0);
-    if (conn->checkReachEvent)
-        rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
-    if (conn->natKeepAliveEvent)
-        rxevent_Cancel(conn->natKeepAliveEvent, (struct rx_call *)0, 0);
+    rxevent_Cancel(&conn->challengeEvent, NULL, 0);
+    rxevent_Cancel(&conn->checkReachEvent, NULL, 0);
+    rxevent_Cancel(&conn->natKeepAliveEvent, NULL, 0);
 
     /* Add the connection to the list of destroyed connections that
      * need to be cleaned up. This is necessary to avoid deadlocks
@@ -2375,7 +2369,7 @@ rx_EndCall(struct rx_call *call, afs_int32 rc)
                 call->state = RX_STATE_DALLY;
                 rxi_ClearTransmitQueue(call, 0);
                 rxi_rto_cancel(call);
-                rxevent_Cancel(call->keepAliveEvent, call,
+                rxevent_Cancel(&call->keepAliveEvent, call,
                                RX_CALL_REFCOUNT_ALIVE);
             }
         } else {                /* Client connection */
@@ -2393,9 +2387,8 @@ rx_EndCall(struct rx_call *call, afs_int32 rc)
              * and force-send it now.
              */
             if (call->delayedAckEvent) {
-                rxevent_Cancel(call->delayedAckEvent, call,
+                rxevent_Cancel(&call->delayedAckEvent, call,
                                RX_CALL_REFCOUNT_DELAY);
-                call->delayedAckEvent = NULL;
                 rxi_SendDelayedAck(NULL, call, NULL, 0);
             }
 
@@ -3898,7 +3891,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
             if (rx_stats_active)
                 rx_atomic_inc(&rx_stats.dupPacketsRead);
             dpf(("packet %"AFS_PTR_FMT" dropped on receipt - duplicate\n", np));
-            rxevent_Cancel(call->delayedAckEvent, call,
+            rxevent_Cancel(&call->delayedAckEvent, call,
                            RX_CALL_REFCOUNT_DELAY);
             np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
             ackNeeded = 0;
@@ -3988,7 +3981,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
         if (seq < call->rnext) {
             if (rx_stats_active)
                 rx_atomic_inc(&rx_stats.dupPacketsRead);
-            rxevent_Cancel(call->delayedAckEvent, call,
+            rxevent_Cancel(&call->delayedAckEvent, call,
                            RX_CALL_REFCOUNT_DELAY);
             np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
             ackNeeded = 0;
@@ -4000,7 +3993,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
          * accomodated by the current window, then send a negative
          * acknowledge and drop the packet */
         if ((call->rnext + call->rwind) <= seq) {
-            rxevent_Cancel(call->delayedAckEvent, call,
+            rxevent_Cancel(&call->delayedAckEvent, call,
                            RX_CALL_REFCOUNT_DELAY);
             np = rxi_SendAck(call, np, serial, RX_ACK_EXCEEDS_WINDOW,
                              istack);
@@ -4016,7 +4009,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
                 if (seq == tp->header.seq) {
                     if (rx_stats_active)
                         rx_atomic_inc(&rx_stats.dupPacketsRead);
-                    rxevent_Cancel(call->delayedAckEvent, call,
+                    rxevent_Cancel(&call->delayedAckEvent, call,
                                    RX_CALL_REFCOUNT_DELAY);
                     np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
                                      istack);
@@ -4126,10 +4119,10 @@ rxi_ReceiveDataPacket(struct rx_call *call,
      * received. Always send a soft ack for the last packet in
      * the server's reply. */
     if (ackNeeded) {
-        rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
+        rxevent_Cancel(&call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
         np = rxi_SendAck(call, np, serial, ackNeeded, istack);
     } else if (call->nSoftAcks > (u_short) rxi_SoftAckRate) {
-        rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
+        rxevent_Cancel(&call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
         np = rxi_SendAck(call, np, serial, RX_ACK_IDLE, istack);
     } else if (call->nSoftAcks) {
         if (haveLast && !(flags & RX_CLIENT_INITIATED))
@@ -4137,7 +4130,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
         else
             rxi_PostDelayedAckEvent(call, &rx_softAckDelay);
     } else if (call->flags & RX_CALL_RECEIVE_DONE) {
-        rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
+        rxevent_Cancel(&call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
     }
 
     return np;
@@ -4702,7 +4695,7 @@ rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
         && call->tfirst + call->nSoftAcked >= call->tnext) {
         call->state = RX_STATE_DALLY;
         rxi_ClearTransmitQueue(call, 0);
-        rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
+        rxevent_Cancel(&call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
     } else if (!queue_IsEmpty(&call->tq)) {
         rxi_Start(call, istack);
     }
@@ -5077,7 +5070,7 @@ rxi_SendCallAbort(struct rx_call *call, struct rx_packet *packet,
     if (force || rxi_callAbortThreshhold == 0
         || call->abortCount < rxi_callAbortThreshhold) {
         if (call->delayedAbortEvent) {
-            rxevent_Cancel(call->delayedAbortEvent, call,
+            rxevent_Cancel(&call->delayedAbortEvent, call,
                            RX_CALL_REFCOUNT_ABORT);
         }
         error = htonl(call->error);
@@ -5123,9 +5116,8 @@ rxi_SendConnectionAbort(struct rx_connection *conn,
 
     if (force || rxi_connAbortThreshhold == 0
         || conn->abortCount < rxi_connAbortThreshhold) {
-        if (conn->delayedAbortEvent) {
-            rxevent_Cancel(conn->delayedAbortEvent, (struct rx_call *)0, 0);
-        }
+
+        rxevent_Cancel(&conn->delayedAbortEvent, NULL, 0);
         error = htonl(conn->error);
         conn->abortCount++;
         MUTEX_EXIT(&conn->conn_data_lock);
@@ -5159,13 +5151,10 @@ rxi_ConnectionError(struct rx_connection *conn,
 
     dpf(("rxi_ConnectionError conn %"AFS_PTR_FMT" error %d\n", conn, error));
     MUTEX_ENTER(&conn->conn_data_lock);
-    if (conn->challengeEvent)
-        rxevent_Cancel(conn->challengeEvent, (struct rx_call *)0, 0);
-    if (conn->natKeepAliveEvent)
-        rxevent_Cancel(conn->natKeepAliveEvent, (struct rx_call *)0, 0);
+    rxevent_Cancel(&conn->challengeEvent, NULL, 0);
+    rxevent_Cancel(&conn->natKeepAliveEvent, NULL, 0);
     if (conn->checkReachEvent) {
-        rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
-        conn->checkReachEvent = 0;
+        rxevent_Cancel(&conn->checkReachEvent, NULL, 0);
         conn->flags &= ~(RX_CONN_ATTACHWAIT|RX_CONN_NAT_PING);
         MUTEX_ENTER(&rx_refcnt_mutex);
         conn->refCount--;
@@ -5246,12 +5235,11 @@ rxi_ResetCall(struct rx_call *call, int newcall)
         call->arrivalProc = (void (*)())0;
     }
 
-    if (call->growMTUEvent)
-        rxevent_Cancel(call->growMTUEvent, call,
-            RX_CALL_REFCOUNT_ALIVE);
+
+    rxevent_Cancel(&call->growMTUEvent, call, RX_CALL_REFCOUNT_ALIVE);
 
     if (call->delayedAbortEvent) {
-        rxevent_Cancel(call->delayedAbortEvent, call, RX_CALL_REFCOUNT_ABORT);
+        rxevent_Cancel(&call->delayedAbortEvent, call, RX_CALL_REFCOUNT_ABORT);
         packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
         if (packet) {
             rxi_SendCallAbort(call, packet, 0, 1);
@@ -5381,7 +5369,7 @@ rxi_ResetCall(struct rx_call *call, int newcall)
 #endif /* RX_ENABLE_LOCKS */
 
     rxi_KeepAliveOff(call);
-    rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
+    rxevent_Cancel(&call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
 }
 
 /* Send an acknowledge for the indicated packet (seq,serial) of the
@@ -5744,7 +5732,7 @@ rxi_SendList(struct rx_call *call, struct xmitlist *xmit,
 
     /* Since we're about to send a data packet to the peer, it's
      * safe to nuke any scheduled end-of-packets ack */
-    rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
+    rxevent_Cancel(&call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
 
     MUTEX_EXIT(&call->lock);
     MUTEX_ENTER(&rx_refcnt_mutex);
@@ -6145,7 +6133,7 @@ rxi_Send(struct rx_call *call, struct rx_packet *p,
 
     /* Since we're about to send SOME sort of packet to the peer, it's
      * safe to nuke any scheduled end-of-packets ack */
-    rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
+    rxevent_Cancel(&call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
 
     /* Actually send the packet, filling in more connection-specific fields */
     MUTEX_EXIT(&call->lock);
@@ -6248,14 +6236,13 @@ rxi_CheckCall(struct rx_call *call)
     } else {
 #ifdef RX_ENABLE_LOCKS
         /* Cancel pending events */
-        rxevent_Cancel(call->delayedAckEvent, call,
+        rxevent_Cancel(&call->delayedAckEvent, call,
                        RX_CALL_REFCOUNT_DELAY);
         rxi_rto_cancel(call);
-        rxevent_Cancel(call->keepAliveEvent, call,
+        rxevent_Cancel(&call->keepAliveEvent, call,
+                       RX_CALL_REFCOUNT_ALIVE);
+        rxevent_Cancel(&call->growMTUEvent, call,
                        RX_CALL_REFCOUNT_ALIVE);
-        if (call->growMTUEvent)
-            rxevent_Cancel(call->growMTUEvent, call,
-                           RX_CALL_REFCOUNT_ALIVE);
         MUTEX_ENTER(&rx_refcnt_mutex);
         if (call->refCount == 0) {
             rxi_FreeCall(call, haveCTLock);
diff --git a/src/rx/rx_event.c b/src/rx/rx_event.c
index 6bfbc3317..39798cf2c 100644
--- a/src/rx/rx_event.c
+++ b/src/rx/rx_event.c
@@ -337,9 +337,10 @@ int rxevent_Cancel_type = 0;
 #endif
 
 void
-rxevent_Cancel_1(struct rxevent *ev, struct rx_call *call,
-                 int type)
+rxevent_Cancel(struct rxevent **evp, struct rx_call *call, int type)
 {
+    struct rxevent *ev = *evp;
+
 #ifdef RXDEBUG
     if (rx_Log_event) {
         struct clock now;
@@ -359,6 +360,9 @@ rxevent_Cancel_1(struct rxevent *ev, struct rx_call *call,
         MUTEX_EXIT(&rxevent_lock);
         return;
     }
+
+    *evp = NULL;
+
 #ifdef RX_ENABLE_LOCKS
     /* It's possible we're currently processing this event. */
     if (queue_IsOnQueue(ev)) {
diff --git a/src/rx/rx_event.h b/src/rx/rx_event.h
index e31d1b46b..e1921a205 100644
--- a/src/rx/rx_event.h
+++ b/src/rx/rx_event.h
@@ -83,33 +83,9 @@ extern struct rxevent *rxevent_Post( /* when, func, arg, arg1 */ );
  * pending. Also see the warning, above. The event pointer supplied
  * is zeroed.
  */
-#ifdef RX_ENABLE_LOCKS
-#ifdef RX_REFCOUNT_CHECK
-#define rxevent_Cancel(event_ptr, call, type) \
-    BEGIN \
-        if (event_ptr) { \
-            rxevent_Cancel_1(event_ptr, call, type); \
-            event_ptr = NULL; \
-        } \
-    END
-#else /* RX_REFCOUNT_CHECK */
-#define rxevent_Cancel(event_ptr, call, type) \
-    BEGIN \
-        if (event_ptr) { \
-            rxevent_Cancel_1(event_ptr, call, 0); \
-            event_ptr = NULL; \
-        } \
-    END
-#endif /* RX_REFCOUNT_CHECK */
-#else /* RX_ENABLE_LOCKS */
-#define rxevent_Cancel(event_ptr, call, type) \
-    BEGIN \
-        if (event_ptr) { \
-            rxevent_Cancel_1(event_ptr, NULL, 0); \
-            event_ptr = NULL; \
-        } \
-    END
-#endif /* RX_ENABLE_LOCKS */
+#if 0
+extern struct rxevent *rxevent_Cancel(struct rxevent *, struct rx_call *, int)
+#endif
 
 /* The actions specified for each event that has reached the current clock
  * time will be taken.  The current time returned by GetTime is used
diff --git a/src/rx/rx_globals.h b/src/rx/rx_globals.h
index 1e51890d0..f8c01434e 100644
--- a/src/rx/rx_globals.h
+++ b/src/rx/rx_globals.h
@@ -526,9 +526,12 @@ EXT afs_kmutex_t rx_connHashTable_lock;
 #define PEER_HASH(host, port)  ((host ^ port) % rx_hashTableSize)
 
 /* Forward definitions of internal procedures */
-#define rxi_ChallengeOff(conn) rxevent_Cancel((conn)->challengeEvent, (struct rx_call*)0, 0);
-#define rxi_KeepAliveOff(call) rxevent_Cancel((call)->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE)
-#define rxi_NatKeepAliveOff(conn) rxevent_Cancel((conn)->natKeepAliveEvent, (struct rx_call*)0, 0)
+#define rxi_ChallengeOff(conn) \
+    rxevent_Cancel(&(conn)->challengeEvent, NULL, 0)
+#define rxi_KeepAliveOff(call) \
+    rxevent_Cancel(&(call)->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE)
+#define rxi_NatKeepAliveOff(conn) \
+    rxevent_Cancel(&(conn)->natKeepAliveEvent, NULL, 0)
 
 #define rxi_AllocSecurityObject() rxi_Alloc(sizeof(struct rx_securityClass))
 #define rxi_FreeSecurityObject(obj) rxi_Free(obj, sizeof(struct rx_securityClass))
diff --git a/src/rx/rx_prototypes.h b/src/rx/rx_prototypes.h
index f07439fed..6c8d46d66 100644
--- a/src/rx/rx_prototypes.h
+++ b/src/rx/rx_prototypes.h
@@ -290,9 +290,8 @@ extern struct rxevent *rxevent_Post(struct clock *when, struct clock *now,
 extern void shutdown_rxevent(void);
 extern struct rxepoch *rxepoch_Allocate(struct clock *when);
 extern void rxevent_Init(int nEvents, void (*scheduler) (void));
-extern void rxevent_Cancel_1(struct rxevent *ev,
-                             struct rx_call *call,
-                             int type);
+extern void rxevent_Cancel(struct rxevent **ev, struct rx_call *call,
+                           int type);
 extern int rxevent_RaiseEvents(struct clock *next);
 
diff --git a/src/rx/rx_rdwr.c b/src/rx/rx_rdwr.c
index 6fc124bdd..267fe5083 100644
--- a/src/rx/rx_rdwr.c
+++ b/src/rx/rx_rdwr.c
@@ -172,7 +172,7 @@ rxi_ReadProc(struct rx_call *call, char *buf,
                 call->nHardAcks++;
                 if (!(call->flags & RX_CALL_RECEIVE_DONE)) {
                     if (call->nHardAcks > (u_short) rxi_HardAckRate) {
-                        rxevent_Cancel(call->delayedAckEvent, call,
+                        rxevent_Cancel(&call->delayedAckEvent, call,
                                        RX_CALL_REFCOUNT_DELAY);
                         rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
                     } else {
@@ -523,7 +523,7 @@ rxi_FillReadVec(struct rx_call *call, afs_uint32 serial)
      * send a hard ack. */
     if (didConsume && (!(call->flags & RX_CALL_RECEIVE_DONE))) {
         if (call->nHardAcks > (u_short) rxi_HardAckRate) {
-            rxevent_Cancel(call->delayedAckEvent, call,
+            rxevent_Cancel(&call->delayedAckEvent, call,
                            RX_CALL_REFCOUNT_DELAY);
             rxi_SendAck(call, 0, serial, RX_ACK_DELAY, 0);
             didHardAck = 1;