rx: Don't malloc the xmit list
author    Simon Wilkinson <sxw@your-file-system.com>
          Sun, 10 Oct 2010 12:04:41 +0000 (08:04 -0400)
committer Derrick Brashear <shadow@dementia.org>
          Tue, 26 Oct 2010 00:49:34 +0000 (17:49 -0700)
Building the transmit list happens in a time-critical section of
code. Using malloc to allocate the list that holds the packets to
be transmitted slows down this critical section. Instead, just
allocate the space as part of the call structure.
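
A minimal sketch of the idea, using simplified, hypothetical types
(the real code embeds the array in struct rx_call, bounded by
RX_MAXACKS, as the diff below shows):

    /* Simplified stand-ins; not the actual rx declarations. */
    #define MAX_XMIT 255                  /* one ack can't cover more packets */

    struct packet;                        /* stand-in for struct rx_packet */

    struct call {
        /* ... other per-call state ... */
        struct packet *xmitList[MAX_XMIT];   /* fixed storage, no malloc/free */
    };

    /* Old hot path, per pass:  list = osi_Alloc(n * sizeof(*list));
     *                          ... send ...;  osi_Free(list, ...);
     * New hot path:            fill call->xmitList[] and send -- the
     *                          allocator is never touched. */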

Locking of xmitList is somewhat tricky, as the call->lock is
dropped over calls to sendmsg(). However, the xmitList is protected
by the TQ_BUSY call flag, which prevents multiple threads from
using the transmit queue, and hence the xmitList, simultaneously.
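
A rough sketch of that guard pattern, written with POSIX threads and
illustrative names (the real code sets RX_CALL_TQ_BUSY on struct
rx_call under call->lock and uses rx's own locking macros and waiter
accounting):

    #include <pthread.h>

    #define CALL_TQ_BUSY 0x1              /* illustrative; rx uses RX_CALL_TQ_BUSY */

    struct xcall {                        /* simplified stand-in for struct rx_call */
        pthread_mutex_t lock;
        pthread_cond_t  tq_idle;
        int             flags;
        /* struct rx_packet *xmitList[RX_MAXACKS]; */
    };

    static void transmit_window(struct xcall *call)
    {
        pthread_mutex_lock(&call->lock);
        while (call->flags & CALL_TQ_BUSY)   /* another thread owns the tq */
            pthread_cond_wait(&call->tq_idle, &call->lock);
        call->flags |= CALL_TQ_BUSY;         /* claim the transmit queue */

        pthread_mutex_unlock(&call->lock);
        /* Fill call->xmitList[] and call sendmsg() here: safe with the
         * lock dropped, because the flag keeps other threads out. */
        pthread_mutex_lock(&call->lock);

        call->flags &= ~CALL_TQ_BUSY;        /* release the transmit queue */
        pthread_cond_broadcast(&call->tq_idle);
        pthread_mutex_unlock(&call->lock);
    }

Because only the holder of the flag touches xmitList, embedding the
array in the call structure needs no additional locking.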

Reviewed-on: http://gerrit.openafs.org/2957
Reviewed-by: Jeffrey Altman <jaltman@openafs.org>
Tested-by: Jeffrey Altman <jaltman@openafs.org>
(cherry picked from commit 09aec1539dbe4e4c72ec7acadf2d25e6f93c5982)
Change-Id: I589fa248a124a15248e23b6154e193e4d136fde8
Reviewed-on: http://gerrit.openafs.org/3102
Tested-by: Derrick Brashear <shadow@dementia.org>
Reviewed-by: Derrick Brashear <shadow@dementia.org>
src/rx/rx.c
src/rx/rx.h

diff --git a/src/rx/rx.c b/src/rx/rx.c
index cf325a72acc4f650a0e53231616ffbeb3d831b0a..fe6846071995a49bd74d1bd5ce8bdd8549b9ec3f 100644
--- a/src/rx/rx.c
+++ b/src/rx/rx.c
@@ -5552,7 +5552,6 @@ rxi_Start(struct rxevent *event,
     int haveEvent;
     int nXmitPackets;
     int maxXmitPackets;
-    struct rx_packet **xmitList;
     int resending = 0;
 
     /* If rxi_Start is being called as a result of a resend event,
@@ -5654,15 +5653,6 @@ rxi_Start(struct rxevent *event,
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
                nXmitPackets = 0;
                maxXmitPackets = MIN(call->twind, call->cwind);
-               xmitList = (struct rx_packet **)
-#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
-                   /* XXXX else we must drop any mtx we hold */
-                   afs_osi_Alloc_NoSleep(maxXmitPackets * sizeof(struct rx_packet *));
-#else
-               osi_Alloc(maxXmitPackets * sizeof(struct rx_packet *));
-#endif
-               if (xmitList == NULL)
-                   osi_Panic("rxi_Start, failed to allocate xmit list");
                for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
                    if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
                        /* We shouldn't be sending packets if a thread is waiting
@@ -5716,11 +5706,9 @@ rxi_Start(struct rxevent *event,
                    /* Transmit the packet if it needs to be sent. */
                    if (!clock_Lt(&now, &p->retryTime)) {
                        if (nXmitPackets == maxXmitPackets) {
-                           rxi_SendXmitList(call, xmitList, nXmitPackets,
-                                            istack, &now, &retryTime,
-                                            resending);
-                           osi_Free(xmitList, maxXmitPackets *
-                                    sizeof(struct rx_packet *));
+                           rxi_SendXmitList(call, call->xmitList,
+                                            nXmitPackets, istack, &now, 
+                                            &retryTime, resending);
                            goto restart;
                        }
                         dpf(("call %d xmit packet %"AFS_PTR_FMT" now %u.%06u retryTime %u.%06u nextRetry %u.%06u\n",
@@ -5728,18 +5716,16 @@ rxi_Start(struct rxevent *event,
                               now.sec, now.usec,
                               p->retryTime.sec, p->retryTime.usec,
                               retryTime.sec, retryTime.usec));
-                       xmitList[nXmitPackets++] = p;
+                       call->xmitList[nXmitPackets++] = p;
                    }
                }
 
                /* xmitList now hold pointers to all of the packets that are
                 * ready to send. Now we loop to send the packets */
                if (nXmitPackets > 0) {
-                   rxi_SendXmitList(call, xmitList, nXmitPackets, istack,
-                                    &now, &retryTime, resending);
+                   rxi_SendXmitList(call, call->xmitList, nXmitPackets,
+                                    istack, &now, &retryTime, resending);
                }
-               osi_Free(xmitList,
-                        maxXmitPackets * sizeof(struct rx_packet *));
 
 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
                /*
diff --git a/src/rx/rx.h b/src/rx/rx.h
index 16957b3a305346eab7d1d4fd5e835a748153b627..8d3a1d9f1b3ecdd0fd58db2e2e51be5d222ed4e6 100644
--- a/src/rx/rx.h
+++ b/src/rx/rx.h
@@ -459,6 +459,9 @@ struct rx_peer {
 #define        RX_SERVER_CONNECTION    1
 #endif /* !KDUMP_RX_LOCK */
 
+/* Maximum number of acknowledgements in an acknowledge packet */
+#define        RX_MAXACKS          255
+
 /* Call structure:  only instantiated for active calls and dallying server calls.  The permanent call state (i.e. the call number as well as state shared with other calls associated with this connection) is maintained in the connection structure. */
 #ifdef KDUMP_RX_LOCK
 struct rx_call_rx_lock {
@@ -583,6 +586,8 @@ struct rx_call {
     afs_hyper_t bytesRcvd;     /* Number bytes received */
     u_short tqWaiters;
 
+    struct rx_packet *xmitList[RX_MAXACKS]; /* Can't xmit more than we ack */
+                                /* Protected by setting RX_CALL_TQ_BUSY */
 #ifdef ADAPT_WINDOW
     struct clock pingRequestTime;
 #endif
@@ -634,8 +639,6 @@ struct rx_call {
 #define RX_CALL_HAVE_LAST      32768   /* Last packet has been received */
 #define RX_CALL_NEED_START     0x10000 /* tells rxi_Start to start again */
 
-/* Maximum number of acknowledgements in an acknowledge packet */
-#define        RX_MAXACKS          255
 
 /* The structure of the data portion of an acknowledge packet: An acknowledge
  * packet is in network byte order at all times.  An acknowledgement is always