rx: Don't let timeouts force fast recovery
author Simon Wilkinson <sxw@your-file-system.com>
Mon, 25 Oct 2010 09:14:12 +0000 (10:14 +0100)
committer Derrick Brashear <shadow@dementia.org>
Wed, 27 Oct 2010 00:26:24 +0000 (17:26 -0700)
The current RX implementation goes into fast recovery whenever a
timeout occurs. This is incredibly wasteful, particularly on fast
connections, so remove it in favour of TCP-style behaviour.
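
For background only, a minimal C sketch of the TCP-style distinction the
message alludes to (this is not OpenAFS code; cc_state, on_rto and
on_dup_acks are invented names): a retransmission timeout collapses the
window and re-enters slow start, while fast recovery, which only halves
the window, is reserved for loss signalled by duplicate ACKs rather than
being triggered by every timeout.

#include <stdio.h>

/* Simplified per-flow congestion state, counted in packets. */
struct cc_state {
    unsigned int cwnd;      /* congestion window */
    unsigned int ssthresh;  /* slow-start threshold */
};

/* Retransmission timeout: record half the window, drop back to slow start. */
static void on_rto(struct cc_state *cc)
{
    cc->ssthresh = cc->cwnd / 2 > 2 ? cc->cwnd / 2 : 2;
    cc->cwnd = 1;
}

/* Three duplicate ACKs: fast recovery, roughly halve the window and keep
 * sending (Reno would also inflate by the three duplicates; omitted here). */
static void on_dup_acks(struct cc_state *cc)
{
    cc->ssthresh = cc->cwnd / 2 > 2 ? cc->cwnd / 2 : 2;
    cc->cwnd = cc->ssthresh;
}

int main(void)
{
    struct cc_state cc = { .cwnd = 16, .ssthresh = 64 };

    on_dup_acks(&cc);
    printf("after dup acks: cwnd=%u ssthresh=%u\n", cc.cwnd, cc.ssthresh);

    on_rto(&cc);
    printf("after timeout:  cwnd=%u ssthresh=%u\n", cc.cwnd, cc.ssthresh);
    return 0;
}

This is the standard TCP split (RFC 5681); the diff below simply drops the
per-timeout recovery block from rxi_Start.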

(cherry picked from commit 36e2d13b55085c996d38b30d003296c602ef8ee3)
Reviewed-on: http://gerrit.openafs.org/3138
Reviewed-by: Jeffrey Altman <jaltman@openafs.org>
Tested-by: Derrick Brashear <shadow@dementia.org>
Reviewed-by: Derrick Brashear <shadow@dementia.org>
Change-Id: I43102ffc961d33255c995f4ff84142b7203cd11b
Reviewed-on: http://gerrit.openafs.org/3154
Reviewed-by: Derrick Brashear <shadow@dementia.org>
Tested-by: Derrick Brashear <shadow@dementia.org>
src/rx/rx.c

index 4b1ebb9a7396bfcf72f5af7df0d04c074ee2289b..9027dde30e45d0b7bac9dcad233f3420f52deead 100644 (file)
@@ -5541,7 +5541,6 @@ rxi_Start(struct rxevent *event,
 
     struct rx_packet *p;
     struct rx_packet *nxp;     /* Next pointer for queue_Scan */
-    struct rx_peer *peer = call->conn->peer;
     struct clock now, usenow, retryTime;
     int haveEvent;
     int nXmitPackets;
@@ -5560,45 +5559,8 @@ rxi_Start(struct rxevent *event,
            /* Nothing to do */
            return;
        }
-       /* Timeouts trigger congestion recovery */
-#ifdef  AFS_GLOBAL_RXLOCK_KERNEL
-       if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
-           /* someone else is waiting to start recovery */
-           return;
-       }
-       call->flags |= RX_CALL_FAST_RECOVER_WAIT;
-       rxi_WaitforTQBusy(call);
-#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
-       call->flags &= ~RX_CALL_FAST_RECOVER_WAIT;
-       call->flags |= RX_CALL_FAST_RECOVER;
-       if (peer->maxDgramPackets > 1) {
-           call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
-       } else {
-           call->MTU = MIN(peer->natMTU, peer->maxMTU);
-       }
-       call->ssthresh = MAX(4, MIN((int)call->cwind, (int)call->twind)) >> 1;
-       call->nDgramPackets = 1;
-       call->cwind = 1;
-       call->nextCwind = 1;
-       call->nAcks = 0;
-       call->nNacks = 0;
-       MUTEX_ENTER(&peer->peer_lock);
-       peer->MTU = call->MTU;
-       peer->cwind = call->cwind;
-       peer->nDgramPackets = 1;
-       peer->congestSeq++;
-       call->congestSeq = peer->congestSeq;
-       MUTEX_EXIT(&peer->peer_lock);
-       /* Clear retry times on packets. Otherwise, it's possible for
-        * some packets in the queue to force resends at rates faster
-        * than recovery rates.
-        */
-       for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
-           if (!(p->flags & RX_PKTFLAG_ACKED)) {
-               clock_Zero(&p->retryTime);
-           }
-       }
     }
+
     if (call->error) {
 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
         if (rx_stats_active)