call->nCwindAcks = 0;
} else if (nNacked && call->nNacks >= (u_short) rx_nackThreshold) {
/* Three negative acks in a row trigger congestion recovery */
-#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- MUTEX_EXIT(&peer->peer_lock);
- if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
- /* someone else is waiting to start recovery */
- return np;
- }
- call->flags |= RX_CALL_FAST_RECOVER_WAIT;
- rxi_WaitforTQBusy(call);
- MUTEX_ENTER(&peer->peer_lock);
-#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- call->flags &= ~RX_CALL_FAST_RECOVER_WAIT;
call->flags |= RX_CALL_FAST_RECOVER;
call->ssthresh = MAX(4, MIN((int)call->cwind, (int)call->twind)) >> 1;
call->cwind =
int istack)
{
int i;
+ int recovery;
struct xmitlist working;
struct xmitlist last;
working.len = 0;
working.resending = 0;
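+ /* Remember whether the call was already in fast recovery when this
+  * send began; if recovery starts while we are sending, the checks
+  * below stop the send. */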
+ recovery = call->flags & RX_CALL_FAST_RECOVER;
+
for (i = 0; i < len; i++) {
/* Does the current packet force us to flush the current list? */
if (working.len > 0
rxi_SendList(call, &last, istack, 1);
/* If the call has entered an error state, or if it entered
 * congestion recovery mode while we were sending, stop sending */
- if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
+ if (call->error
+ || (!recovery && (call->flags & RX_CALL_FAST_RECOVER)))
return;
}
last = working;
/* If the call has entered an error state, or if it entered
 * congestion recovery mode while we were sending, stop sending */
if (call->error
- || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
+ || (!recovery && (call->flags & RX_CALL_FAST_RECOVER)))
return;
}
last = working;
rxi_SendList(call, &last, istack, morePackets);
/* If the call has entered an error state, or if it entered
 * congestion recovery mode while we were sending, stop sending */
- if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
+ if (call->error
+ || (!recovery && (call->flags & RX_CALL_FAST_RECOVER)))
return;
}
if (morePackets) {
goto out;
}
-#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
- /* Someone else is waiting to start recovery */
- goto out;
- }
- call->flags |= RX_CALL_FAST_RECOVER_WAIT;
- rxi_WaitforTQBusy(call);
- call->flags &= ~RX_CALL_FAST_RECOVER_WAIT;
- if (call->error)
- goto out;
-#endif
-
/* We're in loss recovery */
call->flags |= RX_CALL_FAST_RECOVER;
nXmitPackets = 0;
maxXmitPackets = MIN(call->twind, call->cwind);
for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
- if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
- /* We shouldn't be sending packets if a thread is waiting
- * to initiate congestion recovery */
- dpf(("call %d waiting to initiate fast recovery\n",
- *(call->callNumber)));
- break;
- }
if ((nXmitPackets)
&& (call->flags & RX_CALL_FAST_RECOVER)) {
/* Only send one packet during fast recovery */
break;
}
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- /*
- * TQ references no longer protected by this flag; they must remain
- * protected by the global lock.
- */
- if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
- call->flags &= ~RX_CALL_TQ_BUSY;
- rxi_WakeUpTransmitQueue(call);
- return;
- }
if (call->error) {
/* We went into the error state while sending packets. Now is
* the time to reset the call. This will also inform the using
call->tqc++;
#endif /* RXDEBUG_PACKET */
cp = (struct rx_packet *)0;
- if (!
- (call->
- flags & (RX_CALL_FAST_RECOVER |
- RX_CALL_FAST_RECOVER_WAIT))) {
+ /* If the call is in recovery, let it exhaust its current
+ * retransmit queue before forcing it to send new packets
+ */
+ if (!(call->flags & RX_CALL_FAST_RECOVER)) {
rxi_Start(call, 0);
}
} else if (cp) {
queue_SpliceAppend(&call->tq, &tmpq);
- if (!(call->flags & (RX_CALL_FAST_RECOVER | RX_CALL_FAST_RECOVER_WAIT))) {
+ /* If the call is in recovery, let it exhaust its current retransmit
+ * queue before forcing it to send new packets
+ */
+ if (!(call->flags & RX_CALL_FAST_RECOVER)) {
rxi_Start(call, 0);
}
#ifdef RXDEBUG_PACKET
call->tqc++;
#endif /* RXDEBUG_PACKET */
- if (!
- (call->
- flags & (RX_CALL_FAST_RECOVER | RX_CALL_FAST_RECOVER_WAIT))) {
+
+ /* If the call is in recovery, let it exhaust its current retransmit
+ * queue before forcing it to send new packets
+ */
+ if (!(call->flags & RX_CALL_FAST_RECOVER)) {
rxi_Start(call, 0);
}
MUTEX_EXIT(&call->lock);