/* Send all of the packets in the list in a single datagram */
static void
rxi_SendList(struct rx_call *call, struct xmitlist *xmit,
- int istack, int moreFlag, struct clock *now,
- struct clock *retryTime)
+ int istack, int moreFlag)
{
int i;
int requestAck = 0;
int lastPacket = 0;
+ struct clock now, retryTime;
struct rx_connection *conn = call->conn;
struct rx_peer *peer = conn->peer;
peer->nSent += xmit->len;
if (xmit->resending)
peer->reSends += xmit->len;
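+ /* Read the peer's current timeout while peer_lock is still held; the
+  * burst's retransmit time is derived from it below. */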
+ retryTime = peer->timeout;
MUTEX_EXIT(&peer->peer_lock);
if (rx_stats_active) {
rx_atomic_add(&rx_stats.dataPacketsSent, xmit->len);
}
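+ /* Stamp the burst with the current time and compute one shared
+  * retransmit time (now + peer->timeout) for every packet in it; if we
+  * back off, the whole burst backs off together. The clock is read only
+  * after peer_lock is dropped, so nothing can block between setting
+  * 'now' and using it. */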
+ clock_GetTime(&now);
+ clock_Add(&retryTime, &now);
+
if (xmit->list[xmit->len - 1]->header.flags & RX_LAST_PACKET) {
lastPacket = 1;
}
for (i = 0; i < xmit->len; i++) {
struct rx_packet *packet = xmit->list[i];
- packet->retryTime = *retryTime;
+ packet->retryTime = retryTime;
if (packet->header.serial) {
/* Exponentially backoff retry times */
if (packet->backoff < MAXBACKOFF) {
    /* so it can't stay == 0 */
    packet->backoff = (packet->backoff << 1) + 1;
}
/* Record the time sent */
- packet->timeSent = *now;
+ packet->timeSent = now;
/* Ask for an ack on retransmitted packets, on every other packet
 * if the peer doesn't support slow start. Ask for an ack on every
 * packet until the congestion window reaches the ack rate. */
requestAck = 1;
} else {
/* Improved RTO calculation - not Karn */
- packet->firstSent = *now;
+ packet->firstSent = now;
if (!lastPacket && (call->cwind <= (u_short) (conn->ackRate + 1)
|| (!(call->flags & RX_CALL_SLOW_START_OK)
&& (packet->header.seq & 1)))) {
if (i < xmit->len - 1 || moreFlag) {
packet->header.flags |= RX_MORE_PACKETS;
}
-
- /* Install the new retransmit time for the packet, and
- * record the time sent */
- packet->timeSent = *now;
}
if (requestAck) {
static void
rxi_SendXmitList(struct rx_call *call, struct rx_packet **list, int len,
- int istack, struct clock *now, struct clock *retryTime)
+ int istack)
{
int i;
struct xmitlist working;
* set into the 'last' one, and resets the working set */
if (last.len > 0) {
- rxi_SendList(call, &last, istack, 1, now, retryTime);
+ rxi_SendList(call, &last, istack, 1);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
|| list[i]->header.serial
|| list[i]->length != RX_JUMBOBUFFERSIZE) {
if (last.len > 0) {
- rxi_SendList(call, &last, istack, 1, now, retryTime);
+ rxi_SendList(call, &last, istack, 1);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
if (call->error
morePackets = 1;
}
if (last.len > 0) {
- rxi_SendList(call, &last, istack, morePackets, now, retryTime);
+ rxi_SendList(call, &last, istack, morePackets);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
return;
}
if (morePackets) {
- rxi_SendList(call, &working, istack, 0, now, retryTime);
+ rxi_SendList(call, &working, istack, 0);
}
} else if (last.len > 0) {
- rxi_SendList(call, &last, istack, 0, now, retryTime);
+ rxi_SendList(call, &last, istack, 0);
/* Packets which are in 'working' are not sent by this call */
}
}
}
if (queue_IsNotEmpty(&call->tq)) { /* If we have anything to send */
- /* Get clock to compute the re-transmit time for any packets
- * in this burst. Note, if we back off, it's reasonable to
- * back off all of the packets in the same manner, even if
- * some of them have been retransmitted more times than more
- * recent additions.
- * Do a dance to avoid blocking after setting now. */
- MUTEX_ENTER(&peer->peer_lock);
- retryTime = peer->timeout;
- MUTEX_EXIT(&peer->peer_lock);
- clock_GetTime(&now);
- clock_Add(&retryTime, &now);
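+ /* The retransmit time for each burst is now computed inside
+  * rxi_SendList; here we only need the current time to decide which
+  * packets are due to be (re)sent. */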
+ clock_GetTime(&now);
usenow = now;
+
/* Send (or resend) any packets that need it, subject to
* window restrictions and congestion burst control
* restrictions. Ask for an ack on the last packet sent in
if (!clock_Lt(&now, &p->retryTime)) {
if (nXmitPackets == maxXmitPackets) {
rxi_SendXmitList(call, call->xmitList,
- nXmitPackets, istack, &now,
- &retryTime);
+ nXmitPackets, istack);
goto restart;
}
- dpf(("call %d xmit packet %"AFS_PTR_FMT" now %u.%06u retryTime %u.%06u nextRetry %u.%06u\n",
+ dpf(("call %d xmit packet %"AFS_PTR_FMT" now %u.%06u retryTime %u.%06u\n",
*(call->callNumber), p,
now.sec, now.usec,
- p->retryTime.sec, p->retryTime.usec,
- retryTime.sec, retryTime.usec));
+ p->retryTime.sec, p->retryTime.usec));
call->xmitList[nXmitPackets++] = p;
}
}
* ready to send. Now we loop to send the packets */
if (nXmitPackets > 0) {
rxi_SendXmitList(call, call->xmitList, nXmitPackets,
- istack, &now, &retryTime);
+ istack);
}
#ifdef AFS_GLOBAL_RXLOCK_KERNEL