#define INIT_PTHREAD_LOCKS
#endif
-extern void rxi_DeleteCachedConnections(void);
-
/* Variables for handling the minProcs implementation. availProcs gives the
 * number of threads available in the pool at this moment (not counting
 * threads that are currently busy executing requests).
 */
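/* Note (illustrative, not part of this change): assuming rxi_availProcs and
 * rxi_minDeficit carry the meanings described above, the per-service quota
 * test in QuotaOK() below reduces to roughly:
 *
 *     if (aservice->nRequestsRunning < aservice->minProcs)
 *         return 1;                 // still under this service's minimum
 *     if (rxi_availProcs > rxi_minDeficit)
 *         return 1;                 // spare threads beyond the minProcs reserve
 *     return 0;
 *
 * Treat this as a reading aid, not a drop-in replacement for the real check,
 * which also enforces the per-service maxProcs ceiling.
 */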
-extern void rxi_Delay(int);
-
-static int rxi_ServerThreadSelectingCall;
-
#ifdef RX_ENABLE_LOCKS
+static int rxi_ServerThreadSelectingCall;
static afs_kmutex_t rx_rpc_stats;
void rxi_StartUnlocked();
#endif
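/* Note: rxi_ServerThreadSelectingCall appears to be referenced only from the
 * fine-grain locking (RX_ENABLE_LOCKS) paths, so its definition moves inside
 * the #ifdef above; in non-locking builds the old placement defined a static
 * variable that was never used, which compilers warn about.
 */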
#define CLEAR_CALL_QUEUE_LOCK(C)
#endif /* RX_ENABLE_LOCKS */
static void rxi_DestroyConnectionNoLock();
-void rxi_DestroyConnection();
-void rxi_CleanupConnection();
struct rx_serverQueueEntry *rx_waitForPacket = 0;
/* ------------Exported Interfaces------------- */
}
#else /* RX_ENABLE_LOCKS */
-static QuotaOK(aservice)
+static int QuotaOK(aservice)
register struct rx_service *aservice; {
int rc=0;
/* under min quota, we're OK */
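/* Note: the only change to QuotaOK() is the explicit `int` return type.
 * Pre-C99 compilers accept `static QuotaOK(aservice)` and default the return
 * type to int ("implicit int"), but newer compilers warn and C99 drops the
 * rule, so spelling the type out is the portable form.  Several rxi_*
 * helpers below get the same treatment.
 */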
if (donateMe) {
#ifndef AFS_NT40_ENV
#ifndef KERNEL
- int code;
char name[32];
#ifdef AFS_PTHREAD_ENV
pid_t pid;
pid = (pid_t) pthread_self();
#else /* AFS_PTHREAD_ENV */
PROCESS pid;
- code = LWP_CurrentProcess(&pid);
+ LWP_CurrentProcess(&pid);
#endif /* AFS_PTHREAD_ENV */
sprintf(name,"srv_%d", ++nProcs);
return call;
}
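/* Note: `code` was only ever assigned from LWP_CurrentProcess() and never
 * read, so it is dropped and the return value ignored.  Assuming nProcs
 * starts at zero, the sprintf above names server threads "srv_1", "srv_2",
 * ... in creation order; name[32] is ample for any plausible thread count.
 */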
+int
rxi_HasActiveCalls(aconn)
register struct rx_connection *aconn; {
register int i;
return 0;
}
+int
rxi_GetCallNumberVector(aconn, aint32s)
register struct rx_connection *aconn;
register afs_int32 *aint32s; {
return 0;
}
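/* Note: as with QuotaOK(), rxi_HasActiveCalls(), rxi_GetCallNumberVector()
 * and rxi_SetCallNumberVector() previously relied on implicit int; the added
 * `int` lines make the return type explicit.  For readers new to the API, a
 * hypothetical caller would save and restore a connection's per-channel call
 * numbers like this:
 *
 *     afs_int32 nums[RX_MAXCALLS];
 *     rxi_GetCallNumberVector(aconn, nums);   // snapshot the call numbers
 *     ...
 *     rxi_SetCallNumberVector(aconn, nums);   // restore them later
 *
 * where RX_MAXCALLS is the number of call channels per rx_connection.
 */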
+int
rxi_SetCallNumberVector(aconn, aint32s)
register struct rx_connection *aconn;
register afs_int32 *aint32s; {
{
struct rx_serverQueueEntry *sq;
register struct rx_call *call = (struct rx_call *) 0, *choice2;
- struct rx_service *service;
+ struct rx_service *service = NULL;
SPLVAR;
MUTEX_ENTER(&freeSQEList_lock);
- if (sq = rx_FreeSQEList) {
+ if ((sq = rx_FreeSQEList)) {
rx_FreeSQEList = *(struct rx_serverQueueEntry **)sq;
MUTEX_EXIT(&freeSQEList_lock);
} else { /* otherwise allocate a new one and return that */
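/* Note: both changes above are warning fixes rather than behaviour changes.
 * `service` is initialised to NULL so compilers cannot flag a possibly
 * uninitialised use later in the function, and the doubled parentheses in
 * `if ((sq = rx_FreeSQEList))` tell gcc the assignment inside the condition
 * is intentional (silencing -Wparentheses) while still testing the assigned
 * pointer against zero.
 */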
if (rx_Log) {
fprintf( rx_Log,
"RACK: reason %x previous %u seq %u serial %u skew %d first %u",
- ap->reason, ntohl(ap->previousPacket), np->header.seq, serial,
- skew, ntohl(ap->firstPacket));
+ ap->reason, ntohl(ap->previousPacket),
+ (unsigned int) np->header.seq, (unsigned int) serial,
+ (unsigned int) skew, ntohl(ap->firstPacket));
if (nAcks) {
int offset;
for (offset = 0; offset < nAcks; offset++)
#ifdef RXDEBUG
if (rx_Log) {
fprintf(rx_Log, "SACK: reason %x previous %u seq %u first %u",
- ap->reason, ntohl(ap->previousPacket), p->header.seq,
- ntohl(ap->firstPacket));
+ ap->reason, ntohl(ap->previousPacket),
+ (unsigned int) p->header.seq, ntohl(ap->firstPacket));
if (ap->nAcks) {
for (offset = 0; offset < ap->nAcks; offset++)
putc(ap->acks[offset] == RX_ACK_TYPE_NACK? '-' : '*', rx_Log);
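/* Note: the added casts make the fprintf arguments agree with their %u/%x
 * conversion specifiers.  Fields such as header.seq, serial and skew use
 * AFS-defined integer types (e.g. afs_uint32) whose underlying type varies
 * by platform, so passing them through a varargs call uncast can provoke
 * format-string warnings or, on some ABIs, misprinted values.
 */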
struct timeval temptime;
#endif
register int rtt_timeout;
- static char id[]="@(#)adaptive RTO";
#if defined(AFS_ALPHA_LINUX20_ENV) && defined(AFS_PTHREAD_ENV) && !defined(KERNEL)
/* yet again. This was the worst Heisenbug of the port - stroucki */
{
struct clock now;
clock_GetTime(&now);
- fprintf(rx_Log, " %u.%.3u:", now.sec, now.usec/1000);
+     fprintf(rx_Log, " %u.%.3u:", (unsigned int) now.sec,
+             (unsigned int) now.usec/1000);
fprintf(rx_Log, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15);
putc('\n', rx_Log);
}
}
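/* Note: the deleted id[] string was an SCCS/what(1) identification stamp
 * ("@(#)...") with no remaining references, so compilers flag it as an
 * unused variable; deleting it is the simplest fix.  The casts on now.sec
 * and now.usec follow the same pattern as the other logging changes here.
 */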
fprintf(file,
- "rx stats: free packets %d, "
- "allocs %d, ",
- freePackets,
+ "rx stats: free packets %d, allocs %d, ",
+ (int) freePackets,
s->packetRequests);
if (version >= RX_DEBUGI_VERSION_W_NEWPACKETTYPES) {
" \t(these should be small) sendFailed %d, "
"fatalErrors %d\n",
s->netSendFailures,
- s->fatalErrors);
+ (int) s->fatalErrors);
if (s->nRttSamples) {
fprintf(file,
"Burst size %d, "
"burst wait %u.%d.\n",
ntohl(peer->host),
- peer->port,
- peer->burstSize,
- peer->burstWait.sec,
- peer->burstWait.usec);
+ (int) peer->port,
+ (int) peer->burstSize,
+ (int) peer->burstWait.sec,
+ (int) peer->burstWait.usec);
fprintf(file,
" Rtt %d, "
"total sent %d, "
"resent %d\n",
peer->rtt,
- peer->timeout.sec,
- peer->timeout.usec,
+ (int) peer->timeout.sec,
+ (int) peer->timeout.usec,
peer->nSent,
peer->reSends);
"max in packet skew %d, "
"max out packet skew %d\n",
peer->ifMTU,
- peer->inPacketSkew,
- peer->outPacketSkew);
+ (int) peer->inPacketSkew,
+ (int) peer->outPacketSkew);
}
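/* Note: the statistics fields printed above (port, burstSize, burstWait,
 * timeout, packet skew, ...) are declared with AFS fixed-width or short
 * types, so they are cast to int to satisfy the %d/%u specifiers.  If a
 * counter could ever exceed INT_MAX, a hypothetical alternative is to widen
 * the format instead, e.g.
 *
 *     fprintf(file, "total sent %lu\n", (unsigned long) peer->nSent);
 *
 * but for debug output the int casts used here are adequate.
 */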
#ifdef AFS_PTHREAD_ENV
if (rx_Log_event) {
struct clock now;
clock_GetTime(&now);
- fprintf(rx_Log_event, "%d.%d: rxevent_Post(%d.%d, %x, %x)\n", now.sec, now.usec, when->sec, when->usec, func, arg);
+ fprintf(rx_Log_event, "%d.%d: rxevent_Post(%d.%d, %x, %x)\n",
+ (int) now.sec, (int) now.usec, (int) when->sec,
+ (int) when->usec, (unsigned int) func, (unsigned int) arg);
}
#endif
if (rx_Log_event) {
struct clock now;
clock_GetTime(&now);
- fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %x, %x)\n", now.sec,
- now.usec, ev->eventTime.sec, ev->eventTime.usec, ev->func,
- ev->arg);
+ fprintf(rx_Log_event, "%d.%d: rxevent_Cancel_1(%d.%d, %x, %x)\n",
+ (int) now.sec, (int) now.usec, (int) ev->eventTime.sec,
+ (int) ev->eventTime.usec, (unsigned int) ev->func,
+ (unsigned int) ev->arg);
}
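/* Note: unlike the other casts in this change, these convert pointers
 * (func, arg, ev->func, ev->arg) to unsigned int for %x, which matches the
 * existing format strings but truncates the values on LP64 platforms where
 * pointers are 64 bits.  A hypothetical, fully portable variant would print
 * them as pointers instead:
 *
 *     fprintf(rx_Log_event, "%d.%d: rxevent_Post(%d.%d, %p, %p)\n",
 *             (int) now.sec, (int) now.usec, (int) when->sec,
 *             (int) when->usec, (void *) func, (void *) arg);
 *
 * The narrower form is kept here, presumably to preserve the historical
 * debug output.
 */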
#endif
/* Append it to the free list (rather than prepending) to keep the free
} while (queue_IsNotEmpty(&ep->events));
}
#ifdef RXDEBUG
- if (rx_Log_event) fprintf(rx_Log_event, "rxevent_RaiseEvents(%d.%d)\n", now.sec, now.usec);
+ if (rx_Log_event) fprintf(rx_Log_event, "rxevent_RaiseEvents(%d.%d)\n",
+ (int) now.sec, (int) now.usec);
#endif
rxevent_raiseScheduled = 0;
MUTEX_EXIT(&rxevent_lock);
extern char cml_version_number[];
extern int (*rx_almostSent)();
-void rxi_FreePacketNoLock(struct rx_packet *p);
static void rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
- afs_int32 ahost, short aport, afs_int32 istack);
+ afs_int32 ahost, short aport, afs_int32 istack);
-extern char cml_version_number[];
-extern int (*rx_almostSent)();
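/* Note: the two extern declarations removed just above were verbatim
 * duplicates of the cml_version_number and rx_almostSent declarations a few
 * lines earlier, and the rxi_FreePacketNoLock() prototype is presumably now
 * provided by a shared header, so the local copies can go.
 */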
/* some rules about packets:
* 1. When a packet is allocated, the final iov_buf contains room for
* a security trailer, but iov_len masks that fact. If the security
static struct rx_packet * allocCBuf(int class)
{
struct rx_packet *c;
-#ifndef KERNEL
- extern void rxi_MorePacketsNoLock();
-#endif /* !KERNEL */
SPLVAR;
NETPRI;
*/
void rxi_freeCBuf(struct rx_packet *c)
{
- extern void rxi_PacketsUnWait();
SPLVAR;
NETPRI;
/* Add more packet buffers */
void rxi_MorePackets(int apackets)
{
- extern void rxi_PacketsUnWait();
struct rx_packet *p, *e;
int getme;
SPLVAR;
/* Add more packet buffers */
void rxi_MorePacketsNoLock(int apackets)
{
- extern void rxi_PacketsUnWait();
struct rx_packet *p, *e;
int getme;
struct rx_packet * p;
int first;
{
- extern void rxi_PacketsUnWait();
int length;
struct iovec *iov, *end;
SPLVAR;
* remove it yourself first if you call this routine. */
void rxi_FreePacket(struct rx_packet *p)
{
- extern void rxi_PacketsUnWait();
SPLVAR;
NETPRI;
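/* Note: the `extern void rxi_PacketsUnWait();` and
 * `extern void rxi_MorePacketsNoLock();` lines removed throughout these
 * functions were block-scope prototypes repeated inside each caller.
 * Dropping them assumes the functions are now declared once at file scope or
 * in a shared header so every caller sees one consistent prototype; without
 * such a declaration, pre-C99 compilers fall back to an implicit declaration
 * and C99 and later reject the calls outright.
 */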