From 513d6a3e35f8c35178e0a22428d616751251b51e Mon Sep 17 00:00:00 2001 From: Simon Wilkinson Date: Tue, 28 Sep 2010 23:37:54 +0100 Subject: [PATCH] rx: Add atomic operations code Add support for an atomic type, and atomic operators for RX. This builds on work which has already been done for Windows, where InterlockedOperations are used for statistics gathering. A new opaque type, rx_atomic_t is introduced so that normal arithmetic operations will fail on atomic data. An implementation using native atomic methods is provided for Darwin, Solaris and Windows. A native kernel implementation is used for Linux. Where OpenAFS is built with a sufficiently modern gcc, gcc's atomic primitives will be used. Sadly, gcc's builtin operations are not available for i386, they will only be used with builds the set -march=i486 (or later). Otherwise, we fall back to a single mutex which protects all atomic operations. Change-Id: I5f69677a80617e3936f82b177cd58250a6dbf31f Reviewed-on: http://gerrit.openafs.org/2858 Tested-by: BuildBot Reviewed-by: Jeffrey Altman Tested-by: Jeffrey Altman --- acinclude.m4 | 10 ++ src/rx/rx.c | 8 ++ src/rx/rx_atomic.h | 273 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 291 insertions(+) create mode 100644 src/rx/rx_atomic.h diff --git a/acinclude.m4 b/acinclude.m4 index 98c6771dd..276cacec4 100644 --- a/acinclude.m4 +++ b/acinclude.m4 @@ -981,6 +981,16 @@ dnl Linux-only, but just enable always. 
AC_DEFINE(AFS_CACHE_BYPASS, 1, [define to activate cache bypassing Unix client]) esac +AC_CACHE_CHECK([if compiler has __sync_add_and_fetch], + [ac_cv_sync_fetch_and_add], + [AC_TRY_LINK(, [int var; return __sync_add_and_fetch(&var, 1);], + [ac_cv_sync_fetch_and_add=yes], + [ac_cv_sync_fetch_and_add=no]) +]) +AS_IF([test "$ac_cv_sync_fetch_and_add" = "yes"], + [AC_DEFINE(HAVE_SYNC_FETCH_AND_ADD, 1, + [define if your C compiler has __sync_add_and_fetch])]) + AC_CACHE_CHECK([if struct sockaddr has sa_len field], [ac_cv_sockaddr_len], [AC_TRY_COMPILE( [#include diff --git a/src/rx/rx.c b/src/rx/rx.c index ce7d8361e..267ebcb17 100644 --- a/src/rx/rx.c +++ b/src/rx/rx.c @@ -69,6 +69,7 @@ #include "rx.h" #include "rx_globals.h" #include "rx_trace.h" +#include "rx_atomic.h" #define AFSOP_STOP_RXCALLBACK 210 /* Stop CALLBACK process */ #define AFSOP_STOP_AFS 211 /* Stop AFS process */ #define AFSOP_STOP_BKG 212 /* Stop BKG process */ @@ -104,6 +105,7 @@ extern afs_int32 afs_termState; # include "rx_user.h" # include "rx_clock.h" # include "rx_queue.h" +# include "rx_atomic.h" # include "rx_globals.h" # include "rx_trace.h" # include @@ -156,6 +158,10 @@ static unsigned int rxi_rpc_process_stat_cnt; #include /* for definition of offsetof() */ #endif +#ifdef RX_ENABLE_LOCKS +afs_kmutex_t rx_atomic_mutex; +#endif + #ifdef AFS_PTHREAD_ENV #include @@ -196,6 +202,7 @@ rxi_InitPthread(void) MUTEX_INIT(&rx_clock_mutex, "clock", MUTEX_DEFAULT, 0); MUTEX_INIT(&rx_stats_mutex, "stats", MUTEX_DEFAULT, 0); MUTEX_INIT(&rx_waiting_mutex, "waiting", MUTEX_DEFAULT, 0); + MUTEX_INIT(&rx_atomic_mutex, "atomic", MUTEX_DEFAULT, 0); MUTEX_INIT(&rx_quota_mutex, "quota", MUTEX_DEFAULT, 0); MUTEX_INIT(&rx_pthread_mutex, "pthread", MUTEX_DEFAULT, 0); MUTEX_INIT(&rx_packets_mutex, "packets", MUTEX_DEFAULT, 0); @@ -346,6 +353,7 @@ struct rx_connection *rxLastConn = 0; * multi_handle->lock * rxevent_lock * rx_stats_mutex + * rx_atomic_mutex * * Do we need a lock to protect the peer field in the conn 
structure? * conn->peer was previously a constant for all intents and so has no diff --git a/src/rx/rx_atomic.h b/src/rx/rx_atomic.h new file mode 100644 index 000000000..0193455e3 --- /dev/null +++ b/src/rx/rx_atomic.h @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2010 Your Filesystem Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR `AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#define RX_ATOMIC_INIT(i) { (i) }
+
+#ifdef AFS_NT40_ENV
+typedef struct {
+    volatile int var;
+} rx_atomic_t;
+
+static_inline void
+rx_atomic_set(rx_atomic_t *atomic, int val) {
+    atomic->var = val;
+}
+
+static_inline int
+rx_atomic_read(rx_atomic_t *atomic) {
+    return atomic->var;
+}
+
+static_inline void
+rx_atomic_inc(rx_atomic_t *atomic) {
+    InterlockedIncrement(&atomic->var);
+}
+
+static_inline int
+rx_atomic_inc_and_read(rx_atomic_t *atomic) {
+    return InterlockedIncrement(&atomic->var);
+}
+
+static_inline void
+rx_atomic_add(rx_atomic_t *atomic, int change) {
+    InterlockedExchangeAdd(&atomic->var, change);
+}
+
+static_inline void
+rx_atomic_dec(rx_atomic_t *atomic) {
+    InterlockedDecrement(&atomic->var);
+}
+
+static_inline void
+rx_atomic_sub(rx_atomic_t *atomic, int change) {
+    InterlockedExchangeAdd(&atomic->var, 0 - change);
+}
+
+#elif defined(AFS_DARWIN80_ENV) || defined(AFS_USR_DARWIN80_ENV)
+
+#include <libkern/OSAtomic.h>
+typedef struct {
+    volatile int var;
+} rx_atomic_t;
+
+static_inline void
+rx_atomic_set(rx_atomic_t *atomic, int val) {
+    atomic->var = val;
+}
+
+static_inline int
+rx_atomic_read(rx_atomic_t *atomic) {
+    return atomic->var;
+}
+
+static_inline void
+rx_atomic_inc(rx_atomic_t *atomic) {
+    OSAtomicIncrement32(&atomic->var);
+}
+
+static_inline int
+rx_atomic_inc_and_read(rx_atomic_t *atomic) {
+    return OSAtomicIncrement32(&atomic->var);
+}
+
+static_inline void
+rx_atomic_add(rx_atomic_t *atomic, int change) {
+    OSAtomicAdd32(change, &atomic->var);
+}
+
+static_inline void
+rx_atomic_dec(rx_atomic_t *atomic) {
+    OSAtomicDecrement32(&atomic->var);
+}
+
+static_inline void
+rx_atomic_sub(rx_atomic_t *atomic, int change) {
+    OSAtomicAdd32(0 - change, &atomic->var);
+}
+#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
+#include <asm/atomic.h>
+
+typedef atomic_t rx_atomic_t;
+
+/* Linux atomic_set() is atomic_set(atomic_t *v, int i) — the macro must
+ * take and forward both arguments, matching the rx_atomic_set(atomic, val)
+ * signature of the other implementations in this header. */
+#define rx_atomic_set(X, V) atomic_set(X, V)
+#define rx_atomic_read(X) atomic_read(X)
+#define rx_atomic_inc(X) atomic_inc(X)
+#define rx_atomic_inc_and_read(X) atomic_inc_return(X)
+/* Linux kernel atomic_add()/atomic_sub() take (int i, atomic_t *v) —
+ * value first, pointer second — so the arguments must be swapped relative
+ * to the rx_atomic_add(atomic, change) calling convention. */
+#define rx_atomic_add(X, V) atomic_add(V, X)
+#define rx_atomic_dec(X) atomic_dec(X)
+#define rx_atomic_sub(X, V) atomic_sub(V, X)
+
+#elif defined(AFS_SUN58_ENV)
+typedef struct {
+    volatile int var;
+} rx_atomic_t;
+
+static_inline void
+rx_atomic_set(rx_atomic_t *atomic, int val) {
+    atomic->var = val;
+}
+
+static_inline int
+rx_atomic_read(rx_atomic_t *atomic) {
+    return atomic->var;
+}
+
+static_inline void
+rx_atomic_inc(rx_atomic_t *atomic) {
+    atomic_inc_32(&atomic->var);
+}
+
+static_inline int
+rx_atomic_inc_and_read(rx_atomic_t *atomic) {
+    return atomic_inc_32_nv(&atomic->var);
+}
+
+static_inline void
+rx_atomic_add(rx_atomic_t *atomic, int change) {
+    atomic_add_32(&atomic->var, change);
+}
+
+static_inline void
+rx_atomic_dec(rx_atomic_t *atomic) {
+    atomic_dec_32(&atomic->var);
+}
+
+static_inline void
+rx_atomic_sub(rx_atomic_t *atomic, int change) {
+    atomic_add_32(&atomic->var, 0 - change);
+}
+
+#elif defined(__GNUC__) && defined(HAVE_SYNC_FETCH_AND_ADD)
+
+typedef struct {
+    volatile int var;
+} rx_atomic_t;
+
+static_inline void
+rx_atomic_set(rx_atomic_t *atomic, int val) {
+    atomic->var = val;
+}
+
+static_inline int
+rx_atomic_read(rx_atomic_t *atomic) {
+    return atomic->var;
+}
+
+static_inline void
+rx_atomic_inc(rx_atomic_t *atomic) {
+    (void)__sync_fetch_and_add(&atomic->var, 1);
+}
+
+static_inline int
+rx_atomic_inc_and_read(rx_atomic_t *atomic) {
+    return __sync_add_and_fetch(&atomic->var, 1);
+}
+
+static_inline void
+rx_atomic_add(rx_atomic_t *atomic, int change) {
+    (void)__sync_fetch_and_add(&atomic->var, change);
+}
+
+static_inline void
+rx_atomic_dec(rx_atomic_t *atomic) {
+    (void)__sync_fetch_and_sub(&atomic->var, 1);
+}
+
+static_inline void
+rx_atomic_sub(rx_atomic_t *atomic, int change) {
+    (void)__sync_fetch_and_sub(&atomic->var, change);
+}
+
+#else
+
+/* If we're on a platform where we have no idea how to do atomics,
+ * then we fall back to using a single process wide mutex to protect
+ * all atomic variables.
This won't be the quickest thing ever.
+ */
+
+#ifdef RX_ENABLE_LOCKS
+extern afs_kmutex_t rx_atomic_mutex;
+#endif
+
+typedef struct {
+    int var;
+} rx_atomic_t;
+
+static_inline void
+rx_atomic_set(rx_atomic_t *atomic, int val) {
+    MUTEX_ENTER(&rx_atomic_mutex);
+    atomic->var = val;
+    MUTEX_EXIT(&rx_atomic_mutex);
+}
+
+static_inline int
+rx_atomic_read(rx_atomic_t *atomic) {
+    int out;
+
+    MUTEX_ENTER(&rx_atomic_mutex);
+    out = atomic->var;
+    MUTEX_EXIT(&rx_atomic_mutex);
+
+    return out;
+}
+
+static_inline void
+rx_atomic_inc(rx_atomic_t *atomic) {
+    MUTEX_ENTER(&rx_atomic_mutex);
+    atomic->var++;
+    MUTEX_EXIT(&rx_atomic_mutex);
+}
+
+static_inline int
+rx_atomic_inc_and_read(rx_atomic_t *atomic) {
+    int retval;
+    MUTEX_ENTER(&rx_atomic_mutex);
+    atomic->var++;
+    retval = atomic->var;
+    MUTEX_EXIT(&rx_atomic_mutex);
+    return retval;
+}
+
+static_inline void
+rx_atomic_add(rx_atomic_t *atomic, int change) {
+    MUTEX_ENTER(&rx_atomic_mutex);
+    atomic->var += change;
+    MUTEX_EXIT(&rx_atomic_mutex);
+}
+
+static_inline void
+rx_atomic_dec(rx_atomic_t *atomic) {
+    MUTEX_ENTER(&rx_atomic_mutex);
+    atomic->var--;
+    MUTEX_EXIT(&rx_atomic_mutex);
+}
+
+static_inline void
+rx_atomic_sub(rx_atomic_t *atomic, int change) {
+    MUTEX_ENTER(&rx_atomic_mutex);
+    atomic->var -= change;
+    MUTEX_EXIT(&rx_atomic_mutex);
+}
+
+#endif
-- 
2.39.5