]> git.michaelhowe.org Git - packages/o/openafs.git/commitdiff
Add tracing to MCAS allocator and GC
authormatt@linuxbox.com <matt@linuxbox.com>
Fri, 28 Aug 2009 15:54:41 +0000 (11:54 -0400)
committerDerrick Brashear <shadow|account-1000005@unknown>
Thu, 3 Sep 2009 19:35:25 +0000 (12:35 -0700)
Adds conditional trace logging to the MCAS object cache and its supporting GC,
using ViceLog.  While this should be normalized in the future, it is correct
for both the fileserver and the volserver, the two programs currently using MCAS.

LICENSE BSD

Reviewed-on: http://gerrit.openafs.org/374
Reviewed-by: Derrick Brashear <shadow@dementia.org>
Tested-by: Derrick Brashear <shadow@dementia.org>
src/mcas/gc.c
src/mcas/gc.h
src/mcas/osi_mcas_obj_cache.c
src/mcas/osi_mcas_obj_cache.h
src/mcas/skip_cas_adt.c

index 602372299f648462474fcd5e9e427598d6bd851f..79fa011ec5346f5593c0c22694d782bdbef8795d 100644 (file)
@@ -42,6 +42,9 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <unistd.h>
 #include "portable_defns.h"
 #include "gc.h"
+
+#include <afsconfig.h>
+#include <afs/param.h>
 #include <afs/afsutil.h>
 
 /*#define MINIMAL_GC*/
@@ -125,6 +128,9 @@ static struct gc_global_st
     int nr_sizes;
     int blk_sizes[MAX_SIZES];
 
+    /* tags (trace support) */
+    char *tags[MAX_SIZES];
+
     /* Registered epoch hooks. */
     int nr_hooks;
     hook_fn_t hook_fns[MAX_HOOKS];
@@ -195,6 +201,9 @@ static chunk_t *alloc_more_chunks(void)
     int i;
     chunk_t *h, *p;
 
+    /* NOTE(review): cast in case CHUNKS_PER_ALLOC is an int-typed macro;
+     * %lu with an int argument is a printf type mismatch (UB) */
+    ViceLog(11, ("GC: alloc_more_chunks alloc %lu chunks\n",
+        (unsigned long)CHUNKS_PER_ALLOC));
+
     h = p = ALIGNED_ALLOC(CHUNKS_PER_ALLOC * sizeof(*h));
     if ( h == NULL ) MEM_FAIL(CHUNKS_PER_ALLOC * sizeof(*h));
 
@@ -347,10 +356,14 @@ static void gc_reclaim(void)
     unsigned long curr_epoch;
     chunk_t      *ch, *t;
     int           two_ago, three_ago, i, j;
+
+    ViceLog(11, ("GC: gc_reclaim enter\n"));
  
     /* Barrier to entering the reclaim critical section. */
     if ( gc_global.inreclaim || CASIO(&gc_global.inreclaim, 0, 1) ) return;
 
+    ViceLog(11, ("GC: gc_reclaim after inreclaim barrier\n"));
+
     /*
      * Grab first ptst structure *before* barrier -- prevent bugs
      * on weak-ordered architectures.
@@ -365,6 +378,9 @@ static void gc_reclaim(void)
         if ( (ptst->count > 1) && (ptst->gc->epoch != curr_epoch) ) goto out;
     }
 
+
+    ViceLog(11, ("GC: gc_reclaim all-threads see current epoch\n"));
+
     /*
      * Three-epoch-old garbage lists move to allocation lists.
      * Two-epoch-old garbage lists are cleaned out.
@@ -400,6 +416,30 @@ static void gc_reclaim(void)
             gc->garbage_tail[three_ago][i]->next = ch;
             gc->garbage_tail[three_ago][i] = t;
             t->next = t;
+
+            /* gc inst: compute and log length of the chunk list being
+             * returned.  The original loop never advanced the cursor
+             * (ch_next was assigned but never used), spinning forever on
+             * any list longer than one chunk.  This walk terminates on
+             * NULL, on a self-linked tail, or on wrap-around to the head. */
+            {
+                chunk_t *ch_head, *ch_cur;
+                int r_len;
+
+                r_len = 0;
+                ch_head = ch;
+                ch_cur = ch_head;
+                do {
+                    r_len++;
+                    if (ch_cur->next == ch_cur)
+                        break;
+                    ch_cur = ch_cur->next;
+                } while (ch_cur && (ch_cur != ch_head));
+
+                ViceLog(11, ("GC: return %d chunks of size %d to "
+                             "gc_global.alloc[%d]\n",
+                             r_len,
+                             gc_global.blk_sizes[i],
+                             i));
+            }
+
+
             add_chunks_to_list(ch, gc_global.alloc[i]);
         }
 
@@ -414,11 +454,32 @@ static void gc_reclaim(void)
             do { for ( j = 0; j < t->i; j++ ) fn(our_ptst, t->blk[j]); }
             while ( (t = t->next) != ch );
 
+            /* gc inst: compute and log length of the chunk list being
+             * returned.  Same fix as the alloc-list walk above: the cursor
+             * must actually advance; terminate on NULL, a self-linked
+             * tail, or wrap-around to the head. */
+            {
+                chunk_t *ch_head, *ch_cur;
+                int r_len;
+
+                r_len = 0;
+                ch_head = ch;
+                ch_cur = ch_head;
+                do {
+                    r_len++;
+                    if (ch_cur->next == ch_cur)
+                        break;
+                    ch_cur = ch_cur->next;
+                } while (ch_cur && (ch_cur != ch_head));
+
+                ViceLog(11, ("GC: return %d chunks to gc_global.free_chunks\n",
+                             r_len));
+            }
+
             add_chunks_to_list(ch, gc_global.free_chunks);
         }
     }
 
     /* Update current epoch. */
+    ViceLog(11, ("GC: gc_reclaim epoch transition (leaving %lu)\n",
+                                curr_epoch));
+
     WMB();
     gc_global.current = (curr_epoch+1) % NR_EPOCHS;
 
@@ -461,6 +522,12 @@ gc_get_blocksize(int alloc_id)
     return (gc_global.blk_sizes[alloc_id]);
 }
 
+/* Return the trace tag registered for allocator alloc_id.
+ * gc_global.tags[] holds char * (set via strdup in gc_add_allocator),
+ * so this must return char *: returning the pointer as int truncates
+ * it on LP64 platforms and breaks the %s consumers in the trace logs. */
+char *
+gc_get_tag(int alloc_id)
+{
+    return (gc_global.tags[alloc_id]);
+}
+
 static chunk_t *chunk_from_cache(gc_t *gc)
 {
     chunk_t *ch = gc->chunk_cache, *p = ch->next;
@@ -631,7 +698,7 @@ gc_t *gc_init(void)
 
 
 int
-gc_add_allocator(int alloc_size)
+gc_add_allocator(int alloc_size, char *tag)
 {
     int ni, i;
 
@@ -649,6 +716,7 @@ gc_add_allocator(int alloc_size)
     while ((ni = CASIO(&gc_global.nr_sizes, i, i + 1)) != i)
        i = ni;
     gc_global.blk_sizes[i] = alloc_size;
+    gc_global.tags[i] = tag ? strdup(tag) : NULL;  /* strdup(NULL) is UB */
     gc_global.alloc_size[i] = ALLOC_CHUNKS_PER_LIST;
     gc_global.alloc[i] = get_filled_chunks(ALLOC_CHUNKS_PER_LIST, alloc_size);
     return i;
index 4872e0a6ab7da71bcd5eb0b405593b5c79381588..33f71c9527a3bb5816acaf892aaf1b7ac1d4db8a 100644 (file)
@@ -39,7 +39,7 @@ typedef struct gc_st gc_t;
 /* Initialise GC section of given per-thread state structure. */
 gc_t *gc_init(void);
 
-int gc_add_allocator(int alloc_size);
+int gc_add_allocator(int alloc_size, char *tag);
 void gc_remove_allocator(int alloc_id);
 
 /*
index 96396203672e73b59dfb17a8931e22e8e298e64d..f1c269aab49555913d6dab016d77dac8b75f69a9 100644 (file)
@@ -1,17 +1,21 @@
 #include "osi_mcas_obj_cache.h"
+#include <osi/osi_includes.h>
+#include <osi/osi_types.h>
 #include <afs/afsutil.h>
 
 void
-osi_mcas_obj_cache_create(osi_mcas_obj_cache_t * gc_id, size_t size)
+osi_mcas_obj_cache_create(osi_mcas_obj_cache_t * gc_id, size_t size,
+       char *tag)
 {
-    ViceLog(7,
-           ("osi_mcas_obj_cache_create: size, adjsize %d\n", size,
-            size + sizeof(int *)));
+    /* NOTE(review): original format had one %d for two values, and %d
+     * with a size_t argument is a printf type mismatch (UB) */
+    ViceLog(7,
+           ("osi_mcas_obj_cache_create: size %lu, adjsize %lu\n",
+            (unsigned long)size,
+            (unsigned long)(size + sizeof(int *))));
 
-    *(int *)gc_id = gc_add_allocator(size + sizeof(int *));
+    *(int *)gc_id = gc_add_allocator(size + sizeof(int *), tag);
 }
 
 void gc_trace(int alloc_id);
+char *gc_get_tag(int alloc_id);
 int gc_get_blocksize(int alloc_id);
 
 void *
@@ -20,14 +24,22 @@ osi_mcas_obj_cache_alloc(osi_mcas_obj_cache_t gc_id)
     ptst_t *ptst;
     void *obj;
 
-#if MCAS_ALLOC_DISABLED
-#warning XXXXX mcas allocator cache is DISABLED for debugging!!
-    obj = malloc(gc_get_blocksize(gc_id));
-#else
     ptst = critical_enter();
     obj = (void *)gc_alloc(ptst, gc_id);
     critical_exit(ptst);
-#endif
+
+    /* NOTE(review): preprocessor conditionals inside a macro argument
+     * list are undefined behavior (C99 6.10.3p11), and %lx/%llx with a
+     * void * argument is a printf type mismatch; %p handles both. */
+    ViceLog(11,
+                       ("GC: osi_mcas_obj_cache_alloc: block of size %d "
+                        "%p (%s)\n",
+                        gc_get_blocksize(gc_id),
+                        obj,
+                        gc_get_tag(gc_id)));
+
     return (obj);
 }
 
@@ -36,13 +48,21 @@ osi_mcas_obj_cache_free(osi_mcas_obj_cache_t gc_id, void *obj)
 {
     ptst_t *ptst;
 
-#if MCAS_ALLOC_DISABLED
-#warning XXXXX mcas allocator cache is DISABLED for debugging!!
-#else
+    /* NOTE(review): preprocessor conditionals inside a macro argument
+     * list are undefined behavior (C99 6.10.3p11), and %lx/%llx with a
+     * void * argument is a printf type mismatch; %p handles both. */
+    ViceLog(11,
+                       ("GC: osi_mcas_obj_cache_free: block of size %d "
+                        "%p (%s)\n",
+                        gc_get_blocksize(gc_id),
+                        obj,
+                        gc_get_tag(gc_id)));
+
     ptst = critical_enter();
     gc_free(ptst, (void *)obj, gc_id);
     critical_exit(ptst);
-#endif
 }
 
 void
index b02e200d799b3452cbaff7db29b158bef3133670..cc1d63ba4b8b078c988839679aee4edde7bd3790 100644 (file)
@@ -10,7 +10,8 @@ typedef int osi_mcas_obj_cache_t;
 
 /* Create a new MCAS GC pool, and return its identifier, which
  * follows future calls */
-void osi_mcas_obj_cache_create(osi_mcas_obj_cache_t * gc_id, size_t size);     /* alignment? */
+void osi_mcas_obj_cache_create(osi_mcas_obj_cache_t * gc_id, size_t size,
+       char *tag);     /* alignment? */
 
 /* Allocate an object from the pool identified by
  * gc_id */
index b677dd9edbcf54f3cabd192139bf945f362a51e8..6c0cf64b00772404370258324cfbc6aa7a105fc1 100644 (file)
@@ -308,7 +308,8 @@ _init_osi_cas_skip_subsystem(void)
     int i;
 
     for (i = 0; i < NUM_LEVELS; i++) {
-       gc_id[i] = gc_add_allocator(sizeof(node_t) + i * sizeof(node_t *));
+       gc_id[i] = gc_add_allocator(sizeof(node_t) + i * sizeof(node_t *),
+                                   "cas_skip_level");
     }
 }