[svn:parrot] r48485 - trunk/src/gc

chromatic at svn.parrot.org
Sat Aug 14 00:49:00 UTC 2010


Author: chromatic
Date: Sat Aug 14 00:49:00 2010
New Revision: 48485
URL: https://trac.parrot.org/parrot/changeset/48485

Log:
Merged gc_threshold_tuning branch into trunk.

With this merge, Memory_Pools gains memory_used and mem_used_last_collect
counters, and both the variable-size allocator and the mark & sweep core now
trigger a collection only when memory use has grown by more than half of what
was live after the previous collection (in addition to the GC_SIZE_THRESHOLD
checks); pad_pool_size() also returns 0, skipping compaction, when every block
below the top one is almost full.

Modified:
   trunk/src/gc/alloc_resources.c
   trunk/src/gc/gc_ms.c
   trunk/src/gc/gc_private.h
   trunk/src/gc/mark_sweep.c

Modified: trunk/src/gc/alloc_resources.c
==============================================================================
--- trunk/src/gc/alloc_resources.c	Sat Aug 14 00:48:56 2010	(r48484)
+++ trunk/src/gc/alloc_resources.c	Sat Aug 14 00:49:00 2010	(r48485)
@@ -321,8 +321,11 @@
          * TODO pass required allocation size to the GC system,
          *      so that collection can be skipped if needed
          */
+        size_t new_mem = mem_pools->memory_used -
+                         mem_pools->mem_used_last_collect;
         if (!mem_pools->gc_mark_block_level
-        &&   mem_pools->mem_allocs_since_last_collect) {
+            && new_mem > (mem_pools->mem_used_last_collect >> 1)
+            && new_mem > GC_SIZE_THRESHOLD) {
             Parrot_gc_mark_and_sweep(interp, GC_trace_stack_FLAG);
 
             if (interp->gc_sys->sys_type != INF) {
@@ -360,6 +363,7 @@
     return_val             = pool->top_block->top;
     pool->top_block->top  += size;
     pool->top_block->free -= size;
+    mem_pools->memory_used += size;
 
     return return_val;
 }
@@ -454,15 +458,19 @@
     if (mem_pools->gc_sweep_block_level)
         return;
 
+    ++mem_pools->gc_collect_runs;
+
+    /* Snag a block big enough for everything */
+    total_size = pad_pool_size(pool);
+
+    if (total_size == 0)
+        return;
+
     ++mem_pools->gc_sweep_block_level;
 
     /* We're collecting */
     mem_pools->mem_allocs_since_last_collect    = 0;
     mem_pools->header_allocs_since_last_collect = 0;
-    ++mem_pools->gc_collect_runs;
-
-    /* Snag a block big enough for everything */
-    total_size = pad_pool_size(pool);
 
     alloc_new_block(mem_pools, total_size, pool, "inside compact");
 
@@ -512,6 +520,7 @@
     /* How much is free. That's the total size minus the amount we used */
     new_block->free = new_block->size - (cur_spot - new_block->start);
     mem_pools->memory_collected +=      (cur_spot - new_block->start);
+    mem_pools->memory_used      +=      (cur_spot - new_block->start);
 
     free_old_mem_blocks(mem_pools, pool, new_block, total_size);
 
@@ -526,6 +535,9 @@
 size minus the reclaimable size. Add a minimum block to the current amount, so
 we can avoid having to allocate it in the future.
 
+Returns 0 if all blocks below the top block are almost full. In this case
+compacting is not needed.
+
 TODO - Big blocks
 
 Currently all available blocks are compacted into one new
@@ -550,21 +562,29 @@
 pad_pool_size(ARGIN(const Variable_Size_Pool *pool))
 {
     ASSERT_ARGS(pad_pool_size)
-    Memory_Block *cur_block = pool->top_block;
+    Memory_Block *cur_block = pool->top_block->prev;
 
     UINTVAL total_size   = 0;
 #if RESOURCE_DEBUG
-    size_t  total_blocks = 0;
+    size_t  total_blocks = 1;
 #endif
 
     while (cur_block) {
-        total_size += cur_block->size - cur_block->freed - cur_block->free;
+        if (!is_block_almost_full(cur_block))
+            total_size += cur_block->size - cur_block->freed - cur_block->free;
         cur_block   = cur_block->prev;
 #if RESOURCE_DEBUG
         ++total_blocks;
 #endif
     }
 
+    if (total_size == 0)
+        return 0;
+
+    cur_block = pool->top_block;
+    if (!is_block_almost_full(cur_block))
+        total_size += cur_block->size - cur_block->freed - cur_block->free;
+
     /* this makes for ever increasing allocations but fewer collect runs */
 #if WE_WANT_EVER_GROWING_ALLOCATIONS
     total_size += pool->minimum_block_size;
@@ -719,6 +739,8 @@
         else {
             /* Note that we don't have it any more */
             mem_pools->memory_allocated -= cur_block->size;
+            mem_pools->memory_used -=
+                cur_block->size - cur_block->free - cur_block->freed;
 
             /* We know the pool body and pool header are a single chunk, so
              * this is enough to get rid of 'em both */
@@ -759,7 +781,7 @@
 is_block_almost_full(ARGIN(const Memory_Block *block))
 {
     ASSERT_ARGS(is_block_almost_full)
-    return (block->free + block->freed) < block->size * 0.2;
+    return 5 * (block->free + block->freed) < block->size;
 }
 
 /*
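
As a standalone illustration of the rewritten trigger in alloc_resources.c,
here is a minimal sketch. The toy_pools type, the should_collect() helper, the
main() driver and the TOY_GC_SIZE_THRESHOLD value are placeholders invented for
this example; only the condition itself mirrors the committed code.

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-ins for the Memory_Pools fields the new check reads;
     * the real structure in gc_private.h has many more members. */
    typedef struct {
        size_t memory_used;           /* bytes currently in use               */
        size_t mem_used_last_collect; /* memory_used right after the last GC  */
        int    gc_mark_block_level;   /* non-zero while GC marking is blocked */
    } toy_pools;

    /* Placeholder value; the real GC_SIZE_THRESHOLD lives in the GC headers. */
    #define TOY_GC_SIZE_THRESHOLD (1024 * 1024)

    /* Mirrors the new condition in mem_allocate(): collect only when the heap
     * has grown since the last collection by more than half of what was live
     * then AND by more than an absolute threshold. */
    static int should_collect(const toy_pools *pools)
    {
        const size_t new_mem = pools->memory_used - pools->mem_used_last_collect;

        return !pools->gc_mark_block_level
            &&  new_mem > (pools->mem_used_last_collect >> 1)
            &&  new_mem > TOY_GC_SIZE_THRESHOLD;
    }

    int main(void)
    {
        /* 8 MB survived the last collection, 2 MB allocated since then. */
        toy_pools pools = { 10u << 20, 8u << 20, 0 };
        printf("collect? %s\n", should_collect(&pools) ? "yes" : "no");
        return 0;
    }

The 2 MB of growth clears the absolute threshold but not the relative one, so
the sketch prints "no"; under the old code any allocation since the last
collection was enough to trigger a run.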

Modified: trunk/src/gc/gc_ms.c
==============================================================================
--- trunk/src/gc/gc_ms.c	Sat Aug 14 00:48:56 2010	(r48484)
+++ trunk/src/gc/gc_ms.c	Sat Aug 14 00:49:00 2010	(r48485)
@@ -35,11 +35,13 @@
         __attribute__nonnull__(1);
 
 static void gc_ms_add_free_object(SHIM_INTERP,
-    SHIM(Memory_Pools *mem_pools),
+    ARGMOD(Memory_Pools *mem_pools),
     ARGMOD(Fixed_Size_Pool *pool),
     ARGIN(void *to_add))
+        __attribute__nonnull__(2)
         __attribute__nonnull__(3)
         __attribute__nonnull__(4)
+        FUNC_MODIFIES(*mem_pools)
         FUNC_MODIFIES(*pool);
 
 static void gc_ms_alloc_objects(PARROT_INTERP,
@@ -134,11 +136,12 @@
 PARROT_CANNOT_RETURN_NULL
 PARROT_WARN_UNUSED_RESULT
 static void * gc_ms_get_free_object(PARROT_INTERP,
-    ARGIN(Memory_Pools *mem_pools),
+    ARGMOD(Memory_Pools *mem_pools),
     ARGMOD(Fixed_Size_Pool *pool))
         __attribute__nonnull__(1)
         __attribute__nonnull__(2)
         __attribute__nonnull__(3)
+        FUNC_MODIFIES(*mem_pools)
         FUNC_MODIFIES(*pool);
 
 static size_t gc_ms_get_gc_info(PARROT_INTERP, Interpinfo_enum which)
@@ -253,7 +256,8 @@
 #define ASSERT_ARGS_gc_ms_active_sized_buffers __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
        PARROT_ASSERT_ARG(mem_pools))
 #define ASSERT_ARGS_gc_ms_add_free_object __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
-       PARROT_ASSERT_ARG(pool) \
+       PARROT_ASSERT_ARG(mem_pools) \
+    , PARROT_ASSERT_ARG(pool) \
     , PARROT_ASSERT_ARG(to_add))
 #define ASSERT_ARGS_gc_ms_alloc_objects __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
        PARROT_ASSERT_ARG(interp) \
@@ -560,6 +564,7 @@
     ++mem_pools->gc_mark_runs;
     --mem_pools->gc_mark_block_level;
     mem_pools->header_allocs_since_last_collect = 0;
+    mem_pools->mem_used_last_collect = mem_pools->memory_used;
 
     return;
 }
@@ -1098,6 +1103,7 @@
     &&  (pool->top_block->top  == (char *)Buffer_bufstart(buffer) + old_size)) {
         pool->top_block->free -= needed;
         pool->top_block->top  += needed;
+        interp->mem_pools->memory_used += needed;
         Buffer_buflen(buffer)  = newsize;
         return;
     }
@@ -1210,6 +1216,7 @@
     &&  pool->top_block->top  == (char *)Buffer_bufstart(str) + old_size) {
         pool->top_block->free -= needed;
         pool->top_block->top  += needed;
+        interp->mem_pools->memory_used += needed;
         Buffer_buflen(str) = new_size - sizeof (void *);
         return;
     }
@@ -1231,7 +1238,8 @@
 
     /* Decrease usage */
     PARROT_ASSERT(Buffer_pool(str));
-    Buffer_pool(str)->freed  += ALIGNED_STRING_SIZE(Buffer_buflen(str));
+    Buffer_pool(str)->freed += old_size;
+    interp->mem_pools->memory_used -= old_size;
 
     /* copy mem from strstart, *not* bufstart */
     oldmem             = str->strstart;
@@ -1494,12 +1502,15 @@
         ARGMOD(Fixed_Size_Pool *pool))
 {
     ASSERT_ARGS(gc_ms_more_traceable_objects)
+    size_t new_mem = mem_pools->memory_used
+                   - mem_pools->mem_used_last_collect;
 
     if (pool->skip == GC_ONE_SKIP)
         pool->skip = GC_NO_SKIP;
     else if (pool->skip == GC_NEVER_SKIP
          || (pool->skip == GC_NO_SKIP
-         &&  mem_pools->header_allocs_since_last_collect >= GC_SIZE_THRESHOLD))
+         && (new_mem > (mem_pools->mem_used_last_collect >> 1)
+         &&  mem_pools->header_allocs_since_last_collect >= GC_SIZE_THRESHOLD)))
             Parrot_gc_mark_and_sweep(interp, GC_trace_stack_FLAG);
 
     /* requires that num_free_objects be updated in Parrot_gc_mark_and_sweep.
@@ -1523,7 +1534,7 @@
 
 static void
 gc_ms_add_free_object(SHIM_INTERP,
-        SHIM(Memory_Pools *mem_pools),
+        ARGMOD(Memory_Pools *mem_pools),
         ARGMOD(Fixed_Size_Pool *pool),
         ARGIN(void *to_add))
 {
@@ -1534,6 +1545,7 @@
 
     object->next_ptr = pool->free_list;
     pool->free_list  = object;
+    mem_pools->memory_used -= pool->object_size;
 }
 
 /*
@@ -1554,7 +1566,7 @@
 PARROT_WARN_UNUSED_RESULT
 static void *
 gc_ms_get_free_object(PARROT_INTERP,
-        ARGIN(Memory_Pools *mem_pools),
+        ARGMOD(Memory_Pools *mem_pools),
         ARGMOD(Fixed_Size_Pool *pool))
 {
     ASSERT_ARGS(gc_ms_get_free_object)
@@ -1584,6 +1596,7 @@
     }
 
     --pool->num_free_objects;
+    mem_pools->memory_used += pool->object_size;
 
     return ptr;
 }
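
A minimal sketch of the fixed-size header accounting these hunks add to
gc_ms_get_free_object() and gc_ms_add_free_object(); toy_pool, toy_mem_pools
and the helper names are placeholders, not Parrot's real types.

    #include <assert.h>
    #include <stddef.h>

    /* Hypothetical, minimal shapes; Parrot's Fixed_Size_Pool and Memory_Pools
     * carry far more state (free lists, arenas, skip policy, ...). */
    typedef struct { size_t object_size; size_t num_free_objects; } toy_pool;
    typedef struct { size_t memory_used; } toy_mem_pools;

    /* Mirrors gc_ms_get_free_object(): handing out a header counts its full
     * slot size as used memory. */
    static void account_get_object(toy_mem_pools *mem_pools, toy_pool *pool)
    {
        --pool->num_free_objects;
        mem_pools->memory_used += pool->object_size;
    }

    /* Mirrors gc_ms_add_free_object(): putting a header back on the free list
     * gives the same amount back, keeping the counter balanced. */
    static void account_free_object(toy_mem_pools *mem_pools, const toy_pool *pool)
    {
        mem_pools->memory_used -= pool->object_size;
    }

    int main(void)
    {
        toy_mem_pools mem_pools = { 0 };
        toy_pool      pool      = { 64, 10 };

        account_get_object(&mem_pools, &pool);
        account_free_object(&mem_pools, &pool);
        assert(mem_pools.memory_used == 0);   /* allocate and free cancel out */
        return 0;
    }

Because the two sites are symmetric, memory_used tracks live fixed-size headers
without a separate census, which is what lets the new check in
gc_ms_more_traceable_objects() compare growth against mem_used_last_collect.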

Modified: trunk/src/gc/gc_private.h
==============================================================================
--- trunk/src/gc/gc_private.h	Sat Aug 14 00:48:56 2010	(r48484)
+++ trunk/src/gc/gc_private.h	Sat Aug 14 00:49:00 2010	(r48485)
@@ -283,6 +283,12 @@
                                    * memory for headers or
                                    * internal structures or
                                    * anything */
+    size_t  memory_used;              /* The total amount of
+                                       * memory used for
+                                       * buffers and headers */
+    size_t  mem_used_last_collect;    /* The total amount of
+                                       * memory used after
+                                       * the last GC run */
     UINTVAL memory_collected;     /* Total amount of memory copied
                                      during collection */
     UINTVAL num_early_gc_PMCs;    /* how many PMCs want immediate destruction */
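
A minimal sketch of how the two new counters cooperate: mem_used_last_collect
is a snapshot taken at the end of a mark run (see the gc_ms.c hunk above), and
the new thresholds compare later growth against it. The toy_counters type and
the helper names are invented for illustration.

    #include <stdio.h>
    #include <stddef.h>

    /* Toy counterpart of the two counters this commit adds to Memory_Pools. */
    typedef struct {
        size_t memory_used;           /* bytes live in buffers and headers     */
        size_t mem_used_last_collect; /* snapshot of memory_used after last GC */
    } toy_counters;

    /* Mirrors the assignment added at the end of the mark phase in gc_ms.c:
     * remember how much memory survived this collection. */
    static void end_of_mark_run(toy_counters *c)
    {
        c->mem_used_last_collect = c->memory_used;
    }

    /* Growth since the last collection; this is the quantity the new checks
     * in alloc_resources.c and gc_ms.c compare against their thresholds. */
    static size_t growth_since_last_collect(const toy_counters *c)
    {
        return c->memory_used - c->mem_used_last_collect;
    }

    int main(void)
    {
        toy_counters c = { 4096, 0 };
        end_of_mark_run(&c);        /* snapshot: 4096 bytes survived       */
        c.memory_used += 8192;      /* later allocations grow the heap     */
        printf("grown by %zu bytes since the last GC\n",
               growth_since_last_collect(&c));
        return 0;
    }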

Modified: trunk/src/gc/mark_sweep.c
==============================================================================
--- trunk/src/gc/mark_sweep.c	Sat Aug 14 00:48:56 2010	(r48484)
+++ trunk/src/gc/mark_sweep.c	Sat Aug 14 00:49:00 2010	(r48485)
@@ -32,11 +32,13 @@
 /* Don't modify between HEADERIZER BEGIN / HEADERIZER END.  Your changes will be lost. */
 
 static void free_buffer(SHIM_INTERP,
-    SHIM(Memory_Pools *mem_pools),
+    ARGMOD(Memory_Pools *mem_pools),
     ARGMOD(Fixed_Size_Pool *pool),
     ARGMOD(Buffer *b))
+        __attribute__nonnull__(2)
         __attribute__nonnull__(3)
         __attribute__nonnull__(4)
+        FUNC_MODIFIES(*mem_pools)
         FUNC_MODIFIES(*pool)
         FUNC_MODIFIES(*b);
 
@@ -78,7 +80,8 @@
         FUNC_MODIFIES(*mem_pools);
 
 #define ASSERT_ARGS_free_buffer __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
-       PARROT_ASSERT_ARG(pool) \
+       PARROT_ASSERT_ARG(mem_pools) \
+    , PARROT_ASSERT_ARG(pool) \
     , PARROT_ASSERT_ARG(b))
 #define ASSERT_ARGS_free_pmc_in_pool __attribute__unused__ int _ASSERT_ARGS_CHECK = (\
        PARROT_ASSERT_ARG(interp) \
@@ -655,7 +658,7 @@
 
 static void
 free_buffer(SHIM_INTERP,
-        SHIM(Memory_Pools *mem_pools),
+        ARGMOD(Memory_Pools *mem_pools),
         ARGMOD(Fixed_Size_Pool *pool),
         ARGMOD(Buffer *b))
 {
@@ -680,7 +683,9 @@
 
             /* We can have shared buffers. Don't count them (yet) */
             if (!(*buffer_flags & Buffer_shared_FLAG)) {
-                block->freed  += ALIGNED_STRING_SIZE(Buffer_buflen(b));
+                size_t size = ALIGNED_STRING_SIZE(Buffer_buflen(b));
+                block->freed += size;
+                mem_pools->memory_used -= size;
             }
 
         }
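
A minimal sketch of the buffer accounting added to free_buffer(), assuming a
hypothetical 16-byte alignment in place of ALIGNED_STRING_SIZE(); the toy_
names and the helper are placeholders, not Parrot's real definitions.

    #include <assert.h>
    #include <stddef.h>

    /* Simplified stand-ins for the structures free_buffer() touches. */
    typedef struct { size_t freed; } toy_block;
    typedef struct { size_t memory_used; } toy_mem_pools;

    /* Placeholder for ALIGNED_STRING_SIZE(): rounds a buffer length up to the
     * pool alignment (16 bytes here, chosen arbitrarily for the example). */
    static size_t toy_aligned_size(size_t buflen)
    {
        return (buflen + 15) & ~(size_t)15;
    }

    /* Mirrors the change: a dead, non-shared buffer both marks its bytes as
     * reclaimable in its memory block and drops them from the global counter. */
    static void account_dead_buffer(toy_mem_pools *mem_pools, toy_block *block,
                                    size_t buflen, int is_shared)
    {
        if (!is_shared) {
            const size_t size = toy_aligned_size(buflen);
            block->freed           += size;
            mem_pools->memory_used -= size;
        }
    }

    int main(void)
    {
        toy_mem_pools mem_pools = { 1000 };
        toy_block     block     = { 0 };

        account_dead_buffer(&mem_pools, &block, 100, 0);
        assert(block.freed == 112);                  /* 100 rounded up to 112 */
        assert(mem_pools.memory_used == 1000 - 112);
        return 0;
    }

Computing the freed size once and subtracting it from memory_used keeps the
counter consistent with the block->freed bookkeeping that already existed; the
matching subtraction in the string-reallocation hunk in gc_ms.c above does the
same for in-place reallocation.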

