Remove support for N heaps per size class.
author: Robert Haas <rhaas@postgresql.org>
Thu, 10 Apr 2014 18:00:34 +0000 (18:00 +0000)
committer: Robert Haas <rhaas@postgresql.org>
Thu, 10 Apr 2014 18:00:34 +0000 (18:00 +0000)
Profiling shows that this is wicked expensive.

src/backend/utils/mmgr/sb_alloc.c

index 760839bb56f3003906920617fada7293a4b6a963..5220161f01cc4b0c54ba6e289ee686f1f896c37c 100644 (file)
@@ -121,7 +121,7 @@ static void sb_out_of_memory_error(sb_allocator *a);
 static bool sb_transfer_first_span(char *base, sb_heap *heap,
                                           int fromclass, int toclass);
 static bool sb_try_to_steal_superblock(char *base, sb_allocator *a,
-                                                  uint16 heapproc, uint16 size_class);
+                                                  uint16 size_class);
 
 /*
  * Create a backend-private allocator.
@@ -145,7 +145,6 @@ sb_create_private_allocator(void)
                                 errmsg("out of memory")));
 
        a->private = true;
-       a->heaps_per_size_class = 1;
        a->num_size_classes = num_size_classes;
        for (heapno = 0; heapno < num_size_classes; ++heapno)
        {
@@ -193,9 +192,7 @@ sb_alloc(sb_allocator *a, Size size, int flags)
                Size    npages = fpm_size_to_pages(size);
                Size    first_page;
                sb_span *span;
-               int     heapproc = MyProcPid % a->heaps_per_size_class;
-               int     heapno = heapproc * SB_NUM_SIZE_CLASSES + SB_SCLASS_SPAN_LARGE;
-               sb_heap *heap = &a->heaps[heapno];
+               sb_heap *heap = &a->heaps[SB_SCLASS_SPAN_LARGE];
                LWLock *lock = relptr_access(base, heap->lock);
                void *ptr;
 
@@ -282,7 +279,6 @@ void
 sb_reset_allocator(sb_allocator *a)
 {
        char *base = NULL;
-       int     num_heaps;
        int heapno;
 
        /*
@@ -301,8 +297,7 @@ sb_reset_allocator(sb_allocator *a)
         * Iterate through heaps back to front.  We do it this way so that
         * spans-of-spans are freed last.
         */
-       num_heaps = a->heaps_per_size_class * (int) a->num_size_classes;
-       for (heapno = num_heaps; heapno >= 0; --heapno)
+       for (heapno = a->num_size_classes; heapno >= 0; --heapno)
        {
                sb_heap *heap = &a->heaps[heapno];
                Size    fclass;
@@ -467,9 +462,7 @@ sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize, Size nmax)
 static char *
 sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
 {
-       int     heapproc = MyProcPid % a->heaps_per_size_class;
-       int     heapno = heapproc * SB_NUM_SIZE_CLASSES + size_class;
-       sb_heap *heap = &a->heaps[heapno];
+       sb_heap *heap = &a->heaps[size_class];
        LWLock *lock = relptr_access(base, heap->lock);
        char *result = NULL;
        Size    obsize;
@@ -503,8 +496,7 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
         */
        if (result == NULL)
        {
-               if (a->heaps_per_size_class > 1 &&
-                       sb_try_to_steal_superblock(base, a, heapproc, size_class))
+               if (sb_try_to_steal_superblock(base, a, size_class))
                {
                        /* The superblock we stole shouldn't full, so this should work. */
                        result = sb_alloc_from_heap(base, heap, obsize, nmax);
@@ -654,8 +646,7 @@ sb_transfer_first_span(char *base, sb_heap *heap, int fromclass, int toclass)
  * Returns true if we succeed in stealing one, and false if not.
  */
 static bool
-sb_try_to_steal_superblock(char *base, sb_allocator *a,
-                                                  uint16 heapproc, uint16 size_class)
+sb_try_to_steal_superblock(char *base, sb_allocator *a, uint16 size_class)
 {
        /* XXX */
        return false;