more of sb_alloc, still missing some guts
author Robert Haas <rhaas@postgresql.org>
Fri, 21 Mar 2014 18:35:56 +0000 (14:35 -0400)
committer Robert Haas <rhaas@postgresql.org>
Fri, 21 Mar 2014 18:35:56 +0000 (14:35 -0400)
src/backend/utils/mmgr/sb_alloc.c

index 8d441a2c342cd0bf9024d900c9b5cb38a4da9abc..a2f5b0139053e803cac50348e6933b7ba9ed3f0a 100644
 #include "miscadmin.h"
 #include "utils/sb_region.h"
 
+/*
+ * Metadata for an ordinary superblock, a large memory allocation, or a "span
+ * of spans".
+ *
+ * For ordinary superblocks and large memory allocations, span objects are
+ * stored out-of-line; that is, the span object is not stored within the
+ * span itself.  Ordinary superblocks are all of size SB_SUPERBLOCK_SIZE,
+ * and size_class indicates the size of object they contain.  Large memory
+ * spans contain just enough pages to store the object, and size_class
+ * is SB_SCLASS_SPAN_LARGE; ninitialized, nused, and firstfree are all unused, as
+ * the whole span consists of a single object.
+ * 
+ * For a "span of spans", the span object is stored "inline".  The allocation
+ * is always exactly one page, and the sb_span object is located at the
+ * beginning of that page.  This makes it easy to free a span: just find the
+ * start of the containing page, and there's the sb_span to which it needs to
+ * be returned.  The size class will be SB_SCLASS_SPAN_OF_SPANS, and the remaining
+ * fields are used just as they would be in an ordinary superblock.  We can't
+ * allocate spans out of ordinary superblocks because creating an ordinary
+ * superblock requires us to be able to allocate a span *first*.  Doing it
+ * this way avoids that circularity.
+ */
+struct sb_span
+{
+       relptr(sb_heap) parent;         /* Containing heap. */
+       relptr(sb_span) prevspan;       /* Previous span. */
+       relptr(sb_span) nextspan;       /* Next span. */
+       Size            first_page;             /* Starting page number. */
+       Size            npages;                 /* Length of span in pages. */
+       uint16          size_class;             /* Size class. */
+       uint16          ninitialized;   /* Maximum number of objects ever allocated. */
+       uint16          nused;                  /* Number of objects currently allocated. */
+       uint16          firstfree;              /* First object on free list. */
+};
+
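
The "easy to free" trick described above can be shown concretely. A minimal
sketch of recovering the inline descriptor for an object allocated out of a
span of spans; the helper name is invented here, and FPM_PAGE_SIZE is assumed
to be the free page manager's (power-of-two) page size:

static sb_span *
sb_span_for_pointer(void *ptr)
{
    /* Round down to the containing page; the sb_span sits at its start. */
    return (sb_span *) ((uintptr_t) ptr & ~((uintptr_t) FPM_PAGE_SIZE - 1));
}
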
 /*
  * Small allocations are handled by dividing a relatively large chunk of
  * memory called a superblock into many small objects of equal size.  The
@@ -72,10 +107,18 @@ static char sb_size_class_map[] = {
 #define SB_SCLASS_SPAN_OF_SPANS                        0
 #define SB_SCLASS_SPAN_LARGE                   1
 #define SB_SCLASS_FIRST_REGULAR                        2
+#define SB_NUM_SIZE_CLASSES \
+       (SB_SCLASS_FIRST_REGULAR + lengthof(sb_size_classes))
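
For orientation: the allocator keeps heaps_per_size_class parallel stripes of
heaps, one heap per size class in each stripe, with each process mapped to a
stripe by pid; that is what the heapno arithmetic later in this patch
implements. A sketch of the layout with heaps_per_size_class = 2 (that
concrete value is invented for the example):

/*
 * a->heaps[0 .. SB_NUM_SIZE_CLASSES - 1]                        stripe 0 (even pids)
 * a->heaps[SB_NUM_SIZE_CLASSES .. 2 * SB_NUM_SIZE_CLASSES - 1]  stripe 1 (odd pids)
 *
 * heapproc = MyProcPid % 2 picks the stripe;
 * heapno = heapproc * SB_NUM_SIZE_CLASSES + size_class picks the heap.
 */
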
 
 /* Helper functions. */
-static char *sb_alloc_guts(sb_region *region, sb_allocator *a, int size_class);
+static char *sb_alloc_from_heap(char *base, sb_heap *heap);
+static char *sb_alloc_guts(char *base, sb_region *region,
+                         sb_allocator *a, int size_class);
+static void sb_init_span(char *base, sb_span *span, sb_heap *heap,
+                        Size first_page, Size npages, uint16 size_class);
 static void sb_out_of_memory_error(sb_allocator *a);
+static bool sb_try_to_steal_superblock(char *base, sb_allocator *a,
+                                                  uint16 heapproc, uint16 size_class);
 
 /*
  * Create a backend-private allocator.
@@ -121,7 +164,7 @@ sb_alloc(sb_allocator *a, Size size, int flags)
 {
        sb_region *region = NULL;
        char *base = NULL;
-       int             size_class;
+       uint16  size_class;
        char   *result;
 
        /*
@@ -145,9 +188,14 @@ sb_alloc(sb_allocator *a, Size size, int flags)
                Size    npages = fpm_size_to_pages(size);
                Size    first_page;
                sb_span *span;
+               int     heapproc = MyProcPid % a->heaps_per_size_class;
+               int     heapno = heapproc * SB_NUM_SIZE_CLASSES + size_class;
+               sb_heap *heap = &a->heaps[heapno];
+               LWLock *lock = relptr_access(base, heap->lock);
 
                /* Obtain a span object. */
-               span = (sb_span *) sb_alloc_guts(region, a, SB_SCLASS_SPAN_OF_SPANS);
+               span = (sb_span *) sb_alloc_guts(base, region, a,
+                                                                                SB_SCLASS_SPAN_OF_SPANS);
                if (span == NULL)
                {
                        if ((flags & SB_ALLOC_SOFT_FAIL) == 0)
@@ -169,8 +217,12 @@ sb_alloc(sb_allocator *a, Size size, int flags)
                        return NULL;
                }
 
-               /* XXX. Put the span on the large object heap! */
-
+               /* Initialize span and pagemap. */
+               if (lock != NULL)
+                       LWLockAcquire(lock, LW_EXCLUSIVE);
+               sb_init_span(base, span, heap, first_page, npages, size_class);
+               if (lock != NULL)
+                       LWLockRelease(lock);
                sb_map_set(region->pagemap, first_page, ((char *) span) - base);
 
                return fpm_page_to_pointer(fpm_segment_base(region->fpm),
@@ -208,22 +260,146 @@ sb_alloc(sb_allocator *a, Size size, int flags)
        size_class += SB_SCLASS_FIRST_REGULAR;
 
        /* Attempt the actual allocation. */
-       result = sb_alloc_guts(region, a, size_class);
+       result = sb_alloc_guts(base, region, a, size_class);
        if (result == NULL && (flags & SB_ALLOC_SOFT_FAIL) == 0)
                sb_out_of_memory_error(a);
        return result;
 }
 
 /*
- * Guts of the memory allocation routine.
+ * Allocate an object from the provided heap.  Caller is responsible for
+ * any required locking.
  */
 static char *
-sb_alloc_guts(sb_region *region, sb_allocator *a, int size_class)
+sb_alloc_from_heap(char *base, sb_heap *heap)
 {
        /* XXX */
        return NULL;
 }
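
sb_alloc_from_heap() is one of the guts still missing (per the commit
subject). As a hedged sketch of the likely shape of its inner loop, here is
how objects might be carved out of a single span given the sb_span fields
above; the helper name, FPM_PAGE_SIZE, the free-list encoding (a free
object's first uint16 holds the index of the next free object), and the use
of firstfree >= ninitialized as the "free list empty" sentinel are all
assumptions, not code from this patch:

static char *
sb_alloc_object_from_span(char *superblock, sb_span *span, Size obsize)
{
    char   *result;

    if (span->firstfree < span->ninitialized)
    {
        /* Reuse a freed object; its first two bytes hold the next link. */
        result = superblock + span->firstfree * obsize;
        span->firstfree = *(uint16 *) result;
    }
    else if (span->ninitialized < span->npages * FPM_PAGE_SIZE / obsize)
    {
        /* Free list is empty; hand out the next never-used slot. */
        result = superblock + span->ninitialized * obsize;
        span->ninitialized++;
        span->firstfree = span->ninitialized;   /* keep the sentinel valid */
    }
    else
        return NULL;            /* span is completely full */

    span->nused++;
    return result;
}

The matching free path would store the old firstfree value into the object's
first two bytes and make the freed object's index the new firstfree.
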
 
+/*
+ * Allocate an object of the requested size class from the given allocator.
+ * If necessary, steal or create another superblock.
+ */
+static char *
+sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
+{
+       int     heapproc = MyProcPid % a->heaps_per_size_class;
+       int     heapno = heapproc * SB_NUM_SIZE_CLASSES + size_class;
+       sb_heap *heap = &a->heaps[heapno];
+       LWLock *lock = relptr_access(base, heap->lock);
+       char *result = NULL;
+
+       /* If locking is in use, acquire the lock. */
+       if (lock != NULL)
+               LWLockAcquire(lock, LW_EXCLUSIVE);
+
+       /* Attempt to allocate from the heap. */
+       result = sb_alloc_from_heap(base, heap);
+
+       /*
+        * If there's no space in the current heap, but there are multiple heaps
+        * per size class, we can attempt to grab a superblock from some other
+        * heap.  Otherwise, we'll need to attempt to create a new superblock.
+        */
+       if (result == NULL)
+       {
+               if (a->heaps_per_size_class > 1 &&
+                       sb_try_to_steal_superblock(base, a, heapproc, size_class))
+               {
+                       /* The superblock we stole shouldn't be full, so this should work. */
+                       result = sb_alloc_from_heap(base, heap);
+                       Assert(result != NULL);
+               }
+               else
+               {
+                       sb_span *span = NULL;
+                       Size    npages = 1;
+                       Size    first_page;
+                       Size    i;
+
+                       /*
+                        * Get an sb_span object to describe the new superblock... unless
+                        * this allocation is for an sb_span object, in which case that's
+                        * surely not going to work.  We handle that case by storing the
+                        * sb_span describing an sb_span superblock inline.
+                        */
+                       if (size_class != SB_SCLASS_SPAN_OF_SPANS)
+                       {
+                               span = (sb_span *) sb_alloc_guts(base, region, a,
+                                                                                                SB_SCLASS_SPAN_OF_SPANS);
+                               if (span == NULL)
+                               {
+                                       if (lock != NULL)
+                                               LWLockRelease(lock);
+                                       return NULL;
+                               }
+                               npages = SB_PAGES_PER_SUPERBLOCK;
+                       }
+
+                       /* Find a region from which to allocate the superblock. */
+                       if (region == NULL)
+                               region = sb_private_region_for_allocator(npages);
+
+                       /* Try to allocate the actual superblock. */
+                       if (region == NULL ||
+                               !FreePageManagerGet(region->fpm, npages, &first_page))
+                       {
+                               /* XXX. Free the span, if any. */
+                               if (lock != NULL)
+                                       LWLockRelease(lock);
+                               return NULL;
+                       }
+
+                       /*
+                        * If this is a span-of-spans, carve the descriptor right out of
+                        * the allocated space.
+                        */
+                       if (size_class == SB_SCLASS_SPAN_OF_SPANS)
+                       {
+                               char *fpm_base = fpm_segment_base(region->fpm);
+                               span = (sb_span *) fpm_page_to_pointer(fpm_base, first_page);
+                       }
+
+                       /* Initialize span and pagemap. */
+                       sb_init_span(base, span, heap, first_page, npages, size_class);
+                       for (i = 0; i < npages; ++i)
+                               sb_map_set(region->pagemap, first_page + i,
+                                                  ((char *) span) - base);
+
+                       /* For a span-of-spans, record that we allocated ourselves. */
+                       if (size_class == SB_SCLASS_SPAN_OF_SPANS)
+                               span->ninitialized = span->nused = 1;
+
+                       /* This should work now. */
+                       result = sb_alloc_from_heap(base, heap);
+                       Assert(result != NULL);
+               }
+       }
+
+       /* We're all done.  Release the lock. */
+       if (lock != NULL)
+               LWLockRelease(lock);
+
+       return result;
+}
+
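To see how the circularity described at the top of the file resolves, it may
help to trace the first-ever allocation through the function above (a
walk-through of this patch's logic, not additional code):

/*
 * sb_alloc_guts(..., some regular size class)
 *    heap is empty and there is nothing to steal, so it recurses:
 *    sb_alloc_guts(..., SB_SCLASS_SPAN_OF_SPANS)
 *       here span stays NULL and npages stays 1, so a single page is
 *       grabbed and the sb_span descriptor is carved out of that very
 *       page (ninitialized = nused = 1: the descriptor occupies slot 0);
 *       sb_alloc_from_heap() then hands back slot 1 as a fresh sb_span.
 *    The outer call uses that sb_span to describe a superblock of
 *    SB_PAGES_PER_SUPERBLOCK pages, links it in with sb_init_span(),
 *    and retries sb_alloc_from_heap().
 */
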
+/*
+ * Add a new span to fullness class 1 of the indicated heap.
+ */
+static void
+sb_init_span(char *base, sb_span *span, sb_heap *heap, Size first_page,
+                        Size npages, uint16 size_class)
+{
+       sb_span *head = relptr_access(base, heap->spans[1]);
+
+       if (head != NULL)
+               relptr_store(base, head->prevspan, span);
+       relptr_store(base, span->parent, heap);
+       relptr_store(base, span->nextspan, head);
+       relptr_store(base, span->prevspan, (sb_span *) NULL);
+       span->first_page = first_page;
+       span->npages = npages;
+       span->size_class = size_class;
+       span->ninitialized = 0;
+       span->nused = 0;
+       span->firstfree = 0;
+}
+
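The parent/prevspan/nextspan surgery above leans on relptr, the base-relative
pointer type from this patch series' sb_region.h. A simplified, non-type-safe
sketch of the semantics assumed throughout this file (offset 0 encodes NULL):

/* Store: record val as an offset from base, with 0 meaning NULL. */
#define relptr_store(base, rp, val) \
    ((rp).relptr_off = ((val) == NULL ? 0 : (char *) (val) - (base)))

/* Access: turn the stored offset back into an absolute pointer. */
#define relptr_access(base, rp) \
    ((rp).relptr_off == 0 ? NULL : (void *) ((base) + (rp).relptr_off))

For a backend-private allocator, base is simply NULL (as in sb_alloc above),
so the stored "offsets" are ordinary addresses, and heap->lock comes back as
NULL, which is what disables the LWLock calls.
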
 /*
  * Report an out-of-memory condition.
  */
@@ -239,3 +415,16 @@ sb_out_of_memory_error(sb_allocator *a)
                                (errcode(ERRCODE_OUT_OF_MEMORY),
                                 errmsg("out of shared memory")));
 }
+
+/*
+ * Try to steal a superblock from another heap for the same size class,
+ * to avoid wasting too much memory in concurrent allocation scenarios.
+ * Returns true if we succeed in stealing one, and false if not.
+ */
+static bool
+sb_try_to_steal_superblock(char *base, sb_allocator *a,
+                                                  uint16 heapproc, uint16 size_class)
+{
+       /* XXX */
+       return false;
+}
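
This stub is likewise awaiting its guts. One possible shape, heavily hedged:
the sketch below raids only fullness class 1, ignores the lock-ordering
question raised by the caller already holding its own heap's lock, and is not
taken from any actual later implementation:

static bool
sb_try_to_steal_superblock_sketch(char *base, sb_allocator *a,
                                  uint16 heapproc, uint16 size_class)
{
    sb_heap *myheap = &a->heaps[heapproc * SB_NUM_SIZE_CLASSES + size_class];
    uint16  victim;

    for (victim = 0; victim < a->heaps_per_size_class; ++victim)
    {
        sb_heap *heap;
        LWLock *lock;
        sb_span *span;
        sb_span *next;
        sb_span *myhead;

        if (victim == heapproc)
            continue;
        heap = &a->heaps[victim * SB_NUM_SIZE_CLASSES + size_class];
        lock = relptr_access(base, heap->lock);
        if (lock != NULL)
            LWLockAcquire(lock, LW_EXCLUSIVE);

        /* Look for a span with room in the victim's fullness class 1. */
        span = relptr_access(base, heap->spans[1]);
        if (span == NULL)
        {
            if (lock != NULL)
                LWLockRelease(lock);
            continue;
        }

        /* Unlink it from the victim heap... */
        next = relptr_access(base, span->nextspan);
        relptr_store(base, heap->spans[1], next);
        if (next != NULL)
            relptr_store(base, next->prevspan, (sb_span *) NULL);
        if (lock != NULL)
            LWLockRelease(lock);

        /* ...and push it onto the head of our own heap's span list. */
        myhead = relptr_access(base, myheap->spans[1]);
        if (myhead != NULL)
            relptr_store(base, myhead->prevspan, span);
        relptr_store(base, span->parent, myheap);
        relptr_store(base, span->nextspan, myhead);
        relptr_store(base, span->prevspan, (sb_span *) NULL);
        return true;
    }

    return false;
}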