/*-------------------------------------------------------------------------
*
* balloc.c
- * Superblock-based memory allocator.
+ * Block-based memory allocator.
+ *
+ * When using this allocator, memory is allocated from the operating
+ * system in fairly large regions called AllocatorRegions, from 1 to 64MB
+ * in size. These are then carved up into smaller blocks whose sizes are
+ * multiples of FPM_PAGE_SIZE (4kB). Allocations of 8kB or less are
+ * binned by size class and then satisfied from 64kB blocks carved up into
+ * equal-sized objects. Large requests result in the creation of a
+ * separate block for each allocation. Blocks are described by
+ * BlockAllocatorSpan objects which are allocated using more or less
+ * the same technique, except with smaller blocks (4kB) and some tricks
+ * to avoid circular dependencies.
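+ *
+ * So, for example, a 100-byte request is rounded up to the 112-byte size
+ * class and carved out of a shared 64kB block, while a 1MB request gets a
+ * dedicated 256-page block of its own.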
+ *
+ * This system should be extremely memory-efficient for large numbers of
+ * allocations, both because the size classes are relatively fine-grained
+ * and because there is no per-chunk overhead. However, it can waste
+ * significant space when the number of allocations is small, because
+ * it's possible to end up with a bunch of mostly-empty 64kB blocks for
+ * different size classes. That fixed overhead doesn't matter much when
+ * allocating a lot of memory, but it can be significant for small
+ * contexts.
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
#include "miscadmin.h"
#include "utils/aregion.h"
-typedef struct sb_heap sb_heap;
-typedef struct sb_span sb_span;
+typedef struct BlockAllocatorHeap BlockAllocatorHeap;
+typedef struct BlockAllocatorSpan BlockAllocatorSpan;
/*
- * Metadata for an ordinary superblock, a large memory allocation, or a "span
- * of spans".
+ * Metadata for one block.
*
- * For ordinary superblocks and large memory allocations, span objects are
- * stored out-of-line; that is, the span object is not stored within the
- * span itself. Ordinary superblocks are all of size SB_SUPERBLOCK_SIZE,
- * and size_class indicates the size of object they contain. Large memory
- * spans contain just enough pages to store the object, and size_class
- * is SB_SCLASS_SPAN_LARGE; ninitialized, nused, and firstfree are all unused,
- * as the whole span consists of a single object.
- *
- * For a "span of spans", the span object is stored "inline". The allocation
- * is always exactly one page, and the sb_span object is located at the
- * beginning of that page. This makes it easy to free a span: just find the
- * start of the containing page, and there's the sb_span to which it needs to
- * be returned. The size class will be SB_SPAN_OF_SPANS, and the remaining
- * fields are used just as they would be in an ordinary superblock. We can't
- * allocate spans out of ordinary superblocks because creating an ordinary
- * superblock requires us to be able to allocate a span *first*. Doing it
+ * For most blocks, span objects are stored out-of-line; that is, the span
+ * object is not stored within the block itself. But, as an exception, for
+ * a "span of spans", the span object is stored "inline". The allocation
+ * is always exactly one page, and the BlockAllocatorSpan object is located at
+ * the beginning of that page. The size class is BA_SCLASS_BLOCK_OF_SPANS,
+ * and the remaining fields are used just as they would be in an ordinary
+ * block. We can't allocate spans out of ordinary blocks because creating an
+ * ordinary block requires us to be able to allocate a span *first*. Doing it
* this way avoids that circularity.
*/
-struct sb_span
+struct BlockAllocatorSpan
{
- relptr(sb_heap) parent; /* Containing heap. */
- relptr(sb_span) prevspan; /* Previous span. */
- relptr(sb_span) nextspan; /* Next span. */
+ relptr(BlockAllocatorHeap) parent; /* Containing heap. */
+ relptr(BlockAllocatorSpan) prevspan; /* Previous span. */
+ relptr(BlockAllocatorSpan) nextspan; /* Next span. */
relptr(char) start; /* Starting address. */
Size npages; /* Length of span in pages. */
uint16 size_class; /* Size class. */
uint16 fclass; /* Current fullness class. */
};
-#define SB_SPAN_NOTHING_FREE ((uint16) -1)
-#define SB_SUPERBLOCK_SIZE (BLOCK_ALLOCATOR_PAGES_PER_CHUNK * FPM_PAGE_SIZE)
+#define BA_SPAN_NOTHING_FREE ((uint16) -1)
+#define BA_SUPERBLOCK_SIZE (BLOCK_ALLOCATOR_PAGES_PER_CHUNK * FPM_PAGE_SIZE)
/*
- * Small allocations are handled by dividing a relatively large chunk of
- * memory called a superblock into many small objects of equal size. The
- * chunk sizes are defined by the following array. Larger size classes are
- * spaced more widely than smaller size classes. We fudge the spacing for
- * size classes >1k to avoid space wastage: based on the knowledge that we
- * plan to allocate 64k superblocks, we bump the maximum object size up
- * to the largest multiple of 8 bytes that still lets us fit the same
- * number of objects into one superblock.
+ * Small allocations are handled by dividing a single block of memory into
+ * many small objects of equal size. The possible allocation sizes are
+ * defined by the following array. Larger size classes are spaced more widely
+ * than smaller size classes. We fudge the spacing for size classes >1kB to
+ * avoid space wastage: based on the knowledge that we plan to allocate 64kB
+ * blocks, we bump the maximum object size up to the largest multiple of
+ * 8 bytes that still lets us fit the same number of objects into one block.
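+ * For instance, a nominal 1536-byte class fits 42 objects in a 64kB block
+ * and wastes 1024 bytes; bumping the object size to 1560, the largest
+ * multiple of 8 that still fits 42 objects, cuts the waste to 16 bytes.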
*
- * NB: Because of this fudging, if the size of a superblock is ever changed,
- * these size classes should be reworked to be optimal for the new size.
+ * NB: Because of this fudging, if we were ever to use differently-sized blocks
+ * for small allocations, these size classes would need to be reworked to be
+ * optimal for the new size.
*
* NB: The optimal spacing for size classes, as well as the size of the
- * superblocks themselves, is not a question that has one right answer.
- * Some allocators (such as tcmalloc) use more closely-spaced size classes
- * than we do here, while others (like aset.c) use more widely-spaced classes.
- * Spacing the classes more closely avoids wasting memory within individual
- * chunks, but also means a larger number of potentially-unfilled superblocks.
- * This system is really only suitable for allocating relatively large amounts
- * of memory, where the unfilled superblocks will be a small percentage of
- * the total allocations.
+ * blocks out of which small objects are allocated, is not a question that has
+ * one right answer. Some allocators (such as tcmalloc) use more
+ * closely-spaced size classes than we do here, while others (like aset.c) use
+ * more widely-spaced classes. Spacing the classes more closely avoids wasting
+ * memory within individual chunks, but also means a larger number of
+ * potentially-unfilled blocks.
*/
-static const uint16 sb_size_classes[] = {
- sizeof(sb_span), 0, /* special size classes */
+static const uint16 balloc_size_classes[] = {
+ sizeof(BlockAllocatorSpan), 0, /* special size classes */
8, 16, 24, 32, 40, 48, 56, 64, /* 8 classes separated by 8 bytes */
80, 96, 112, 128, /* 4 classes separated by 16 bytes */
160, 192, 224, 256, /* 4 classes separated by 32 bytes */
* round the size of the object up to the next multiple of 8 bytes, and then
* index into this array.
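+ * For example, a 21-byte request gives map index (21 + 7) / 8 - 1 = 2;
+ * balloc_size_class_map[2] is 4, and balloc_size_classes[4] is the
+ * 24-byte size class.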
*/
-static char sb_size_class_map[] = {
+static char balloc_size_class_map[] = {
2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 11, 12, 12, 13, 13,
14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 17,
18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25
};
-#define SB_SIZE_CLASS_MAP_QUANTUM 8
+#define BA_SIZE_CLASS_MAP_QUANTUM 8
/* Special size classes. */
-#define SB_SCLASS_SPAN_OF_SPANS 0
-#define SB_SCLASS_SPAN_LARGE 1
-#define SB_NUM_SIZE_CLASSES lengthof(sb_size_classes)
+#define BA_SCLASS_BLOCK_OF_SPANS 0
+#define BA_SCLASS_SPAN_LARGE 1
+#define BA_NUM_SIZE_CLASSES lengthof(balloc_size_classes)
/*
- * Superblocks are binned by how full they are. Generally, each fullness
+ * Blocks are binned by how full they are. Generally, each fullness
- * class corresponds to one quartile, but the superblock being used for
+ * class corresponds to one quartile, but the block being used for
* allocations is always at the head of the list for fullness class 1,
* regardless of how full it really is.
*
* to bulk release everything allocated using this BlockAllocatorContext, we
* have no other way of finding them.
*/
-#define SB_FULLNESS_CLASSES 4
+#define BA_FULLNESS_CLASSES 4
/*
- * An sb_heap represents a set of allocations of a given size class.
+ * A BlockAllocatorHeap represents a set of allocations of a given size class.
* There can be multiple heaps for the same size class for contention
* avoidance.
*/
-struct sb_heap
+struct BlockAllocatorHeap
{
relptr(LWLock) lock;
- relptr(sb_span) spans[SB_FULLNESS_CLASSES];
+ relptr(BlockAllocatorSpan) spans[BA_FULLNESS_CLASSES];
};
/*
{
bool private;
relptr(LWLock) locks;
- sb_heap heaps[lengthof(sb_size_classes)];
+ BlockAllocatorHeap heaps[lengthof(balloc_size_classes)];
};
/* Helper functions. */
static char *BlockAllocatorAllocGuts(char *base, AllocatorRegion *region,
BlockAllocatorContext *context, int size_class);
-static bool sb_ensure_active_superblock(char *base, AllocatorRegion *region,
- BlockAllocatorContext *context, sb_heap *heap,
+static bool BlockAllocatorEnsureActiveBlock(char *base, AllocatorRegion *region,
+ BlockAllocatorContext *context,
+ BlockAllocatorHeap *heap,
int size_class);
-static void sb_init_span(char *base, sb_span *span, sb_heap *heap,
- char *ptr, Size npages, uint16 size_class);
+static void sb_init_span(char *base, BlockAllocatorSpan *span,
+ BlockAllocatorHeap *heap, char *ptr, Size npages,
+ uint16 size_class);
static void sb_out_of_memory_error(BlockAllocatorContext *context);
-static bool sb_transfer_first_span(char *base, sb_heap *heap,
+static bool sb_transfer_first_span(char *base, BlockAllocatorHeap *heap,
int fromclass, int toclass);
-static void sb_unlink_span(char *base, sb_heap *heap, sb_span *span);
+static void sb_unlink_span(char *base, BlockAllocatorHeap *heap,
+ BlockAllocatorSpan *span);
/*
* Create a backend-private allocator.
char *base = NULL;
allocator_size = offsetof(BlockAllocatorContext, heaps);
- allocator_size += sizeof(sb_heap) * SB_NUM_SIZE_CLASSES;
+ allocator_size += sizeof(BlockAllocatorHeap) * BA_NUM_SIZE_CLASSES;
context = malloc(allocator_size);
if (context == NULL)
ereport(ERROR,
errmsg("out of memory")));
context->private = true;
- for (heapno = 0; heapno < SB_NUM_SIZE_CLASSES; ++heapno)
+ for (heapno = 0; heapno < BA_NUM_SIZE_CLASSES; ++heapno)
{
- sb_heap *heap = &context->heaps[heapno];
+ BlockAllocatorHeap *heap = &context->heaps[heapno];
relptr_store(base, heap->lock, (LWLock *) NULL);
- for (fclass = 0; fclass < SB_FULLNESS_CLASSES; ++fclass)
- relptr_store(base, heap->spans[fclass], (sb_span *) NULL);
+ for (fclass = 0; fclass < BA_FULLNESS_CLASSES; ++fclass)
+ relptr_store(base, heap->spans[fclass],
+ (BlockAllocatorSpan *) NULL);
}
return context;
* region, so finding out that information is essential. For
* backend-private memory allocation, allocators aren't uniquely tied to
* a region; we'll only need to grab a region if we can't allocate out of
- * an existing superblock.
+ * an existing block.
*/
if (!context->private)
{
base = region->region_start;
}
- /* If it's too big for a superblock, just grab a raw run of pages. */
- if (size > sb_size_classes[lengthof(sb_size_classes) - 1])
+ /* If bigger than the largest size class, just grab a raw run of pages. */
+ if (size > balloc_size_classes[lengthof(balloc_size_classes) - 1])
{
Size npages = fpm_size_to_pages(size);
Size first_page;
- sb_span *span;
- sb_heap *heap = &context->heaps[SB_SCLASS_SPAN_LARGE];
+ BlockAllocatorSpan *span;
+ BlockAllocatorHeap *heap = &context->heaps[BA_SCLASS_SPAN_LARGE];
LWLock *lock = relptr_access(base, heap->lock);
void *ptr;
/* Obtain a span object. */
- span = (sb_span *) BlockAllocatorAllocGuts(base, region, context,
- SB_SCLASS_SPAN_OF_SPANS);
+ span = (BlockAllocatorSpan *)
+ BlockAllocatorAllocGuts(base, region, context,
+ BA_SCLASS_BLOCK_OF_SPANS);
if (span == NULL)
{
if ((flags & SB_ALLOC_SOFT_FAIL) == 0)
/* Initialize span and pagemap. */
if (lock != NULL)
LWLockAcquire(lock, LW_EXCLUSIVE);
- sb_init_span(base, span, heap, ptr, npages, SB_SCLASS_SPAN_LARGE);
+ sb_init_span(base, span, heap, ptr, npages, BA_SCLASS_SPAN_LARGE);
if (lock != NULL)
LWLockRelease(lock);
BlockAllocatorMapSet(region->pagemap, first_page, span);
}
/* Map allocation to a size class. */
- if (size < lengthof(sb_size_class_map) * SB_SIZE_CLASS_MAP_QUANTUM)
+ if (size < lengthof(balloc_size_class_map) * BA_SIZE_CLASS_MAP_QUANTUM)
{
int mapidx;
- mapidx = ((size + SB_SIZE_CLASS_MAP_QUANTUM - 1) /
- SB_SIZE_CLASS_MAP_QUANTUM) - 1;
- size_class = sb_size_class_map[mapidx];
+ mapidx = ((size + BA_SIZE_CLASS_MAP_QUANTUM - 1) /
+ BA_SIZE_CLASS_MAP_QUANTUM) - 1;
+ size_class = balloc_size_class_map[mapidx];
}
else
{
- uint16 min = sb_size_class_map[lengthof(sb_size_class_map) - 1];
- uint16 max = lengthof(sb_size_classes) - 1;
+ uint16 min;
+ uint16 max;
+
+ min = balloc_size_class_map[lengthof(balloc_size_class_map) - 1];
+ max = lengthof(balloc_size_classes) - 1;
while (min < max)
{
uint16 mid = (min + max) / 2;
- uint16 class_size = sb_size_classes[mid];
+ uint16 class_size = balloc_size_classes[mid];
if (class_size < size)
min = mid + 1;
size_class = min;
}
- Assert(size <= sb_size_classes[size_class]);
- Assert(size_class == 0 || size > sb_size_classes[size_class - 1]);
+ Assert(size <= balloc_size_classes[size_class]);
+ Assert(size_class == 0 || size > balloc_size_classes[size_class - 1]);
/* Attempt the actual allocation. */
result = BlockAllocatorAllocGuts(base, region, context, size_class);
AllocatorRegion *region;
char *fpm_base;
char *base = NULL;
- sb_span *span;
+ BlockAllocatorSpan *span;
LWLock *lock = NULL;
- char *superblock;
+ char *block;
Size pageno;
Size obsize;
uint16 size_class;
- /* Locate the containing superblock. */
+ /* Locate the containing span. */
region = LookupAllocatorRegion(ptr);
fpm_base = fpm_segment_base(region->fpm);
pageno = fpm_pointer_to_page(fpm_base, ptr);
*/
if (region->seg != NULL)
{
- sb_heap *heap = relptr_access(fpm_base, span->parent);
+ BlockAllocatorHeap *heap = relptr_access(fpm_base, span->parent);
base = fpm_base;
lock = relptr_access(fpm_base, heap->lock);
if (lock != NULL)
/* Compute the object size. */
size_class = span->size_class;
- obsize = sb_size_classes[size_class];
+ obsize = balloc_size_classes[size_class];
/* If it's a large object, free the entire span. */
- if (size_class == SB_SCLASS_SPAN_LARGE)
+ if (size_class == BA_SCLASS_SPAN_LARGE)
{
- sb_heap *heap = relptr_access(base, span->parent);
+ BlockAllocatorHeap *heap = relptr_access(base, span->parent);
Size first_page;
sb_unlink_span(base, heap, span);
LWLockRelease(lock);
}
- /* Put the object on the superblock's freelist. */
- superblock = relptr_access(base, span->start);
- Assert(((char *) ptr) >= superblock);
- Assert(((char *) ptr) < superblock + SB_SUPERBLOCK_SIZE);
- Assert((((char *) ptr) - superblock) % obsize == 0);
+ /* Put the object on the span's freelist. */
+ block = relptr_access(base, span->start);
+ Assert(((char *) ptr) >= block);
+ Assert(((char *) ptr) < block + BA_SUPERBLOCK_SIZE);
+ Assert((((char *) ptr) - block) % obsize == 0);
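+ /*
+  * The freelist is threaded through the free objects themselves: the first
+  * Size-sized word of each free object holds the index of the next free
+  * object, or BA_SPAN_NOTHING_FREE at the end of the list.
+  */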
* (Size *) ptr = span->firstfree;
- span->firstfree = (((char *) ptr) - superblock) / obsize;
+ span->firstfree = (((char *) ptr) - block) / obsize;
span->nallocatable++;
- if (span->nallocatable == 1 && span->fclass == SB_FULLNESS_CLASSES - 1)
+ if (span->nallocatable == 1 && span->fclass == BA_FULLNESS_CLASSES - 1)
{
- sb_heap *heap = relptr_access(base, span->parent);
- sb_span *new_nextspan;
+ BlockAllocatorHeap *heap = relptr_access(base, span->parent);
+ BlockAllocatorSpan *new_nextspan;
/*
- * The superblock is completely full and is located in the
- * highest-numbered fullness class, which is never scanned for free
- * chunks. We must move it to the next-lower fullness class.
+ * The block is completely full and is located in the highest-numbered
+ * fullness class, which is never scanned for free chunks. We must
+ * move it to the next-lower fullness class.
*/
sb_unlink_span(base, heap, span);
- span->fclass = SB_FULLNESS_CLASSES - 2;
- relptr_copy(span->nextspan, heap->spans[SB_FULLNESS_CLASSES - 2]);
- relptr_store(base, span->prevspan, (sb_span *) NULL);
+ span->fclass = BA_FULLNESS_CLASSES - 2;
+ relptr_copy(span->nextspan, heap->spans[BA_FULLNESS_CLASSES - 2]);
+ relptr_store(base, span->prevspan, (BlockAllocatorSpan *) NULL);
new_nextspan = relptr_access(base,
- heap->spans[SB_FULLNESS_CLASSES - 2]);
+ heap->spans[BA_FULLNESS_CLASSES - 2]);
if (new_nextspan != NULL)
relptr_store(base, new_nextspan->prevspan, span);
- relptr_store(base, heap->spans[SB_FULLNESS_CLASSES - 2], span);
+ relptr_store(base, heap->spans[BA_FULLNESS_CLASSES - 2], span);
}
else if (span->nallocatable == span->nmax && (span->fclass != 1 ||
!relptr_is_null(span->prevspan)))
{
- sb_heap *heap = relptr_access(base, span->parent);
+ BlockAllocatorHeap *heap = relptr_access(base, span->parent);
Size first_page;
/*
- * This entire superblock is free, and it's not the active superblock
+ * This entire block is free, and it's not the active block
* for this size class. Return the memory to the free page manager.
- * We don't do this for the active superblock to prevent hysteresis:
+ * We don't do this for the active block to prevent hysteresis:
* if we repeatedly allocate and free the only chunk in the active
- * superblock, it will be very inefficient if we deallocate and
- * reallocate the superblock every time.
+ * block, it will be very inefficient if we deallocate and
+ * reallocate the block every time.
*/
sb_unlink_span(base, heap, span);
first_page = fpm_pointer_to_page(fpm_base,
FreePageManagerPut(region->fpm, first_page, span->npages);
/*
- * Span-of-spans superblocks store the span which describes them
- * within the superblock itself, so freeing the storage implicitly
- * frees the descriptor also. If this is a superblock of any other
+ * Span-of-spans blocks store the span which describes them
+ * within the block itself, so freeing the storage implicitly
+ * frees the descriptor also. If this is a block of any other
* type, we need to separately free the span object also.
*/
- if (size_class != SB_SCLASS_SPAN_OF_SPANS)
+ if (size_class != BA_SCLASS_BLOCK_OF_SPANS)
BlockAllocatorFree(span);
}
uint16 size_class;
/* Large objects allocate full pages. */
- if (size > sb_size_classes[lengthof(sb_size_classes) - 1])
+ if (size > balloc_size_classes[lengthof(balloc_size_classes) - 1])
return FPM_PAGE_SIZE * fpm_size_to_pages(size);
/* Map request size to a size class. */
- if (size < lengthof(sb_size_class_map) * SB_SIZE_CLASS_MAP_QUANTUM)
+ if (size < lengthof(balloc_size_class_map) * BA_SIZE_CLASS_MAP_QUANTUM)
{
int mapidx;
- mapidx = ((size + SB_SIZE_CLASS_MAP_QUANTUM - 1) /
- SB_SIZE_CLASS_MAP_QUANTUM) - 1;
- size_class = sb_size_class_map[mapidx];
+ mapidx = ((size + BA_SIZE_CLASS_MAP_QUANTUM - 1) /
+ BA_SIZE_CLASS_MAP_QUANTUM) - 1;
+ size_class = balloc_size_class_map[mapidx];
}
else
{
- uint16 min = sb_size_class_map[lengthof(sb_size_class_map) - 1];
- uint16 max = lengthof(sb_size_classes) - 1;
+ uint16 min;
+ uint16 max;
+
+ min = balloc_size_class_map[lengthof(balloc_size_class_map) - 1];
+ max = lengthof(balloc_size_classes) - 1;
+
while (min < max)
{
uint16 mid = (min + max) / 2;
- uint16 class_size = sb_size_classes[mid];
+ uint16 class_size = balloc_size_classes[mid];
if (class_size < size)
min = mid + 1;
size_class = min;
}
- return sb_size_classes[size_class];
+ return balloc_size_classes[size_class];
}
/*
* there's no bookkeeping overhead associated with any single allocation;
* the only thing we can really reflect here is the fact that allocations
* will be rounded up to the next larger size class (or, for large allocations,
- * to a full FPM page). The storage overhead of the sb_span, BlockAllocatorMap,
- * AllocatorRegion, and FreePageManager structures is typically spread across
- * enough small allocations to make reflecting those costs here difficult.
+ * to a full FPM page). The storage overhead of the BlockAllocatorSpan,
+ * BlockAllocatorMap, AllocatorRegion, and FreePageManager structures is
+ * typically spread across enough small allocations to make reflecting those
+ * costs here difficult.
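+ *
+ * The rounding means, for example, that a 200-byte request is counted as
+ * 224 bytes (its size class), while a 10000-byte request is counted as
+ * 12288 bytes (three full 4kB pages).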
*
* On the other hand, we also hope that the overhead in question is small
* enough not to matter. The system malloc is not without bookkeeping
{
AllocatorRegion *region;
char *fpm_base;
- sb_span *span;
+ BlockAllocatorSpan *span;
Size pageno;
uint16 size_class;
- /* Locate the containing superblock. */
+ /* Locate the containing block. */
region = LookupAllocatorRegion(ptr);
fpm_base = fpm_segment_base(region->fpm);
pageno = fpm_pointer_to_page(fpm_base, ptr);
/* Work out the size of the allocation. */
size_class = span->size_class;
- if (span->size_class == SB_SCLASS_SPAN_LARGE)
+ if (span->size_class == BA_SCLASS_SPAN_LARGE)
return FPM_PAGE_SIZE * span->npages;
else
- return sb_size_classes[size_class];
+ return balloc_size_classes[size_class];
}
/*
* Iterate through heaps back to front. We do it this way so that
* spans-of-spans are freed last.
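+ * Their pages hold the span objects that describe every other block, so
+ * they must stay valid until those blocks have all been released.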
*/
- for (heapno = SB_NUM_SIZE_CLASSES - 1; heapno >= 0; --heapno)
+ for (heapno = BA_NUM_SIZE_CLASSES - 1; heapno >= 0; --heapno)
{
- sb_heap *heap = &context->heaps[heapno];
+ BlockAllocatorHeap *heap = &context->heaps[heapno];
int fclass;
- for (fclass = 0; fclass < SB_FULLNESS_CLASSES; ++fclass)
+ for (fclass = 0; fclass < BA_FULLNESS_CLASSES; ++fclass)
{
AllocatorRegion *region;
- char *superblock;
- sb_span *span;
+ char *block;
+ BlockAllocatorSpan *span;
span = relptr_access(base, heap->spans[fclass]);
while (span != NULL)
{
Size offset;
- sb_span *nextspan;
+ BlockAllocatorSpan *nextspan;
- superblock = relptr_access(base, span->start);
+ block = relptr_access(base, span->start);
nextspan = relptr_access(base, span->nextspan);
- region = LookupAllocatorRegion(superblock);
+ region = LookupAllocatorRegion(block);
Assert(region != NULL);
- offset = superblock - fpm_segment_base(region->fpm);
+ offset = block - fpm_segment_base(region->fpm);
Assert(offset % FPM_PAGE_SIZE == 0);
FreePageManagerPut(region->fpm, offset / FPM_PAGE_SIZE,
span->npages);
/*
* Allocate an object of the requested size class from the given allocator.
- * If necessary, steal or create another superblock.
+ * If necessary, steal or create another block.
*/
static char *
BlockAllocatorAllocGuts(char *base, AllocatorRegion *region,
BlockAllocatorContext *context, int size_class)
{
- sb_heap *heap = &context->heaps[size_class];
+ BlockAllocatorHeap *heap = &context->heaps[size_class];
LWLock *lock = relptr_access(base, heap->lock);
- sb_span *active_sb;
- char *superblock;
+ BlockAllocatorSpan *active_sb;
+ char *block;
char *result;
Size obsize;
LWLockAcquire(lock, LW_EXCLUSIVE);
/*
- * If there's no active superblock, we must successfully obtain one or
+ * If there's no active block, we must successfully obtain one or
* fail the request.
*/
if (relptr_is_null(heap->spans[1])
- && !sb_ensure_active_superblock(base, region, context,
- heap, size_class))
+ && !BlockAllocatorEnsureActiveBlock(base, region, context,
+ heap, size_class))
{
if (lock != NULL)
LWLockRelease(lock);
Assert(!relptr_is_null(heap->spans[1]));
/*
- * There should be a superblock in fullness class 1 at this point, and
+ * There should be a block in fullness class 1 at this point, and
* it should never be completely full. Thus we can either pop the
* free list or, failing that, initialize a new object.
*/
active_sb = relptr_access(base, heap->spans[1]);
Assert(active_sb != NULL && active_sb->nallocatable > 0);
- superblock = relptr_access(base, active_sb->start);
- Assert(size_class < SB_NUM_SIZE_CLASSES);
- obsize = sb_size_classes[size_class];
- if (active_sb->firstfree != SB_SPAN_NOTHING_FREE)
+ block = relptr_access(base, active_sb->start);
+ Assert(size_class < BA_NUM_SIZE_CLASSES);
+ obsize = balloc_size_classes[size_class];
+ if (active_sb->firstfree != BA_SPAN_NOTHING_FREE)
{
- result = superblock + active_sb->firstfree * obsize;
+ result = block + active_sb->firstfree * obsize;
active_sb->firstfree = * (Size *) result;
}
else
{
- result = superblock + active_sb->ninitialized * obsize;
+ result = block + active_sb->ninitialized * obsize;
++active_sb->ninitialized;
}
--active_sb->nallocatable;
/* If it's now full, move it to the highest-numbered fullness class. */
if (active_sb->nallocatable == 0)
- sb_transfer_first_span(base, heap, 1, SB_FULLNESS_CLASSES - 1);
+ sb_transfer_first_span(base, heap, 1, BA_FULLNESS_CLASSES - 1);
/* We're all done. Release the lock. */
if (lock != NULL)
}
/*
- * Ensure an active (i.e. fullness class 1) superblock, unless all existing
- * superblocks are completely full and no more can be allocated.
+ * Ensure an active (i.e. fullness class 1) block, unless all existing
+ * blocks are completely full and no more can be allocated.
*
- * Fullness classes K of 0..N is loosely intended to represent superblocks
+ * Fullness class K of 0..N is loosely intended to represent blocks
* whose utilization percentage is at least K/N, but we only enforce this
* rigorously for the highest-numbered fullness class, which always contains
* exactly those blocks that are completely full. It's otherwise acceptable
- * for a superblock to be in a higher-numbered fullness class than the one
- * to which it logically belongs. In addition, the active superblock, which
+ * for a block to be in a higher-numbered fullness class than the one
+ * to which it logically belongs. In addition, the active block, which
* is always the first block in fullness class 1, is permitted to have a
* higher allocation percentage than would normally be allowable for that
* fullness class; we don't move it until it's completely full, and then
* it goes to the highest-numbered fullness class.
*
- * It might seem odd that the active superblock is the head of fullness class
+ * It might seem odd that the active block is the head of fullness class
* 1 rather than fullness class 0, but experience with other allocators has
- * shown that it's usually better to allocate from a superblock that's
+ * shown that it's usually better to allocate from a block that's
* moderately full rather than one that's nearly empty. Insofar as is
* reasonably possible, we want to avoid performing new allocations in a
- * superblock that would otherwise become empty soon.
+ * block that would otherwise become empty soon.
*/
static bool
-sb_ensure_active_superblock(char *base, AllocatorRegion *region,
- BlockAllocatorContext *context,
- sb_heap *heap, int size_class)
+BlockAllocatorEnsureActiveBlock(char *base, AllocatorRegion *region,
+ BlockAllocatorContext *context,
+ BlockAllocatorHeap *heap, int size_class)
{
- Size obsize = sb_size_classes[size_class];
+ Size obsize = balloc_size_classes[size_class];
Size nmax;
int fclass;
- sb_span *span = NULL;
+ BlockAllocatorSpan *span = NULL;
Size npages = 1;
Size first_page;
Size i;
void *ptr;
/*
- * Compute the number of objects that will fit in a superblock of this
- * size class. Span-of-spans superblocks are just a single page, and the
+ * Compute the number of objects that will fit in a block of this
+ * size class. Span-of-spans blocks are just a single page, and the
* first object isn't available for use because it describes the
- * span-of-spans itself.
+ * block-of-spans itself.
*/
- if (size_class == SB_SCLASS_SPAN_OF_SPANS)
+ if (size_class == BA_SCLASS_BLOCK_OF_SPANS)
nmax = FPM_PAGE_SIZE / obsize - 1;
else
- nmax = SB_SUPERBLOCK_SIZE / obsize;
+ nmax = BA_SUPERBLOCK_SIZE / obsize;
/*
* If fullness class 1 is empty, try to find something to put in it by
* scanning higher-numbered fullness classes (excluding the last one,
* whose blocks are certain to all be completely full).
*/
- for (fclass = 2; fclass < SB_FULLNESS_CLASSES - 1; ++fclass)
+ for (fclass = 2; fclass < BA_FULLNESS_CLASSES - 1; ++fclass)
{
- sb_span *span;
+ BlockAllocatorSpan *span;
span = relptr_access(base, heap->spans[fclass]);
while (span != NULL)
{
int tfclass;
- sb_span *nextspan;
- sb_span *prevspan;
+ BlockAllocatorSpan *nextspan;
+ BlockAllocatorSpan *prevspan;
/* Figure out what fullness class should contain this. */
tfclass = (nmax - span->nallocatable)
- * (SB_FULLNESS_CLASSES - 1) / nmax;
+ * (BA_FULLNESS_CLASSES - 1) / nmax;
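+ /*
+  * For example, with BA_FULLNESS_CLASSES = 4 and nmax = 64, a span with
+  * 20 objects still allocatable lands in fullness class
+  * (64 - 20) * 3 / 64 = 2.
+  */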
/* Look up next span. */
nextspan = relptr_access(base, span->nextspan);
prevspan = relptr_access(base, span->prevspan);
relptr_copy(span->nextspan, heap->spans[tfclass]);
- relptr_store(base, span->prevspan, (sb_span *) NULL);
+ relptr_store(base, span->prevspan,
+ (BlockAllocatorSpan *) NULL);
if (nextspan != NULL)
relptr_copy(nextspan->prevspan, span->prevspan);
if (prevspan != NULL)
span = nextspan;
}
- /* Stop now if we found a suitable superblock. */
+ /* Stop now if we found a suitable block. */
if (!relptr_is_null(heap->spans[1]))
return true;
}
/*
- * If there are no superblocks that properly belong in fullness class 1,
+ * If there are no blocks that properly belong in fullness class 1,
* pick one from some other fullness class and move it there anyway, so
* that we have an allocation target. Our last choice is to transfer a
- * superblock that's almost empty (and might become completely empty soon
+ * block that's almost empty (and might become completely empty soon
* if left alone), but even that is better than failing, which is what we
- * must do if there are no superblocks at all with freespace.
+ * must do if there are no blocks at all with free space.
*/
Assert(relptr_is_null(heap->spans[1]));
- for (fclass = 2; fclass < SB_FULLNESS_CLASSES - 1; ++fclass)
+ for (fclass = 2; fclass < BA_FULLNESS_CLASSES - 1; ++fclass)
if (sb_transfer_first_span(base, heap, fclass, 1))
return true;
if (relptr_is_null(heap->spans[1]) &&
return true;
/*
- * Get an sb_span object to describe the new superblock... unless
- * this allocation is for an sb_span object, in which case that's
- * surely not going to work. We handle that case by storing the
- * sb_span describing an sb_span superblock inline.
+ * Get a BlockAllocatorSpan object to describe the new block...
+ * unless this allocation is for a BlockAllocatorSpan object, in which
+ * case that's surely not going to work. We handle that case by storing
+ * the span describing a block-of-spans inline.
*/
- if (size_class != SB_SCLASS_SPAN_OF_SPANS)
+ if (size_class != BA_SCLASS_BLOCK_OF_SPANS)
{
AllocatorRegion *span_region = context->private ? NULL : region;
- span = (sb_span *) BlockAllocatorAllocGuts(base, span_region, context,
- SB_SCLASS_SPAN_OF_SPANS);
+ span = (BlockAllocatorSpan *)
+ BlockAllocatorAllocGuts(base, span_region, context,
+ BA_SCLASS_BLOCK_OF_SPANS);
if (span == NULL)
return false;
npages = BLOCK_ALLOCATOR_PAGES_PER_CHUNK;
}
- /* Find a region from which to allocate the superblock. */
+ /* Find a region from which to allocate the block. */
if (region == NULL)
{
Assert(context->private);
region = GetRegionForPrivateAllocation(npages);
}
- /* Try to allocate the actual superblock. */
+ /* Try to allocate the actual block. */
if (region == NULL ||
!FreePageManagerGet(region->fpm, npages, &first_page))
{
ptr = fpm_page_to_pointer(fpm_segment_base(region->fpm), first_page);
/*
- * If this is a span-of-spans, carve the descriptor right out of
+ * If this is a block-of-spans, carve the descriptor right out of
* the allocated space.
*/
- if (size_class == SB_SCLASS_SPAN_OF_SPANS)
- span = (sb_span *) ptr;
+ if (size_class == BA_SCLASS_BLOCK_OF_SPANS)
+ span = (BlockAllocatorSpan *) ptr;
/* Initialize span and pagemap. */
sb_init_span(base, span, heap, ptr, npages, size_class);
* Add a new span to fullness class 1 of the indicated heap.
*/
static void
-sb_init_span(char *base, sb_span *span, sb_heap *heap, char *ptr,
- Size npages, uint16 size_class)
+sb_init_span(char *base, BlockAllocatorSpan *span, BlockAllocatorHeap *heap,
+ char *ptr, Size npages, uint16 size_class)
{
- sb_span *head = relptr_access(base, heap->spans[1]);
- Size obsize = sb_size_classes[size_class];
+ BlockAllocatorSpan *head = relptr_access(base, heap->spans[1]);
+ Size obsize = balloc_size_classes[size_class];
if (head != NULL)
relptr_store(base, head->prevspan, span);
relptr_store(base, span->parent, heap);
relptr_store(base, span->nextspan, head);
- relptr_store(base, span->prevspan, (sb_span *) NULL);
+ relptr_store(base, span->prevspan, (BlockAllocatorSpan *) NULL);
relptr_store(base, heap->spans[1], span);
relptr_store(base, span->start, ptr);
span->npages = npages;
span->size_class = size_class;
span->ninitialized = 0;
- if (size_class == SB_SCLASS_SPAN_OF_SPANS)
+ if (size_class == BA_SCLASS_BLOCK_OF_SPANS)
{
/*
- * A span-of-spans contains its own descriptor, so mark one object
+ * A block-of-spans contains its own descriptor, so mark one object
* as initialized and reduce the count of allocatable objects by one.
* Doing this here has the side effect of also reducing nmax by one,
* which is important to make sure we free this object at the correct
span->ninitialized = 1;
span->nallocatable = FPM_PAGE_SIZE / obsize - 1;
}
- else if (size_class != SB_SCLASS_SPAN_LARGE)
- span->nallocatable = SB_SUPERBLOCK_SIZE / obsize;
- span->firstfree = SB_SPAN_NOTHING_FREE;
+ else if (size_class != BA_SCLASS_SPAN_LARGE)
+ span->nallocatable = BA_SUPERBLOCK_SIZE / obsize;
+ span->firstfree = BA_SPAN_NOTHING_FREE;
span->nmax = span->nallocatable;
span->fclass = 1;
}
* fullness class.
*/
static bool
-sb_transfer_first_span(char *base, sb_heap *heap, int fromclass, int toclass)
+sb_transfer_first_span(char *base, BlockAllocatorHeap *heap,
+ int fromclass, int toclass)
{
- sb_span *span;
- sb_span *nextspan;
+ BlockAllocatorSpan *span;
+ BlockAllocatorSpan *nextspan;
/* Can't do it if source list is empty. */
span = relptr_access(base, heap->spans[fromclass]);
nextspan = relptr_access(base, span->nextspan);
relptr_store(base, heap->spans[fromclass], nextspan);
if (nextspan != NULL)
- relptr_store(base, nextspan->prevspan, (sb_span *) NULL);
+ relptr_store(base, nextspan->prevspan, (BlockAllocatorSpan *) NULL);
/* Add span to target list. */
relptr_copy(span->nextspan, heap->spans[toclass]);
* Remove span from current list.
*/
static void
-sb_unlink_span(char *base, sb_heap *heap, sb_span *span)
+sb_unlink_span(char *base, BlockAllocatorHeap *heap, BlockAllocatorSpan *span)
{
- sb_span *nextspan = relptr_access(base, span->nextspan);
- sb_span *prevspan = relptr_access(base, span->prevspan);
+ BlockAllocatorSpan *nextspan = relptr_access(base, span->nextspan);
+ BlockAllocatorSpan *prevspan = relptr_access(base, span->prevspan);
- relptr_store(base, span->prevspan, (sb_span *) NULL);
+ relptr_store(base, span->prevspan, (BlockAllocatorSpan *) NULL);
if (nextspan != NULL)
relptr_copy(nextspan->prevspan, span->prevspan);
if (prevspan != NULL)