} FreePageBtreeSearchResult;
/* Helper functions */
+static bool FreePageManagerGetInternal(FreePageManager *fpm, Size npages,
+ Size *first_page);
+static void FreePageBtreeRecycle(FreePageManager *fpm, Size pageno);
static void FreePageBtreeReduceAncestorKeys(FreePageManager *fpm,
FreePageBtree *btp);
static void FreePageBtreePageRemove(FreePageBtree *btp, Size index);
FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
{
	LWLock *lock = fpm_lock(fpm);
-	char *base = fpm_segment_base(fpm);
-	FreePageSpanLeader *victim = NULL;
-	Size	victim_page = 0; /* placate compiler */
-	Size	f;
+	bool	result;
+
+	/* Acquire lock (if there is one). */
+	if (lock != NULL)
+		LWLockAcquire(lock, LW_EXCLUSIVE);
+	result = FreePageManagerGetInternal(fpm, npages, first_page);
+	if (lock != NULL)
+		LWLockRelease(lock);
+
+	/*
+	 * Propagate the internal result.  Returning true unconditionally here
+	 * would report success to the caller even when no span of the requested
+	 * size could be allocated (and *first_page would then be garbage).
+	 */
+	return result;
+}
+
+/*
+ * Transfer a run of pages to the free page manager.
+ *
+ * first_page is the first page of the run and npages is its length; npages
+ * must be positive (asserted below).  fpm_lock() may return NULL, in which
+ * case the manager is operated on without any locking.
+ */
+void
+FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
+{
+	LWLock *lock = fpm_lock(fpm);
+	Assert(npages > 0);
 	/* Acquire lock (if there is one). */
 	if (lock != NULL)
 		LWLockAcquire(lock, LW_EXCLUSIVE);
+	/*
+	 * As a special case, we store the very first range in the FreePageManager
+	 * itself, so that a request for the entire number of pages will succeed.
+	 * Otherwise, we must build or update a btree.
+	 */
+	if (fpm->btree_depth == 0 && fpm->singleton_npages == 0)
+	{
+		fpm->singleton_first_page = first_page;
+		fpm->singleton_npages = npages;
+	}
+	else if (fpm->btree_depth == 0)
+	{
+		/* XXX Create the btree. */
+		/*
+		 * NOTE(review): until btree creation is implemented, this branch
+		 * silently discards the donated range — confirm this is acceptable
+		 * for the current development state.
+		 */
+	}
+	else
+		FreePageManagerPutInternal(fpm, first_page, npages, false);
+
+	/* Release lock (if there is one). */
+	if (lock != NULL)
+		LWLockRelease(lock);
+}
+
+/*
+ * Like FreePageManagerGet, this function allocates a run of pages of the
+ * given length from the free page manager, but without taking and releasing
+ * the lock.  The caller is responsible for making sure the lock is already
+ * held.
+ */
+static bool
+FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page)
+{
+	char *base = fpm_segment_base(fpm);
+	FreePageSpanLeader *victim = NULL;
+	FreePageSpanLeader *prev;
+	FreePageSpanLeader *next;
+	FreePageBtreeSearchResult result;
+	Size	victim_page = 0; /* placate compiler */
+	Size	f;
+
 	/*
 	 * Search for a free span.
 	 *
 			break;
 	}
-	/* If we found a victim, remove it from the freelist and btree. */
-	if (victim != NULL)
-	{
-		FreePageSpanLeader *prev = relptr_access(base, victim->prev);
-		FreePageSpanLeader *next = relptr_access(base, victim->next);
-		FreePageBtreeSearchResult result;
-
-		if (prev != NULL)
-			relptr_copy(prev->next, victim->next);
-		else
-			relptr_copy(fpm->freelist[f], victim->next);
-		if (next != NULL)
-			relptr_copy(next->prev, victim->prev);
-
-		victim_page = fpm_pointer_to_page(base, victim);
-		FreePageBtreeSearch(fpm, victim_page, &result);
-		Assert(result.page_exact != NULL);
-		FreePageBtreeRemove(fpm, result.page_exact, result.index_exact);
-
-		/* XXX. But the span we found might have been oversized ... we
-		 * need to put the rest back! */
-	}
-
-	/* Release lock (if there is one). */
-	if (lock != NULL)
-		LWLockRelease(lock);
-
-	/* Return results to caller. */
+	/* If we didn't find an allocatable span, return failure. */
 	if (victim == NULL)
 		return false;
-	*first_page = victim_page;
-	return true;
-}
-/*
- * Transfer a run of pages to the free page manager.
- */
-void
-FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
-{
-	LWLock *lock = fpm_lock(fpm);
-	Assert(npages > 0);
+	/* Remove span from free list. */
+	prev = relptr_access(base, victim->prev);
+	next = relptr_access(base, victim->next);
+	if (prev != NULL)
+		relptr_copy(prev->next, victim->next);
+	else
+		relptr_copy(fpm->freelist[f], victim->next);
+	if (next != NULL)
+		relptr_copy(next->prev, victim->prev);
-	/* Acquire lock (if there is one). */
-	if (lock != NULL)
-		LWLockAcquire(lock, LW_EXCLUSIVE);
+	/*
+	 * Remove span from btree.  victim_page must be derived from the victim
+	 * pointer before the search; previously it was left at its placate-the-
+	 * compiler value of 0 here, so the btree was always searched for page 0
+	 * instead of the victim's actual page number.
+	 */
+	victim_page = fpm_pointer_to_page(base, victim);
+	FreePageBtreeSearch(fpm, victim_page, &result);
+	Assert(result.page_exact != NULL);
+	FreePageBtreeRemove(fpm, result.page_exact, result.index_exact);
-	/*
-	 * As a special case, we store the very first range in the FreePageManager
-	 * itself, so that a request for the entire number of pages will succeed.
-	 * Otherwise, we must build or update a btree.
-	 */
-	if (fpm->btree_depth == 0 && fpm->singleton_npages == 0)
-	{
-		fpm->singleton_first_page = first_page;
-		fpm->singleton_npages = npages;
-	}
-	else if (fpm->btree_depth == 0)
-	{
-		/* XXX Create the btree. */
-	}
-	else
-		FreePageManagerPutInternal(fpm, first_page, npages, false);
+	/* XXX. But the span we found might have been oversized ... we
+	 * need to put the rest back! */
-	/* Release lock (if there is one). */
-	if (lock != NULL)
-		LWLockRelease(lock);
+	*first_page = victim_page;
+	return true;
 }
/*
/* Check whether we need to allocate more btree pages to split. */
if (result.split_pages > fpm->btree_recycle_count)
{
- Size pages_needed;
-
- pages_needed = (result.split_pages + 1) - fpm->btree_recycle_count;
+ Size pages_needed;
+ Size recycle_page;
+ Size i;
/*
- * XXX. Allocate the required number of pages and recycle each
- * one in turn.
+ * Allocate the required number of pages and split each one in
+ * turn. This should never fail, because if we've got enough spans
+ * of free pages kicking around that we need additional storage
+ * space just to remember them all, then we should certainly have
+ * enough to expand the btree, which should only ever use a tiny
+ * number of pages compared to the number under management. If
+ * it does, something's badly screwed up.
*/
+ pages_needed = result.split_pages - fpm->btree_recycle_count;
+ for (i = 0; i < pages_needed; ++i)
+ {
+ if (!FreePageManagerGetInternal(fpm, 1, &recycle_page))
+ elog(FATAL, "free page manager btree is corrupt");
+ FreePageBtreeRecycle(fpm, recycle_page);
+ }
/*
* The act of allocating pages to recycle may have invalidated