From: Robert Haas
Date: Wed, 19 Feb 2014 20:26:16 +0000 (-0500)
Subject: More hacking.
X-Git-Url: http://git.postgresql.org/gitweb/static/close/reject?a=commitdiff_plain;h=d9464ad46457321ce44df0249315099c67127241;p=users%2Frhaas%2Fpostgres.git

More hacking.
---
diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c
index c317a5fcbe..e6a30a1b8c 100644
--- a/src/backend/utils/mmgr/freepage.c
+++ b/src/backend/utils/mmgr/freepage.c
@@ -83,6 +83,9 @@ typedef struct FreePageBtreeSearchResult
 } FreePageBtreeSearchResult;
 /* Helper functions */
+static bool FreePageManagerGetInternal(FreePageManager *fpm, Size npages,
+		Size *first_page);
+static void FreePageBtreeRecycle(FreePageManager *fpm, Size pageno);
 static void FreePageBtreeReduceAncestorKeys(FreePageManager *fpm,
 		FreePageBtree *btp);
 static void FreePageBtreePageRemove(FreePageBtree *btp, Size index);
@@ -145,15 +148,69 @@ bool
 FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
 {
 	LWLock *lock = fpm_lock(fpm);
-	char *base = fpm_segment_base(fpm);
-	FreePageSpanLeader *victim = NULL;
-	Size victim_page = 0; /* placate compiler */
-	Size f;
+	bool result;
+
+	if (lock != NULL)
+		LWLockAcquire(lock, LW_EXCLUSIVE);
+	result = FreePageManagerGetInternal(fpm, npages, first_page);
+	if (lock != NULL)
+		LWLockRelease(lock);
+
+	return true;
+}
+
+/*
+ * Transfer a run of pages to the free page manager.
+ */
+void
+FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
+{
+	LWLock *lock = fpm_lock(fpm);
+	Assert(npages > 0);
 	/* Acquire lock (if there is one). */
 	if (lock != NULL)
 		LWLockAcquire(lock, LW_EXCLUSIVE);
+	/*
+	 * As a special case, we store the very first range in the FreePageManager
+	 * itself, so that a request for the entire number of pages will succeed.
+	 * Otherwise, we must build or update a btree.
+	 */
+	if (fpm->btree_depth == 0 && fpm->singleton_npages == 0)
+	{
+		fpm->singleton_first_page = first_page;
+		fpm->singleton_npages = npages;
+	}
+	else if (fpm->btree_depth == 0)
+	{
+		/* XXX Create the btree. */
+	}
+	else
+		FreePageManagerPutInternal(fpm, first_page, npages, false);
+
+	/* Release lock (if there is one). */
+	if (lock != NULL)
+		LWLockRelease(lock);
+}
+
+/*
+ * Like FreePageManagerGet, this function allocates a run of pages of the
+ * given length from the free page manager, but without taking and releasing
+ * the lock. The caller is responsible for making sure the lock is already
+ * held.
+ */
+static bool
+FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page)
+{
+	char *base = fpm_segment_base(fpm);
+	FreePageSpanLeader *victim = NULL;
+	FreePageSpanLeader *prev;
+	FreePageSpanLeader *next;
+	FreePageBtreeSearchResult result;
+	Size victim_page = 0; /* placate compiler */
+	Size f;
+
 	/*
 	 * Search for a free span.
 	 *
@@ -200,73 +257,30 @@ FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
 		break;
 	}
-	/* If we found a victim, remove it from the freelist and btree. */
-	if (victim != NULL)
-	{
-		FreePageSpanLeader *prev = relptr_access(base, victim->prev);
-		FreePageSpanLeader *next = relptr_access(base, victim->next);
-		FreePageBtreeSearchResult result;
-
-		if (prev != NULL)
-			relptr_copy(prev->next, victim->next);
-		else
-			relptr_copy(fpm->freelist[f], victim->next);
-		if (next != NULL)
-			relptr_copy(next->prev, victim->prev);
-
-		victim_page = fpm_pointer_to_page(base, victim);
-		FreePageBtreeSearch(fpm, victim_page, &result);
-		Assert(result.page_exact != NULL);
-		FreePageBtreeRemove(fpm, result.page_exact, result.index_exact);
-
-		/* XXX. But the span we found might have been oversized ... we
-		 * need to put the rest back! */
-	}
-
-	/* Release lock (if there is one). */
-	if (lock != NULL)
-		LWLockRelease(lock);
-
-	/* Return results to caller.
 */
+	/* If we didn't find an allocatable span, return failure. */
 	if (victim == NULL)
 		return false;
-	*first_page = victim_page;
-	return true;
-}
-/*
- * Transfer a run of pages to the free page manager.
- */
-void
-FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
-{
-	LWLock *lock = fpm_lock(fpm);
-	Assert(npages > 0);
+	/* Remove span from free list. */
+	prev = relptr_access(base, victim->prev);
+	next = relptr_access(base, victim->next);
+	if (prev != NULL)
+		relptr_copy(prev->next, victim->next);
+	else
+		relptr_copy(fpm->freelist[f], victim->next);
+	if (next != NULL)
+		relptr_copy(next->prev, victim->prev);
-	/* Acquire lock (if there is one). */
-	if (lock != NULL)
-		LWLockAcquire(lock, LW_EXCLUSIVE);
+	/* Remove span from btree. */
+	FreePageBtreeSearch(fpm, victim_page, &result);
+	Assert(result.page_exact != NULL);
+	FreePageBtreeRemove(fpm, result.page_exact, result.index_exact);
-	/*
-	 * As a special case, we store the very first range in the FreePageManager
-	 * itself, so that a request for the entire number of pages will succeed.
-	 * Otherwise, we must build or update a btree.
-	 */
-	if (fpm->btree_depth == 0 && fpm->singleton_npages == 0)
-	{
-		fpm->singleton_first_page = first_page;
-		fpm->singleton_npages = npages;
-	}
-	else if (fpm->btree_depth == 0)
-	{
-		/* XXX Create the btree. */
-	}
-	else
-		FreePageManagerPutInternal(fpm, first_page, npages, false);
+	/* XXX. But the span we found might have been oversized ... we
+	 * need to put the rest back! */
-	/* Release lock (if there is one). */
-	if (lock != NULL)
-		LWLockRelease(lock);
+	*first_page = fpm_pointer_to_page(base, victim);
+	return true;
 }
 /*
@@ -709,14 +723,26 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
 	/* Check whether we need to allocate more btree pages to split.
*/ if (result.split_pages > fpm->btree_recycle_count) { - Size pages_needed; - - pages_needed = (result.split_pages + 1) - fpm->btree_recycle_count; + Size pages_needed; + Size recycle_page; + Size i; /* - * XXX. Allocate the required number of pages and recycle each - * one in turn. + * Allocate the required number of pages and split each one in + * turn. This should never fail, because if we've got enough spans + * of free pages kicking around that we need additional storage + * space just to remember them all, then we should certainly have + * enough to expand the btree, which should only ever use a tiny + * number of pages compared to the number under management. If + * it does, something's badly screwed up. */ + pages_needed = result.split_pages - fpm->btree_recycle_count; + for (i = 0; i < pages_needed; ++i) + { + if (!FreePageManagerGetInternal(fpm, 1, &recycle_page)) + elog(FATAL, "free page manager btree is corrupt"); + FreePageBtreeRecycle(fpm, recycle_page); + } /* * The act of allocating pages to recycle may have invalidated