@@ -156,6 +156,36 @@ _hash_getinitbuf(Relation rel, BlockNumber blkno)
156156 return buf ;
157157}
158158
159+ /*
160+ * _hash_initbuf() -- Get and initialize a buffer by bucket number.
161+ */
162+ void
163+ _hash_initbuf (Buffer buf , uint32 max_bucket , uint32 num_bucket , uint32 flag ,
164+ bool initpage )
165+ {
166+ HashPageOpaque pageopaque ;
167+ Page page ;
168+
169+ page = BufferGetPage (buf );
170+
171+ /* initialize the page */
172+ if (initpage )
173+ _hash_pageinit (page , BufferGetPageSize (buf ));
174+
175+ pageopaque = (HashPageOpaque ) PageGetSpecialPointer (page );
176+
177+ /*
178+ * Set hasho_prevblkno with current hashm_maxbucket. This value will
179+ * be used to validate cached HashMetaPageData. See
180+ * _hash_getbucketbuf_from_hashkey().
181+ */
182+ pageopaque -> hasho_prevblkno = max_bucket ;
183+ pageopaque -> hasho_nextblkno = InvalidBlockNumber ;
184+ pageopaque -> hasho_bucket = num_bucket ;
185+ pageopaque -> hasho_flag = flag ;
186+ pageopaque -> hasho_page_id = HASHO_PAGE_ID ;
187+ }
188+
159189/*
160190 * _hash_getnewbuf() -- Get a new page at the end of the index.
161191 *
@@ -288,7 +318,7 @@ _hash_dropscanbuf(Relation rel, HashScanOpaque so)
288318
289319
290320/*
291- * _hash_metapinit () -- Initialize the metadata page of a hash index,
321+ * _hash_init () -- Initialize the metadata page of a hash index,
292322 * the initial buckets, and the initial bitmap page.
293323 *
294324 * The initial number of buckets is dependent on num_tuples, an estimate
@@ -300,19 +330,18 @@ _hash_dropscanbuf(Relation rel, HashScanOpaque so)
300330 * multiple buffer locks is ignored.
301331 */
302332uint32
303- _hash_metapinit (Relation rel , double num_tuples , ForkNumber forkNum )
333+ _hash_init (Relation rel , double num_tuples , ForkNumber forkNum )
304334{
305- HashMetaPage metap ;
306- HashPageOpaque pageopaque ;
307335 Buffer metabuf ;
308336 Buffer buf ;
337+ Buffer bitmapbuf ;
309338 Page pg ;
339+ HashMetaPage metap ;
340+ RegProcedure procid ;
310341 int32 data_width ;
311342 int32 item_width ;
312343 int32 ffactor ;
313- double dnumbuckets ;
314344 uint32 num_buckets ;
315- uint32 log2_num_buckets ;
316345 uint32 i ;
317346
318347 /* safety check */
@@ -334,6 +363,96 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
/*
 * NOTE(review): this is a diff view; new-file lines ~348-362 (the safety
 * check body and the data_width/item_width/ffactor computation) are elided
 * between the two hunks above, so this definition is incomplete here.
 */
334363 if (ffactor < 10 )
335364 ffactor = 10 ;
336365
/*
 * Look up the hash support function's OID once here; it is passed down to
 * _hash_init_metabuffer(), which stores it in hashm_procid.
 */
366+ procid = index_getprocid (rel , 1 , HASHPROC );
367+
368+ /*
369+ * We initialize the metapage, the first N bucket pages, and the first
370+ * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
371+ * calls to occur. This ensures that the smgr level has the right idea of
372+ * the physical index length.
373+ *
374+ * Critical section not required, because on error the creation of the
375+ * whole relation will be rolled back.
376+ */
377+ metabuf = _hash_getnewbuf (rel , HASH_METAPAGE , forkNum );
378+ _hash_init_metabuffer (metabuf , num_tuples , procid , ffactor , false);
379+ MarkBufferDirty (metabuf );
380+
381+ pg = BufferGetPage (metabuf );
382+ metap = HashPageGetMeta (pg );
383+
/* Bucket count comes from hashm_maxbucket, set by _hash_init_metabuffer. */
384+ num_buckets = metap -> hashm_maxbucket + 1 ;
385+
386+ /*
387+ * Release buffer lock on the metapage while we initialize buckets.
388+ * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
389+ * won't accomplish anything. It's a bad idea to hold buffer locks for
390+ * long intervals in any case, since that can block the bgwriter.
391+ */
392+ LockBuffer (metabuf , BUFFER_LOCK_UNLOCK );
393+
394+ /*
395+ * Initialize and WAL Log the first N buckets
396+ */
397+ for (i = 0 ; i < num_buckets ; i ++ )
398+ {
399+ BlockNumber blkno ;
400+
401+ /* Allow interrupts, in case N is huge */
402+ CHECK_FOR_INTERRUPTS ();
403+
404+ blkno = BUCKET_TO_BLKNO (metap , i );
405+ buf = _hash_getnewbuf (rel , blkno , forkNum );
406+ _hash_initbuf (buf , metap -> hashm_maxbucket , i , LH_BUCKET_PAGE , false);
407+ MarkBufferDirty (buf );
408+ _hash_relbuf (rel , buf );
409+ }
410+
411+ /* Now reacquire buffer lock on metapage */
412+ LockBuffer (metabuf , BUFFER_LOCK_EXCLUSIVE );
413+
414+ /*
415+ * Initialize bitmap page
416+ */
417+ bitmapbuf = _hash_getnewbuf (rel , num_buckets + 1 , forkNum );
418+ _hash_initbitmapbuffer (bitmapbuf , metap -> hashm_bmsize , false);
419+ MarkBufferDirty (bitmapbuf );
420+
421+ /* add the new bitmap page to the metapage's list of bitmaps */
422+ /* metapage already has a write lock */
423+ if (metap -> hashm_nmaps >= HASH_MAX_BITMAPS )
424+ ereport (ERROR ,
425+ (errcode (ERRCODE_PROGRAM_LIMIT_EXCEEDED ),
426+ errmsg ("out of overflow pages in hash index \"%s\"" ,
427+ RelationGetRelationName (rel ))));
428+
/* The bitmap page occupies the first block after the bucket pages. */
429+ metap -> hashm_mapp [metap -> hashm_nmaps ] = num_buckets + 1 ;
430+
431+ metap -> hashm_nmaps ++ ;
432+ MarkBufferDirty (metabuf );
433+
434+ /* all done */
435+ _hash_relbuf (rel , bitmapbuf );
436+ _hash_relbuf (rel , metabuf );
437+
438+ return num_buckets ;
439+ }
440+
441+ /*
442+ * _hash_init_metabuffer() -- Initialize the metadata page of a hash index.
443+ */
444+ void
445+ _hash_init_metabuffer (Buffer buf , double num_tuples , RegProcedure procid ,
446+ uint16 ffactor , bool initpage )
447+ {
448+ HashMetaPage metap ;
449+ HashPageOpaque pageopaque ;
450+ Page page ;
451+ double dnumbuckets ;
452+ uint32 num_buckets ;
453+ uint32 log2_num_buckets ;
454+ uint32 i ;
455+
/*
 * NOTE(review): diff view — several spans of this function are elided by
 * the hunk headers below (the dnumbuckets/num_buckets computation, the
 * bitmap-size loop tail, and the spares/maxbucket setup), so this
 * definition is incomplete here.
 */
337456 /*
338457 * Choose the number of initial bucket pages to match the fill factor
339458 * given the estimated number of tuples. We round up the result to the
@@ -353,30 +472,25 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
353472 Assert (num_buckets == (((uint32 ) 1 ) << log2_num_buckets ));
354473 Assert (log2_num_buckets < HASH_MAX_SPLITPOINTS );
355474
356- /*
357- * We initialize the metapage, the first N bucket pages, and the first
358- * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
359- * calls to occur. This ensures that the smgr level has the right idea of
360- * the physical index length.
361- */
362- metabuf = _hash_getnewbuf (rel , HASH_METAPAGE , forkNum );
363- pg = BufferGetPage (metabuf );
475+ page = BufferGetPage (buf );
476+ if (initpage )
477+ _hash_pageinit (page , BufferGetPageSize (buf ));
364478
365- pageopaque = (HashPageOpaque ) PageGetSpecialPointer (pg );
479+ pageopaque = (HashPageOpaque ) PageGetSpecialPointer (page );
/* The metapage is not a bucket: no neighbors, bucket number -1. */
366480 pageopaque -> hasho_prevblkno = InvalidBlockNumber ;
367481 pageopaque -> hasho_nextblkno = InvalidBlockNumber ;
368482 pageopaque -> hasho_bucket = -1 ;
369483 pageopaque -> hasho_flag = LH_META_PAGE ;
370484 pageopaque -> hasho_page_id = HASHO_PAGE_ID ;
371485
372- metap = HashPageGetMeta (pg );
486+ metap = HashPageGetMeta (page );
373487
374488 metap -> hashm_magic = HASH_MAGIC ;
375489 metap -> hashm_version = HASH_VERSION ;
376490 metap -> hashm_ntuples = 0 ;
377491 metap -> hashm_nmaps = 0 ;
378492 metap -> hashm_ffactor = ffactor ;
379- metap -> hashm_bsize = HashGetMaxBitmapSize (pg );
493+ metap -> hashm_bsize = HashGetMaxBitmapSize (page );
380494 /* find largest bitmap array size that will fit in page size */
381495 for (i = _hash_log2 (metap -> hashm_bsize ); i > 0 ; -- i )
382496 {
@@ -393,7 +507,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
393507 * pretty useless for normal operation (in fact, hashm_procid is not used
394508 * anywhere), but it might be handy for forensic purposes so we keep it.
395509 */
/* procid is now supplied by the caller instead of looked up here. */
396- metap -> hashm_procid = index_getprocid ( rel , 1 , HASHPROC ) ;
510+ metap -> hashm_procid = procid ;
397511
398512 /*
399513 * We initialize the index with N buckets, 0 .. N-1, occupying physical
@@ -411,54 +525,9 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
411525 metap -> hashm_ovflpoint = log2_num_buckets ;
412526 metap -> hashm_firstfree = 0 ;
413527
/*
 * The "-" lines below are the bucket/bitmap initialization removed from
 * the old _hash_metapinit(); that work now lives in _hash_init() above.
 */
414- /*
415- * Release buffer lock on the metapage while we initialize buckets.
416- * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
417- * won't accomplish anything. It's a bad idea to hold buffer locks for
418- * long intervals in any case, since that can block the bgwriter.
419- */
420- MarkBufferDirty (metabuf );
421- LockBuffer (metabuf , BUFFER_LOCK_UNLOCK );
422-
423- /*
424- * Initialize the first N buckets
425- */
426- for (i = 0 ; i < num_buckets ; i ++ )
427- {
428- /* Allow interrupts, in case N is huge */
429- CHECK_FOR_INTERRUPTS ();
430-
431- buf = _hash_getnewbuf (rel , BUCKET_TO_BLKNO (metap , i ), forkNum );
432- pg = BufferGetPage (buf );
433- pageopaque = (HashPageOpaque ) PageGetSpecialPointer (pg );
434-
435- /*
436- * Set hasho_prevblkno with current hashm_maxbucket. This value will
437- * be used to validate cached HashMetaPageData. See
438- * _hash_getbucketbuf_from_hashkey().
439- */
440- pageopaque -> hasho_prevblkno = metap -> hashm_maxbucket ;
441- pageopaque -> hasho_nextblkno = InvalidBlockNumber ;
442- pageopaque -> hasho_bucket = i ;
443- pageopaque -> hasho_flag = LH_BUCKET_PAGE ;
444- pageopaque -> hasho_page_id = HASHO_PAGE_ID ;
445- MarkBufferDirty (buf );
446- _hash_relbuf (rel , buf );
447- }
448-
449- /* Now reacquire buffer lock on metapage */
450- LockBuffer (metabuf , BUFFER_LOCK_EXCLUSIVE );
451-
452- /*
453- * Initialize first bitmap page
454- */
455- _hash_initbitmap (rel , metap , num_buckets + 1 , forkNum );
456-
457- /* all done */
458- MarkBufferDirty (metabuf );
459- _hash_relbuf (rel , metabuf );
460-
461- return num_buckets ;
462528 /* Set pd_lower just past the end of the metadata. */
529+ ((PageHeader ) page )-> pd_lower =
530+ ((char * ) metap + sizeof (HashMetaPageData )) - (char * ) page ;
462531}
463532
464533/*
@@ -535,7 +604,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
535604 * than a disk block then this would be an independent constraint.
536605 *
537606 * If you change this, see also the maximum initial number of buckets in
538- * _hash_metapinit ().
607+ * _hash_init ().
539608 */
540609 if (metap -> hashm_maxbucket >= (uint32 ) 0x7FFFFFFE )
541610 goto fail ;
0 commit comments