@@ -377,12 +377,11 @@ _hash_firstfreebit(uint32 map)
  * NB: caller must not hold lock on metapage, nor on page, that's next to
  * ovflbuf in the bucket chain.  We don't acquire the lock on page that's
  * prior to ovflbuf in chain if it is same as wbuf because the caller already
- * has a lock on same.  This function releases the lock on wbuf and caller
- * is responsible for releasing the pin on same.
+ * has a lock on same.
  */
 BlockNumber
 _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
-				   bool wbuf_dirty, BufferAccessStrategy bstrategy)
+				   BufferAccessStrategy bstrategy)
 {
 	HashMetaPage metap;
 	Buffer		metabuf;
@@ -447,24 +446,10 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
 		Assert(prevopaque->hasho_bucket == bucket);
 		prevopaque->hasho_nextblkno = nextblkno;
 
+		MarkBufferDirty(prevbuf);
 		if (prevblkno != writeblkno)
-		{
-			MarkBufferDirty(prevbuf);
 			_hash_relbuf(rel, prevbuf);
-		}
-		else
-		{
-			/* ensure to mark prevbuf as dirty */
-			wbuf_dirty = true;
-		}
 	}
-
-	/* write and unlock the write buffer */
-	if (wbuf_dirty)
-		_hash_chgbufaccess(rel, wbuf, HASH_WRITE, HASH_NOLOCK);
-	else
-		_hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
-
 	if (BlockNumberIsValid(nextblkno))
 	{
 		Buffer		nextbuf = _hash_getbuf_with_strategy(rel,
@@ -783,30 +768,28 @@ _hash_squeezebucket(Relation rel,
 			 * Tricky point here: if our read and write pages are adjacent in the
 			 * bucket chain, our write lock on wbuf will conflict with
 			 * _hash_freeovflpage's attempt to update the sibling links of the
-			 * removed page.  In that case, we don't need to lock it again and we
-			 * always release the lock on wbuf in _hash_freeovflpage and then
-			 * retake it again here.  This will not only simplify the code, but is
-			 * required to atomically log the changes which will be helpful when
-			 * we write WAL for hash indexes.
+			 * removed page.  In that case, we don't need to lock it again.
 			 */
 			rblkno = ropaque->hasho_prevblkno;
 			Assert(BlockNumberIsValid(rblkno));
 
 			/* free this overflow page (releases rbuf) */
-			_hash_freeovflpage(rel, rbuf, wbuf, wbuf_dirty, bstrategy);
+			_hash_freeovflpage(rel, rbuf, wbuf, bstrategy);
+
+			if (wbuf_dirty)
+				MarkBufferDirty(wbuf);
 
 			/* are we freeing the page adjacent to wbuf? */
 			if (rblkno == wblkno)
 			{
 				/* retain the pin on primary bucket page till end of bucket scan */
-				if (wblkno != bucket_blkno)
-					_hash_dropbuf(rel, wbuf);
+				if (wblkno == bucket_blkno)
+					_hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
+				else
+					_hash_relbuf(rel, wbuf);
 				return;
 			}
 
-			/* lock the overflow page being written, then get the previous one */
-			_hash_chgbufaccess(rel, wbuf, HASH_NOLOCK, HASH_WRITE);
-
 			rbuf = _hash_getbuf_with_strategy(rel,
 											  rblkno,
 											  HASH_WRITE,
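
For context, a minimal caller-side sketch of the contract after this change, modeled on the adjacent-page branch of _hash_squeezebucket shown above. The helper name example_free_adjacent and its parameter list are assumptions made for illustration, not code from the commit; only the calls it makes appear in the patch. With the wbuf_dirty parameter gone, _hash_freeovflpage() keeps wbuf locked and pinned, so the caller dirties the buffer itself and then either just drops the lock (retaining the pin on the primary bucket page until the bucket scan ends) or releases the buffer entirely.

/*
 * Illustrative sketch only; not part of the commit.  Assumes the backend
 * environment in which hashovfl.c is compiled.
 */
#include "postgres.h"

#include "access/hash.h"
#include "storage/bufmgr.h"

static void
example_free_adjacent(Relation rel, Buffer rbuf, Buffer wbuf,
					  bool wbuf_dirty, BlockNumber wblkno,
					  BlockNumber bucket_blkno,
					  BufferAccessStrategy bstrategy)
{
	/* releases rbuf; wbuf stays locked and pinned */
	_hash_freeovflpage(rel, rbuf, wbuf, bstrategy);

	/* marking wbuf dirty is now the caller's responsibility */
	if (wbuf_dirty)
		MarkBufferDirty(wbuf);

	/* retain the pin on the primary bucket page till end of bucket scan */
	if (wblkno == bucket_blkno)
		_hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
	else
		_hash_relbuf(rel, wbuf);
}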