nodeHash.c
1/*-------------------------------------------------------------------------
2 *
3 * nodeHash.c
4 * Routines to hash relations for hashjoin
5 *
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/executor/nodeHash.c
12 *
13 * See note on parallelism in nodeHashjoin.c.
14 *
15 *-------------------------------------------------------------------------
16 */
17/*
18 * INTERFACE ROUTINES
19 * MultiExecHash - generate an in-memory hash table of the relation
20 * ExecInitHash - initialize node and subnodes
21 * ExecEndHash - shutdown node and subnodes
22 */
23
24#include "postgres.h"
25
26#include <math.h>
27#include <limits.h>
28
29#include "access/htup_details.h"
30#include "access/parallel.h"
31#include "catalog/pg_statistic.h"
32#include "commands/tablespace.h"
33#include "executor/executor.h"
34#include "executor/hashjoin.h"
35#include "executor/nodeHash.h"
36#include "executor/nodeHashjoin.h"
37#include "miscadmin.h"
38#include "port/pg_bitutils.h"
39#include "utils/lsyscache.h"
40#include "utils/memutils.h"
41#include "utils/syscache.h"
42#include "utils/wait_event.h"
43
44static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
45static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
46static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
47static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
48static void ExecHashBuildSkewHash(HashState *hashstate,
49 HashJoinTable hashtable, Hash *node,
50 int mcvsToUse);
51static void ExecHashSkewTableInsert(HashJoinTable hashtable,
52 TupleTableSlot *slot,
53 uint32 hashvalue,
54 int bucketNumber);
55static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
56
57static void *dense_alloc(HashJoinTable hashtable, Size size);
58static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
59 size_t size,
60 dsa_pointer *shared);
61static void MultiExecPrivateHash(HashState *node);
62static void MultiExecParallelHash(HashState *node);
63static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
64 int bucketno);
65static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
66 HashJoinTuple tuple);
67static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
68 HashJoinTuple tuple,
69 dsa_pointer tuple_shared);
70static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
71static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
72static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
73static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
74static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
75 dsa_pointer *shared);
76static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
77 int batchno,
78 size_t size);
79static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
80static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
81
82
83/* ----------------------------------------------------------------
84 * ExecHash
85 *
86 * stub for pro forma compliance
87 * ----------------------------------------------------------------
88 */
89static TupleTableSlot *
90ExecHash(PlanState *pstate)
91{
92 elog(ERROR, "Hash node does not support ExecProcNode call convention");
93 return NULL;
94}
95
96/* ----------------------------------------------------------------
97 * MultiExecHash
98 *
99 * build hash table for hashjoin, doing partitioning if more
100 * than one batch is required.
101 * ----------------------------------------------------------------
102 */
103Node *
104MultiExecHash(HashState *node)
105{
106 /* must provide our own instrumentation support */
107 if (node->ps.instrument)
108 InstrStartNode(node->ps.instrument);
109
110 if (node->parallel_state != NULL)
111 MultiExecParallelHash(node);
112 else
113 MultiExecPrivateHash(node);
114
115 /* must provide our own instrumentation support */
116 if (node->ps.instrument)
117 InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
118
119 /*
120 * We do not return the hash table directly because it's not a subtype of
121 * Node, and so would violate the MultiExecProcNode API. Instead, our
122 * parent Hashjoin node is expected to know how to fish it out of our node
123 * state. Ugly but not really worth cleaning up, since Hashjoin knows
124 * quite a bit more about Hash besides that.
125 */
126 return NULL;
127}
128
129/* ----------------------------------------------------------------
130 * MultiExecPrivateHash
131 *
132 * parallel-oblivious version, building a backend-private
133 * hash table and (if necessary) batch files.
134 * ----------------------------------------------------------------
135 */
136static void
137MultiExecPrivateHash(HashState *node)
138{
139 PlanState *outerNode;
140 HashJoinTable hashtable;
141 TupleTableSlot *slot;
142 ExprContext *econtext;
143
144 /*
145 * get state info from node
146 */
147 outerNode = outerPlanState(node);
148 hashtable = node->hashtable;
149
150 /*
151 * set expression context
152 */
153 econtext = node->ps.ps_ExprContext;
154
155 /*
156 * Get all tuples from the node below the Hash node and insert into the
157 * hash table (or temp files).
158 */
159 for (;;)
160 {
161 bool isnull;
162 Datum hashdatum;
163
164 slot = ExecProcNode(outerNode);
165 if (TupIsNull(slot))
166 break;
167 /* We have to compute the hash value */
168 econtext->ecxt_outertuple = slot;
169
170 ResetExprContext(econtext);
171
172 hashdatum = ExecEvalExprSwitchContext(node->hash_expr, econtext,
173 &isnull);
174
175 if (!isnull)
176 {
177 uint32 hashvalue = DatumGetUInt32(hashdatum);
178 int bucketNumber;
179
180 bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
181 if (bucketNumber != INVALID_SKEW_BUCKET_NO)
182 {
183 /* It's a skew tuple, so put it into that hash table */
184 ExecHashSkewTableInsert(hashtable, slot, hashvalue,
185 bucketNumber);
186 hashtable->skewTuples += 1;
187 }
188 else
189 {
190 /* Not subject to skew optimization, so insert normally */
191 ExecHashTableInsert(hashtable, slot, hashvalue);
192 }
193 hashtable->totalTuples += 1;
194 }
195 }
196
197 /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
198 if (hashtable->nbuckets != hashtable->nbuckets_optimal)
199 ExecHashIncreaseNumBuckets(hashtable);
200
201 /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
202 hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
203 if (hashtable->spaceUsed > hashtable->spacePeak)
204 hashtable->spacePeak = hashtable->spaceUsed;
205
206 hashtable->partialTuples = hashtable->totalTuples;
207}
208
209/* ----------------------------------------------------------------
210 * MultiExecParallelHash
211 *
212 * parallel-aware version, building a shared hash table and
213 * (if necessary) batch files using the combined effort of
214 * a set of co-operating backends.
215 * ----------------------------------------------------------------
216 */
217static void
218MultiExecParallelHash(HashState *node)
219{
220 ParallelHashJoinState *pstate;
221 PlanState *outerNode;
222 HashJoinTable hashtable;
223 TupleTableSlot *slot;
224 ExprContext *econtext;
225 uint32 hashvalue;
226 Barrier *build_barrier;
227 int i;
228
229 /*
230 * get state info from node
231 */
232 outerNode = outerPlanState(node);
233 hashtable = node->hashtable;
234
235 /*
236 * set expression context
237 */
238 econtext = node->ps.ps_ExprContext;
239
240 /*
241 * Synchronize the parallel hash table build. At this stage we know that
242 * the shared hash table has been or is being set up by
243 * ExecHashTableCreate(), but we don't know if our peers have returned
244 * from there or are here in MultiExecParallelHash(), and if so how far
245 * through they are. To find out, we check the build_barrier phase then
246 * and jump to the right step in the build algorithm.
247 */
248 pstate = hashtable->parallel_state;
249 build_barrier = &pstate->build_barrier;
250 Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
251 switch (BarrierPhase(build_barrier))
252 {
253 case PHJ_BUILD_ALLOCATE:
254
255 /*
256 * Either I just allocated the initial hash table in
257 * ExecHashTableCreate(), or someone else is doing that. Either
258 * way, wait for everyone to arrive here so we can proceed.
259 */
260 BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
261 /* Fall through. */
262
263 case PHJ_BUILD_HASH_INNER:
264
265 /*
266 * It's time to begin hashing, or if we just arrived here then
267 * hashing is already underway, so join in that effort. While
268 * hashing we have to be prepared to help increase the number of
269 * batches or buckets at any time, and if we arrived here when
270 * that was already underway we'll have to help complete that work
271 * immediately so that it's safe to access batches and buckets
272 * below.
273 */
274 if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
275 PHJ_GROW_BATCHES_ELECT)
276 ExecParallelHashIncreaseNumBatches(hashtable);
277 if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
278 PHJ_GROW_BUCKETS_ELECT)
279 ExecParallelHashIncreaseNumBuckets(hashtable);
280 ExecParallelHashEnsureBatchAccessors(hashtable);
281 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
282 for (;;)
283 {
284 bool isnull;
285
286 slot = ExecProcNode(outerNode);
287 if (TupIsNull(slot))
288 break;
289 econtext->ecxt_outertuple = slot;
290
291 ResetExprContext(econtext);
292
293 hashvalue = DatumGetUInt32(ExecEvalExprSwitchContext(node->hash_expr,
294 econtext,
295 &isnull));
296
297 if (!isnull)
298 ExecParallelHashTableInsert(hashtable, slot, hashvalue);
299 hashtable->partialTuples++;
300 }
301
302 /*
303 * Make sure that any tuples we wrote to disk are visible to
304 * others before anyone tries to load them.
305 */
306 for (i = 0; i < hashtable->nbatch; ++i)
307 sts_end_write(hashtable->batches[i].inner_tuples);
308
309 /*
310 * Update shared counters. We need an accurate total tuple count
311 * to control the empty table optimization.
312 */
313 ExecParallelHashMergeCounters(hashtable);
314
315 BarrierDetach(&pstate->grow_buckets_barrier);
316 BarrierDetach(&pstate->grow_batches_barrier);
317
318 /*
319 * Wait for everyone to finish building and flushing files and
320 * counters.
321 */
322 if (BarrierArriveAndWait(build_barrier,
323 WAIT_EVENT_HASH_BUILD_HASH_INNER))
324 {
325 /*
326 * Elect one backend to disable any further growth. Batches
327 * are now fixed. While building them we made sure they'd fit
328 * in our memory budget when we load them back in later (or we
329 * tried to do that and gave up because we detected extreme
330 * skew).
331 */
332 pstate->growth = PHJ_GROWTH_DISABLED;
333 }
334 }
335
336 /*
337 * We're not yet attached to a batch. We all agree on the dimensions and
338 * number of inner tuples (for the empty table optimization).
339 */
340 hashtable->curbatch = -1;
341 hashtable->nbuckets = pstate->nbuckets;
342 hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
343 hashtable->totalTuples = pstate->total_tuples;
344
345 /*
346 * Unless we're completely done and the batch state has been freed, make
347 * sure we have accessors.
348 */
349 if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
350 ExecParallelHashEnsureBatchAccessors(hashtable);
351
352 /*
353 * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
354 * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
355 * there already).
356 */
357 Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
358 BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
359 BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
360}
361
362/* ----------------------------------------------------------------
363 * ExecInitHash
364 *
365 * Init routine for Hash node
366 * ----------------------------------------------------------------
367 */
368HashState *
369ExecInitHash(Hash *node, EState *estate, int eflags)
370{
371 HashState *hashstate;
372
373 /* check for unsupported flags */
374 Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
375
376 /*
377 * create state structure
378 */
379 hashstate = makeNode(HashState);
380 hashstate->ps.plan = (Plan *) node;
381 hashstate->ps.state = estate;
382 hashstate->ps.ExecProcNode = ExecHash;
383 /* delay building hashtable until ExecHashTableCreate() in executor run */
384 hashstate->hashtable = NULL;
385
386 /*
387 * Miscellaneous initialization
388 *
389 * create expression context for node
390 */
391 ExecAssignExprContext(estate, &hashstate->ps);
392
393 /*
394 * initialize child nodes
395 */
396 outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
397
398 /*
399 * initialize our result slot and type. No need to build projection
400 * because this node doesn't do projections.
401 */
402 ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
403 hashstate->ps.ps_ProjInfo = NULL;
404
405 Assert(node->plan.qual == NIL);
406
407 /*
408 * Delay initialization of hash_expr until ExecInitHashJoin(). We cannot
409 * build the ExprState here as we don't yet know the join type we're going
410 * to be hashing values for and we need to know that before calling
411 * ExecBuildHash32Expr as the keep_nulls parameter depends on the join
412 * type.
413 */
414 hashstate->hash_expr = NULL;
415
416 return hashstate;
417}
418
419/* ---------------------------------------------------------------
420 * ExecEndHash
421 *
422 * clean up routine for Hash node
423 * ----------------------------------------------------------------
424 */
425void
426ExecEndHash(HashState *node)
427{
428 PlanState *outerPlan;
429
430 /*
431 * shut down the subplan
432 */
433 outerPlan = outerPlanState(node);
434 ExecEndNode(outerPlan);
435}
436
437
438/* ----------------------------------------------------------------
439 * ExecHashTableCreate
440 *
441 * create an empty hashtable data structure for hashjoin.
442 * ----------------------------------------------------------------
443 */
444HashJoinTable
445ExecHashTableCreate(HashState *state)
446{
447 Hash *node;
448 HashJoinTable hashtable;
449 Plan *outerNode;
450 size_t space_allowed;
451 int nbuckets;
452 int nbatch;
453 double rows;
454 int num_skew_mcvs;
455 int log2_nbuckets;
456 MemoryContext oldcxt;
457
458 /*
459 * Get information about the size of the relation to be hashed (it's the
460 * "outer" subtree of this node, but the inner relation of the hashjoin).
461 * Compute the appropriate size of the hash table.
462 */
463 node = (Hash *) state->ps.plan;
464 outerNode = outerPlan(node);
465
466 /*
467 * If this is a shared hash table with a partial plan, then we can't use
468 * outerNode->plan_rows to estimate its size. We need an estimate of the
469 * total number of rows across all copies of the partial plan.
470 */
471 rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
472
473 ExecChooseHashTableSize(rows, outerNode->plan_width,
474 OidIsValid(node->skewTable),
475 state->parallel_state != NULL,
476 state->parallel_state != NULL ?
477 state->parallel_state->nparticipants - 1 : 0,
478 &space_allowed,
479 &nbuckets, &nbatch, &num_skew_mcvs);
480
481 /* nbuckets must be a power of 2 */
482 log2_nbuckets = pg_ceil_log2_32(nbuckets);
483 Assert(nbuckets == (1 << log2_nbuckets));
484
485 /*
486 * Initialize the hash table control block.
487 *
488 * The hashtable control block is just palloc'd from the executor's
489 * per-query memory context. Everything else should be kept inside the
490 * subsidiary hashCxt, batchCxt or spillCxt.
491 */
492 hashtable = palloc_object(HashJoinTableData);
493 hashtable->nbuckets = nbuckets;
494 hashtable->nbuckets_original = nbuckets;
495 hashtable->nbuckets_optimal = nbuckets;
496 hashtable->log2_nbuckets = log2_nbuckets;
497 hashtable->log2_nbuckets_optimal = log2_nbuckets;
498 hashtable->buckets.unshared = NULL;
499 hashtable->skewEnabled = false;
500 hashtable->skewBucket = NULL;
501 hashtable->skewBucketLen = 0;
502 hashtable->nSkewBuckets = 0;
503 hashtable->skewBucketNums = NULL;
504 hashtable->nbatch = nbatch;
505 hashtable->curbatch = 0;
506 hashtable->nbatch_original = nbatch;
507 hashtable->nbatch_outstart = nbatch;
508 hashtable->growEnabled = true;
509 hashtable->totalTuples = 0;
510 hashtable->partialTuples = 0;
511 hashtable->skewTuples = 0;
512 hashtable->innerBatchFile = NULL;
513 hashtable->outerBatchFile = NULL;
514 hashtable->spaceUsed = 0;
515 hashtable->spacePeak = 0;
516 hashtable->spaceAllowed = space_allowed;
517 hashtable->spaceUsedSkew = 0;
518 hashtable->spaceAllowedSkew =
519 hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
520 hashtable->chunks = NULL;
521 hashtable->current_chunk = NULL;
522 hashtable->parallel_state = state->parallel_state;
523 hashtable->area = state->ps.state->es_query_dsa;
524 hashtable->batches = NULL;
525
526#ifdef HJDEBUG
527 printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
528 hashtable, nbatch, nbuckets);
529#endif
530
531 /*
532 * Create temporary memory contexts in which to keep the hashtable working
533 * storage. See notes in executor/hashjoin.h.
534 */
536 "HashTableContext",
538
539 hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
540 "HashBatchContext",
542
543 hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
544 "HashSpillContext",
546
547 /* Allocate data that will live for the life of the hashjoin */
548
549 oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
550
551 if (nbatch > 1 && hashtable->parallel_state == NULL)
552 {
553 MemoryContext oldctx;
554
555 /*
556 * allocate and initialize the file arrays in hashCxt (not needed for
557 * parallel case which uses shared tuplestores instead of raw files)
558 */
559 oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
560
561 hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
562 hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
563
564 MemoryContextSwitchTo(oldctx);
565
566 /* The files will not be opened until needed... */
567 /* ... but make sure we have temp tablespaces established for them */
568 PrepareTempTablespaces();
569 }
570
571 MemoryContextSwitchTo(oldcxt);
572
573 if (hashtable->parallel_state)
574 {
575 ParallelHashJoinState *pstate = hashtable->parallel_state;
576 Barrier *build_barrier;
577
578 /*
579 * Attach to the build barrier. The corresponding detach operation is
580 * in ExecHashTableDetach. Note that we won't attach to the
581 * batch_barrier for batch 0 yet. We'll attach later and start it out
582 * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
583 * then loaded while hashing (the standard hybrid hash join
584 * algorithm), and we'll coordinate that using build_barrier.
585 */
586 build_barrier = &pstate->build_barrier;
587 BarrierAttach(build_barrier);
588
589 /*
590 * So far we have no idea whether there are any other participants,
591 * and if so, what phase they are working on. The only thing we care
592 * about at this point is whether someone has already created the
593 * SharedHashJoinBatch objects and the hash table for batch 0. One
594 * backend will be elected to do that now if necessary.
595 */
596 if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
597 BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
598 {
599 pstate->nbatch = nbatch;
600 pstate->space_allowed = space_allowed;
601 pstate->growth = PHJ_GROWTH_OK;
602
603 /* Set up the shared state for coordinating batches. */
604 ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
605
606 /*
607 * Allocate batch 0's hash table up front so we can load it
608 * directly while hashing.
609 */
610 pstate->nbuckets = nbuckets;
611 ExecParallelHashTableAlloc(hashtable, 0);
612 }
613
614 /*
615 * The next Parallel Hash synchronization point is in
616 * MultiExecParallelHash(), which will progress it all the way to
617 * PHJ_BUILD_RUN. The caller must not return control from this
618 * executor node between now and then.
619 */
620 }
621 else
622 {
623 /*
624 * Prepare context for the first-scan space allocations; allocate the
625 * hashbucket array therein, and set each bucket "empty".
626 */
627 MemoryContextSwitchTo(hashtable->batchCxt);
628
629 hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
630
631 /*
632 * Set up for skew optimization, if possible and there's a need for
633 * more than one batch. (In a one-batch join, there's no point in
634 * it.)
635 */
636 if (nbatch > 1)
637 ExecHashBuildSkewHash(state, hashtable, node, num_skew_mcvs);
638
639 MemoryContextSwitchTo(oldcxt);
640 }
641
642 return hashtable;
643}
644
645
646/*
647 * Compute appropriate size for hashtable given the estimated size of the
648 * relation to be hashed (number of rows and average row width).
649 *
650 * This is exported so that the planner's costsize.c can use it.
651 */
652
653/* Target bucket loading (tuples per bucket) */
654#define NTUP_PER_BUCKET 1
655
656void
657ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
658 bool try_combined_hash_mem,
659 int parallel_workers,
660 size_t *space_allowed,
661 int *numbuckets,
662 int *numbatches,
663 int *num_skew_mcvs)
664{
665 int tupsize;
666 double inner_rel_bytes;
667 size_t hash_table_bytes;
668 size_t bucket_bytes;
669 size_t max_pointers;
670 int nbatch = 1;
671 int nbuckets;
672 double dbuckets;
673
674 /* Force a plausible relation size if no info */
675 if (ntuples <= 0.0)
676 ntuples = 1000.0;
677
678 /*
679 * Estimate tupsize based on footprint of tuple in hashtable... note this
680 * does not allow for any palloc overhead. The manipulations of spaceUsed
681 * don't count palloc overhead either.
682 */
683 tupsize = HJTUPLE_OVERHEAD +
684 MAXALIGN(SizeofMinimalTupleHeader) +
685 MAXALIGN(tupwidth);
686 inner_rel_bytes = ntuples * tupsize;
687
688 /*
689 * Compute in-memory hashtable size limit from GUCs.
690 */
691 hash_table_bytes = get_hash_memory_limit();
692
693 /*
694 * Parallel Hash tries to use the combined hash_mem of all workers to
695 * avoid the need to batch. If that won't work, it falls back to hash_mem
696 * per worker and tries to process batches in parallel.
697 */
698 if (try_combined_hash_mem)
699 {
700 /* Careful, this could overflow size_t */
701 double newlimit;
702
703 newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
704 newlimit = Min(newlimit, (double) SIZE_MAX);
705 hash_table_bytes = (size_t) newlimit;
706 }
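 /*
 * Illustrative example (editorial note, not part of the original source):
 * with hash_mem at 64MB and 4 parallel workers, a parallel-aware build
 * first tries a combined budget of 64MB * (4 + 1) = 320MB before falling
 * back to the per-worker limit if batching is still required.
 */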
707
708 *space_allowed = hash_table_bytes;
709
710 /*
711 * If skew optimization is possible, estimate the number of skew buckets
712 * that will fit in the memory allowed, and decrement the assumed space
713 * available for the main hash table accordingly.
714 *
715 * We make the optimistic assumption that each skew bucket will contain
716 * one inner-relation tuple. If that turns out to be low, we will recover
717 * at runtime by reducing the number of skew buckets.
718 *
719 * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
720 * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
721 * will round up to the next power of 2 and then multiply by 4 to reduce
722 * collisions.
723 */
724 if (useskew)
725 {
726 size_t bytes_per_mcv;
727 size_t skew_mcvs;
728
729 /*----------
730 * Compute number of MCVs we could hold in hash_table_bytes
731 *
732 * Divisor is:
733 * size of a hash tuple +
734 * worst-case size of skewBucket[] per MCV +
735 * size of skewBucketNums[] entry +
736 * size of skew bucket struct itself
737 *----------
738 */
739 bytes_per_mcv = tupsize +
740 (8 * sizeof(HashSkewBucket *)) +
741 sizeof(int) +
742 SKEW_BUCKET_OVERHEAD;
743 skew_mcvs = hash_table_bytes / bytes_per_mcv;
744
745 /*
746 * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
747 * not to worry about size_t overflow in the multiplication)
748 */
749 skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
750
751 /* Now clamp to integer range */
752 skew_mcvs = Min(skew_mcvs, INT_MAX);
753
754 *num_skew_mcvs = (int) skew_mcvs;
755
756 /* Reduce hash_table_bytes by the amount needed for the skew table */
757 if (skew_mcvs > 0)
758 hash_table_bytes -= skew_mcvs * bytes_per_mcv;
759 }
760 else
761 *num_skew_mcvs = 0;
762
763 /*
764 * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
765 * memory is filled, assuming a single batch; but limit the value so that
766 * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
767 * nor MaxAllocSize.
768 *
769 * Note that both nbuckets and nbatch must be powers of 2 to make
770 * ExecHashGetBucketAndBatch fast.
771 */
772 max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
773 max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
774 /* If max_pointers isn't a power of 2, must round it down to one */
775 max_pointers = pg_prevpower2_size_t(max_pointers);
776
777 /* Also ensure we avoid integer overflow in nbatch and nbuckets */
778 /* (this step is redundant given the current value of MaxAllocSize) */
779 max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
780
781 dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
782 dbuckets = Min(dbuckets, max_pointers);
783 nbuckets = (int) dbuckets;
784 /* don't let nbuckets be really small, though ... */
785 nbuckets = Max(nbuckets, 1024);
786 /* ... and force it to be a power of 2. */
787 nbuckets = pg_nextpower2_32(nbuckets);
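 /*
 * Worked example (editorial, with hypothetical numbers): for an estimate
 * of 150,000 tuples and NTUP_PER_BUCKET = 1, dbuckets is 150,000; after
 * clamping against max_pointers and the 1024 floor, rounding up to the
 * next power of two gives nbuckets = 262,144 (2^18).
 */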
788
789 /*
790 * If there's not enough space to store the projected number of tuples and
791 * the required bucket headers, we will need multiple batches.
792 */
793 bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
794 if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
795 {
796 /* We'll need multiple batches */
797 size_t sbuckets;
798 double dbatch;
799 int minbatch;
800 size_t bucket_size;
801
802 /*
803 * If Parallel Hash with combined hash_mem would still need multiple
804 * batches, we'll have to fall back to regular hash_mem budget.
805 */
806 if (try_combined_hash_mem)
807 {
808 ExecChooseHashTableSize(ntuples, tupwidth, useskew,
809 false, parallel_workers,
810 space_allowed,
811 numbuckets,
812 numbatches,
813 num_skew_mcvs);
814 return;
815 }
816
817 /*
818 * Estimate the number of buckets we'll want to have when hash_mem is
819 * entirely full. Each bucket will contain a bucket pointer plus
820 * NTUP_PER_BUCKET tuples, whose projected size already includes
821 * overhead for the hash code, pointer to the next tuple, etc.
822 */
823 bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
824 if (hash_table_bytes <= bucket_size)
825 sbuckets = 1; /* avoid pg_nextpower2_size_t(0) */
826 else
827 sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
828 sbuckets = Min(sbuckets, max_pointers);
829 nbuckets = (int) sbuckets;
830 nbuckets = pg_nextpower2_32(nbuckets);
831 bucket_bytes = nbuckets * sizeof(HashJoinTuple);
832
833 /*
834 * Buckets are simple pointers to hashjoin tuples, while tupsize
835 * includes the pointer, hash code, and MinimalTupleData. So buckets
836 * should never really exceed 25% of hash_mem (even for
837 * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
838 * 2^N bytes, where we might get more because of doubling. So let's
839 * look for 50% here.
840 */
841 Assert(bucket_bytes <= hash_table_bytes / 2);
842
843 /* Calculate required number of batches. */
844 dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
845 dbatch = Min(dbatch, max_pointers);
846 minbatch = (int) dbatch;
847 nbatch = pg_nextpower2_32(Max(2, minbatch));
848 }
849
850 /*
851 * Optimize the total amount of memory consumed by the hash node.
852 *
853 * The nbatch calculation above focuses on the in-memory hash table,
854 * assuming no per-batch overhead. But each batch may have two files, each
855 * with a BLCKSZ buffer. For large nbatch values these buffers may use
856 * significantly more memory than the hash table.
857 *
858 * The total memory usage may be expressed by this formula:
859 *
860 * (inner_rel_bytes / nbatch) + (2 * nbatch * BLCKSZ)
861 *
862 * where (inner_rel_bytes / nbatch) is the size of the in-memory hash
863 * table and (2 * nbatch * BLCKSZ) is the amount of memory used by file
864 * buffers.
865 *
866 * The nbatch calculation however ignores the second part. And for very
867 * large inner_rel_bytes, there may be no nbatch that keeps total memory
868 * usage under the budget (work_mem * hash_mem_multiplier). To deal with
869 * that, we will adjust nbatch to minimize total memory consumption across
870 * both the hashtable and file buffers.
871 *
872 * As we increase the size of the hashtable, the number of batches
873 * decreases, and the total memory usage follows a U-shaped curve. We find
874 * the minimum nbatch by "walking back" -- checking if halving nbatch
875 * would lower the total memory usage. We stop when it no longer helps.
876 *
877 * We only reduce the number of batches. Adding batches reduces memory
878 * usage only when most of the memory is used by the hash table, with
879 * total memory usage within the limit or not far from it. We don't want
880 * to start batching when not needed, even if that would reduce memory
881 * usage.
882 *
883 * While growing the hashtable, we also adjust the number of buckets to
884 * maintain a load factor of NTUP_PER_BUCKET while squeezing tuples back
885 * from batches into the hashtable.
886 *
887 * Note that we can only change nbuckets during initial hashtable sizing.
888 * Once we start building the hash, nbuckets is fixed (we may still grow
889 * the hash table).
890 *
891 * We double several parameters (space_allowed, nbuckets, num_skew_mcvs),
892 * which introduces a risk of overflow. We avoid this by exiting the loop.
893 * We could do something smarter (e.g. capping nbuckets and continue), but
894 * the complexity is not worth it. Such cases are extremely rare, and this
895 * is a best-effort attempt to reduce memory usage.
896 */
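 /*
 * Editorial example of the tradeoff described above (hypothetical
 * numbers): with inner_rel_bytes = 4GB, BLCKSZ = 8kB and a 64MB budget,
 * nbatch = 64 means ~64MB of hash table plus 2 * 64 * 8kB = 1MB of file
 * buffers, while nbatch = 8192 means only 512kB of hash table but
 * 2 * 8192 * 8kB = 128MB of buffers. The loop below keeps halving nbatch
 * (and doubling space_allowed) only while that lowers the total.
 */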
897 while (nbatch > 1)
898 {
899 /* Check that buckets won't overflow MaxAllocSize */
900 if (nbuckets > (MaxAllocSize / sizeof(HashJoinTuple) / 2))
901 break;
902
903 /* num_skew_mcvs should be less than nbuckets */
904 Assert((*num_skew_mcvs) < (INT_MAX / 2));
905
906 /*
907 * Check that space_allowed won't overflow SIZE_MAX.
908 *
909 * We don't use hash_table_bytes here, because it does not include the
910 * skew buckets. And we want to limit the overall memory limit.
911 */
912 if ((*space_allowed) > (SIZE_MAX / 2))
913 break;
914
915 /*
916 * Will halving the number of batches and doubling the size of the
917 * hashtable reduce overall memory usage?
918 *
919 * This is the same as (S = space_allowed):
920 *
921 * (S + 2 * nbatch * BLCKSZ) < (S * 2 + nbatch * BLCKSZ)
922 *
923 * but avoiding intermediate overflow.
924 */
925 if (nbatch < (*space_allowed) / BLCKSZ)
926 break;
927
928 /*
929 * MaxAllocSize is sufficiently small that we are not worried about
930 * overflowing nbuckets.
931 */
932 nbuckets *= 2;
933
934 *num_skew_mcvs = (*num_skew_mcvs) * 2;
935 *space_allowed = (*space_allowed) * 2;
936
937 nbatch /= 2;
938 }
939
940 Assert(nbuckets > 0);
941 Assert(nbatch > 0);
942
943 *numbuckets = nbuckets;
944 *numbatches = nbatch;
945}
946
947
948/* ----------------------------------------------------------------
949 * ExecHashTableDestroy
950 *
951 * destroy a hash table
952 * ----------------------------------------------------------------
953 */
954void
955ExecHashTableDestroy(HashJoinTable hashtable)
956{
957 int i;
958
959 /*
960 * Make sure all the temp files are closed. We skip batch 0, since it
961 * can't have any temp files (and the arrays might not even exist if
962 * nbatch is only 1). Parallel hash joins don't use these files.
963 */
964 if (hashtable->innerBatchFile != NULL)
965 {
966 for (i = 1; i < hashtable->nbatch; i++)
967 {
968 if (hashtable->innerBatchFile[i])
969 BufFileClose(hashtable->innerBatchFile[i]);
970 if (hashtable->outerBatchFile[i])
971 BufFileClose(hashtable->outerBatchFile[i]);
972 }
973 }
974
975 /* Release working memory (batchCxt is a child, so it goes away too) */
976 MemoryContextDelete(hashtable->hashCxt);
977
978 /* And drop the control block */
979 pfree(hashtable);
980}
981
982/*
983 * Consider adjusting the allowed hash table size, depending on the number
984 * of batches, to minimize the overall memory usage (for both the hashtable
985 * and batch files).
986 *
987 * We're adjusting the size of the hash table, not the (optimal) number of
988 * buckets. We can't change that once we start building the hash, due to how
989 * ExecHashGetBucketAndBatch calculates batchno/bucketno from the hash. This
990 * means the load factor may not be optimal, but we're in damage control so
991 * we accept slower lookups. It's still much better than batch explosion.
992 *
993 * Returns true if we chose to increase the batch size (and thus we don't
994 * need to add batches), and false if we should increase nbatch.
995 */
996static bool
997ExecHashIncreaseBatchSize(HashJoinTable hashtable)
998{
999 /*
1000 * How much additional memory would doubling nbatch use? Each batch may
1001 * require two buffered files (inner/outer), with a BLCKSZ buffer.
1002 */
1003 size_t batchSpace = (hashtable->nbatch * 2 * (size_t) BLCKSZ);
1004
1005 /*
1006 * Compare the new space needed for doubling nbatch and for enlarging the
1007 * in-memory hash table. If doubling the hash table needs less memory,
1008 * just do that. Otherwise, continue with doubling the nbatch.
1009 *
1010 * We're either doubling spaceAllowed or batchSpace, so which of those
1011 * increases the memory usage the least is the same as comparing the
1012 * values directly.
1013 */
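 /*
 * Hypothetical example (editorial): with spaceAllowed = 32MB and
 * nbatch = 4096, batchSpace = 4096 * 2 * 8kB = 64MB, so doubling
 * spaceAllowed to 64MB costs less additional memory than doubling nbatch
 * would, and we grow the in-memory hash table instead.
 */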
1014 if (hashtable->spaceAllowed <= batchSpace)
1015 {
1016 hashtable->spaceAllowed *= 2;
1017 return true;
1018 }
1019
1020 return false;
1021}
1022
1023/*
1024 * ExecHashIncreaseNumBatches
1025 * increase the original number of batches in order to reduce
1026 * current memory consumption
1027 */
1028static void
1029ExecHashIncreaseNumBatches(HashJoinTable hashtable)
1030{
1031 int oldnbatch = hashtable->nbatch;
1032 int curbatch = hashtable->curbatch;
1033 int nbatch;
1034 long ninmemory;
1035 long nfreed;
1036 HashMemoryChunk oldchunks;
1037
1038 /* do nothing if we've decided to shut off growth */
1039 if (!hashtable->growEnabled)
1040 return;
1041
1042 /* safety check to avoid overflow */
1043 if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
1044 return;
1045
1046 /* consider increasing size of the in-memory hash table instead */
1047 if (ExecHashIncreaseBatchSize(hashtable))
1048 return;
1049
1050 nbatch = oldnbatch * 2;
1051 Assert(nbatch > 1);
1052
1053#ifdef HJDEBUG
1054 printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
1055 hashtable, nbatch, hashtable->spaceUsed);
1056#endif
1057
1058 if (hashtable->innerBatchFile == NULL)
1059 {
1060 MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
1061
1062 /* we had no file arrays before */
1063 hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
1064 hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
1065
1066 MemoryContextSwitchTo(oldcxt);
1067
1068 /* time to establish the temp tablespaces, too */
1069 PrepareTempTablespaces();
1070 }
1071 else
1072 {
1073 /* enlarge arrays and zero out added entries */
1074 hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
1075 hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
1076 }
1077
1078 hashtable->nbatch = nbatch;
1079
1080 /*
1081 * Scan through the existing hash table entries and dump out any that are
1082 * no longer of the current batch.
1083 */
1084 ninmemory = nfreed = 0;
1085
1086 /* If we know we need to resize nbuckets, we can do it while rebatching. */
1087 if (hashtable->nbuckets_optimal != hashtable->nbuckets)
1088 {
1089 /* we never decrease the number of buckets */
1090 Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
1091
1092 hashtable->nbuckets = hashtable->nbuckets_optimal;
1093 hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1094
1095 hashtable->buckets.unshared =
1096 repalloc_array(hashtable->buckets.unshared,
1097 HashJoinTuple, hashtable->nbuckets);
1098 }
1099
1100 /*
1101 * We will scan through the chunks directly, so that we can reset the
1102 * buckets now and not have to keep track of which tuples in the buckets have
1103 * already been processed. We will free the old chunks as we go.
1104 */
1105 memset(hashtable->buckets.unshared, 0,
1106 sizeof(HashJoinTuple) * hashtable->nbuckets);
1107 oldchunks = hashtable->chunks;
1108 hashtable->chunks = NULL;
1109
1110 /* so, let's scan through the old chunks, and all tuples in each chunk */
1111 while (oldchunks != NULL)
1112 {
1113 HashMemoryChunk nextchunk = oldchunks->next.unshared;
1114
1115 /* position within the buffer (up to oldchunks->used) */
1116 size_t idx = 0;
1117
1118 /* process all tuples stored in this chunk (and then free it) */
1119 while (idx < oldchunks->used)
1120 {
1121 HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
1122 MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1123 int hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
1124 int bucketno;
1125 int batchno;
1126
1127 ninmemory++;
1128 ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1129 &bucketno, &batchno);
1130
1131 if (batchno == curbatch)
1132 {
1133 /* keep tuple in memory - copy it into the new chunk */
1134 HashJoinTuple copyTuple;
1135
1136 copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1137 memcpy(copyTuple, hashTuple, hashTupleSize);
1138
1139 /* and add it back to the appropriate bucket */
1140 copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1141 hashtable->buckets.unshared[bucketno] = copyTuple;
1142 }
1143 else
1144 {
1145 /* dump it out */
1146 Assert(batchno > curbatch);
1147 ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
1148 hashTuple->hashvalue,
1149 &hashtable->innerBatchFile[batchno],
1150 hashtable);
1151
1152 hashtable->spaceUsed -= hashTupleSize;
1153 nfreed++;
1154 }
1155
1156 /* next tuple in this chunk */
1157 idx += MAXALIGN(hashTupleSize);
1158
1159 /* allow this loop to be cancellable */
1160 CHECK_FOR_INTERRUPTS();
1161 }
1162
1163 /* we're done with this chunk - free it and proceed to the next one */
1164 pfree(oldchunks);
1165 oldchunks = nextchunk;
1166 }
1167
1168#ifdef HJDEBUG
1169 printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
1170 hashtable, nfreed, ninmemory, hashtable->spaceUsed);
1171#endif
1172
1173 /*
1174 * If we dumped out either all or none of the tuples in the table, disable
1175 * further expansion of nbatch. This situation implies that we have
1176 * enough tuples of identical hashvalues to overflow spaceAllowed.
1177 * Increasing nbatch will not fix it since there's no way to subdivide the
1178 * group any more finely. We have to just gut it out and hope the server
1179 * has enough RAM.
1180 */
1181 if (nfreed == 0 || nfreed == ninmemory)
1182 {
1183 hashtable->growEnabled = false;
1184#ifdef HJDEBUG
1185 printf("Hashjoin %p: disabling further increase of nbatch\n",
1186 hashtable);
1187#endif
1188 }
1189}
1190
1191/*
1192 * ExecParallelHashIncreaseNumBatches
1193 * Every participant attached to grow_batches_barrier must run this
1194 * function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
1195 */
1196static void
1197ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
1198{
1199 ParallelHashJoinState *pstate = hashtable->parallel_state;
1200
1202
1203 /*
1204 * It's unlikely, but we need to be prepared for new participants to show
1205 * up while we're in the middle of this operation so we need to switch on
1206 * barrier phase here.
1207 */
1209 {
1211
1212 /*
1213 * Elect one participant to prepare to grow the number of batches.
1214 * This involves reallocating or resetting the buckets of batch 0
1215 * in preparation for all participants to begin repartitioning the
1216 * tuples.
1217 */
1219 WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
1220 {
1221 dsa_pointer_atomic *buckets;
1222 ParallelHashJoinBatch *old_batch0;
1223 int new_nbatch;
1224 int i;
1225
1226 /* Move the old batch out of the way. */
1227 old_batch0 = hashtable->batches[0].shared;
1228 pstate->old_batches = pstate->batches;
1229 pstate->old_nbatch = hashtable->nbatch;
1230 pstate->batches = InvalidDsaPointer;
1231
1232 /* Free this backend's old accessors. */
1233 ExecParallelHashCloseBatchAccessors(hashtable);
1234
1235 /* Figure out how many batches to use. */
1236 if (hashtable->nbatch == 1)
1237 {
1238 /*
1239 * We are going from single-batch to multi-batch. We need
1240 * to switch from one large combined memory budget to the
1241 * regular hash_mem budget.
1242 */
1244
1245 /*
1246 * The combined hash_mem of all participants wasn't
1247 * enough. Therefore one batch per participant would be
1248 * approximately equivalent and would probably also be
1249 * insufficient. So try two batches per participant,
1250 * rounded up to a power of two.
1251 */
1252 new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
1253 }
1254 else
1255 {
1256 /*
1257 * We were already multi-batched. Try doubling the number
1258 * of batches.
1259 */
1260 new_nbatch = hashtable->nbatch * 2;
1261 }
1262
1263 /* Allocate new larger generation of batches. */
1264 Assert(hashtable->nbatch == pstate->nbatch);
1265 ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
1266 Assert(hashtable->nbatch == pstate->nbatch);
1267
1268 /* Replace or recycle batch 0's bucket array. */
1269 if (pstate->old_nbatch == 1)
1270 {
1271 double dtuples;
1272 double dbuckets;
1273 int new_nbuckets;
1274 uint32 max_buckets;
1275
1276 /*
1277 * We probably also need a smaller bucket array. How many
1278 * tuples do we expect per batch, assuming we have only
1279 * half of them so far? Normally we don't need to change
1280 * the bucket array's size, because the size of each batch
1281 * stays the same as we add more batches, but in this
1282 * special case we move from a large batch to many smaller
1283 * batches and it would be wasteful to keep the large
1284 * array.
1285 */
1286 dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
1287
1288 /*
1289 * We need to calculate the maximum number of buckets to
1290 * stay within the MaxAllocSize boundary. Round the
1291 * maximum number to the previous power of 2 given that
1292 * later we round the number to the next power of 2.
1293 */
1294 max_buckets = pg_prevpower2_32((uint32)
1295 (MaxAllocSize / sizeof(dsa_pointer_atomic)));
1296 dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
1297 dbuckets = Min(dbuckets, max_buckets);
1298 new_nbuckets = (int) dbuckets;
1299 new_nbuckets = Max(new_nbuckets, 1024);
1300 new_nbuckets = pg_nextpower2_32(new_nbuckets);
1301 dsa_free(hashtable->area, old_batch0->buckets);
1302 hashtable->batches[0].shared->buckets =
1303 dsa_allocate(hashtable->area,
1304 sizeof(dsa_pointer_atomic) * new_nbuckets);
1305 buckets = (dsa_pointer_atomic *)
1306 dsa_get_address(hashtable->area,
1307 hashtable->batches[0].shared->buckets);
1308 for (i = 0; i < new_nbuckets; ++i)
1310 pstate->nbuckets = new_nbuckets;
1311 }
1312 else
1313 {
1314 /* Recycle the existing bucket array. */
1315 hashtable->batches[0].shared->buckets = old_batch0->buckets;
1316 buckets = (dsa_pointer_atomic *)
1317 dsa_get_address(hashtable->area, old_batch0->buckets);
1318 for (i = 0; i < hashtable->nbuckets; ++i)
1320 }
1321
1322 /* Move all chunks to the work queue for parallel processing. */
1323 pstate->chunk_work_queue = old_batch0->chunks;
1324
1325 /* Disable further growth temporarily while we're growing. */
1326 pstate->growth = PHJ_GROWTH_DISABLED;
1327 }
1328 else
1329 {
1330 /* All other participants just flush their tuples to disk. */
1332 }
1333 /* Fall through. */
1334
1336 /* Wait for the above to be finished. */
1338 WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
1339 /* Fall through. */
1340
1342 /* Make sure that we have the current dimensions and buckets. */
1345 /* Then partition, flush counters. */
1349 /* Wait for the above to be finished. */
1351 WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
1352 /* Fall through. */
1353
1355
1356 /*
1357 * Elect one participant to clean up and decide whether further
1358 * repartitioning is needed, or should be disabled because it's
1359 * not helping.
1360 */
1362 WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
1363 {
1364 ParallelHashJoinBatch *old_batches;
1365 bool space_exhausted = false;
1366 bool extreme_skew_detected = false;
1367
1368 /* Make sure that we have the current dimensions and buckets. */
1371
1372 old_batches = dsa_get_address(hashtable->area, pstate->old_batches);
1373
1374 /* Are any of the new generation of batches exhausted? */
1375 for (int i = 0; i < hashtable->nbatch; ++i)
1376 {
1377 ParallelHashJoinBatch *batch;
1378 ParallelHashJoinBatch *old_batch;
1379 int parent;
1380
1381 batch = hashtable->batches[i].shared;
1382 if (batch->space_exhausted ||
1383 batch->estimated_size > pstate->space_allowed)
1384 space_exhausted = true;
1385
1386 parent = i % pstate->old_nbatch;
1387 old_batch = NthParallelHashJoinBatch(old_batches, parent);
1388 if (old_batch->space_exhausted ||
1389 batch->estimated_size > pstate->space_allowed)
1390 {
1391 /*
1392 * Did this batch receive ALL of the tuples from its
1393 * parent batch? That would indicate that further
1394 * repartitioning isn't going to help (the hash values
1395 * are probably all the same).
1396 */
1397 if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
1398 extreme_skew_detected = true;
1399 }
1400 }
1401
1402 /* Don't keep growing if it's not helping or we'd overflow. */
1403 if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
1404 pstate->growth = PHJ_GROWTH_DISABLED;
1405 else if (space_exhausted)
1407 else
1408 pstate->growth = PHJ_GROWTH_OK;
1409
1410 /* Free the old batches in shared memory. */
1411 dsa_free(hashtable->area, pstate->old_batches);
1413 }
1414 /* Fall through. */
1415
1417 /* Wait for the above to complete. */
1419 WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
1420 }
1421}
1422
1423/*
1424 * Repartition the tuples currently loaded into memory for inner batch 0
1425 * because the number of batches has been increased. Some tuples are retained
1426 * in memory and some are written out to a later batch.
1427 */
1428static void
1429ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
1430{
1431 dsa_pointer chunk_shared;
1432 HashMemoryChunk chunk;
1433
1434 Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
1435
1436 while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
1437 {
1438 size_t idx = 0;
1439
1440 /* Repartition all tuples in this chunk. */
1441 while (idx < chunk->used)
1442 {
1443 HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1444 MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1445 HashJoinTuple copyTuple;
1446 dsa_pointer shared;
1447 int bucketno;
1448 int batchno;
1449
1450 ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1451 &bucketno, &batchno);
1452
1453 Assert(batchno < hashtable->nbatch);
1454 if (batchno == 0)
1455 {
1456 /* It still belongs in batch 0. Copy to a new chunk. */
1457 copyTuple =
1459 HJTUPLE_OVERHEAD + tuple->t_len,
1460 &shared);
1461 copyTuple->hashvalue = hashTuple->hashvalue;
1462 memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
1463 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1464 copyTuple, shared);
1465 }
1466 else
1467 {
1468 size_t tuple_size =
1470
1471 /* It belongs in a later batch. */
1472 hashtable->batches[batchno].estimated_size += tuple_size;
1473 sts_puttuple(hashtable->batches[batchno].inner_tuples,
1474 &hashTuple->hashvalue, tuple);
1475 }
1476
1477 /* Count this tuple. */
1478 ++hashtable->batches[0].old_ntuples;
1479 ++hashtable->batches[batchno].ntuples;
1480
1481 idx += MAXALIGN(HJTUPLE_OVERHEAD +
1482 HJTUPLE_MINTUPLE(hashTuple)->t_len);
1483 }
1484
1485 /* Free this chunk. */
1486 dsa_free(hashtable->area, chunk_shared);
1487
1489 }
1490}
1491
1492/*
1493 * Help repartition inner batches 1..n.
1494 */
1495static void
1496ExecParallelHashRepartitionRest(HashJoinTable hashtable)
1497{
1498 ParallelHashJoinState *pstate = hashtable->parallel_state;
1499 int old_nbatch = pstate->old_nbatch;
1500 SharedTuplestoreAccessor **old_inner_tuples;
1501 ParallelHashJoinBatch *old_batches;
1502 int i;
1503
1504 /* Get our hands on the previous generation of batches. */
1505 old_batches = (ParallelHashJoinBatch *)
1506 dsa_get_address(hashtable->area, pstate->old_batches);
1507 old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
1508 for (i = 1; i < old_nbatch; ++i)
1509 {
1510 ParallelHashJoinBatch *shared =
1511 NthParallelHashJoinBatch(old_batches, i);
1512
1513 old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
1515 &pstate->fileset);
1516 }
1517
1518 /* Join in the effort to repartition them. */
1519 for (i = 1; i < old_nbatch; ++i)
1520 {
1521 MinimalTuple tuple;
1522 uint32 hashvalue;
1523
1524 /* Scan one partition from the previous generation. */
1525 sts_begin_parallel_scan(old_inner_tuples[i]);
1526 while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
1527 {
1528 size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1529 int bucketno;
1530 int batchno;
1531
1532 /* Decide which partition it goes to in the new generation. */
1533 ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
1534 &batchno);
1535
1536 hashtable->batches[batchno].estimated_size += tuple_size;
1537 ++hashtable->batches[batchno].ntuples;
1538 ++hashtable->batches[i].old_ntuples;
1539
1540 /* Store the tuple in its new batch. */
1541 sts_puttuple(hashtable->batches[batchno].inner_tuples,
1542 &hashvalue, tuple);
1543
1545 }
1546 sts_end_parallel_scan(old_inner_tuples[i]);
1547 }
1548
1549 pfree(old_inner_tuples);
1550}
1551
1552/*
1553 * Transfer the backend-local per-batch counters to the shared totals.
1554 */
1555static void
1556ExecParallelHashMergeCounters(HashJoinTable hashtable)
1557{
1558 ParallelHashJoinState *pstate = hashtable->parallel_state;
1559 int i;
1560
1561 LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
1562 pstate->total_tuples = 0;
1563 for (i = 0; i < hashtable->nbatch; ++i)
1564 {
1565 ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
1566
1567 batch->shared->size += batch->size;
1568 batch->shared->estimated_size += batch->estimated_size;
1569 batch->shared->ntuples += batch->ntuples;
1570 batch->shared->old_ntuples += batch->old_ntuples;
1571 batch->size = 0;
1572 batch->estimated_size = 0;
1573 batch->ntuples = 0;
1574 batch->old_ntuples = 0;
1575 pstate->total_tuples += batch->shared->ntuples;
1576 }
1577 LWLockRelease(&pstate->lock);
1578}
1579
1580/*
1581 * ExecHashIncreaseNumBuckets
1582 * increase the original number of buckets in order to reduce
1583 * number of tuples per bucket
1584 */
1585static void
1586ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
1587{
1588 HashMemoryChunk chunk;
1589
1590 /* do nothing if not an increase (it's called increase for a reason) */
1591 if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
1592 return;
1593
1594#ifdef HJDEBUG
1595 printf("Hashjoin %p: increasing nbuckets %d => %d\n",
1596 hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
1597#endif
1598
1599 hashtable->nbuckets = hashtable->nbuckets_optimal;
1600 hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1601
1602 Assert(hashtable->nbuckets > 1);
1603 Assert(hashtable->nbuckets <= (INT_MAX / 2));
1604 Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
1605
1606 /*
1607 * Just reallocate the proper number of buckets - we don't need to walk
1608 * through them - we can walk the dense-allocated chunks (just like in
1609 * ExecHashIncreaseNumBatches, but without all the copying into new
1610 * chunks)
1611 */
1612 hashtable->buckets.unshared =
1613 repalloc_array(hashtable->buckets.unshared,
1614 HashJoinTuple, hashtable->nbuckets);
1615
1616 memset(hashtable->buckets.unshared, 0,
1617 hashtable->nbuckets * sizeof(HashJoinTuple));
1618
1619 /* scan through all tuples in all chunks to rebuild the hash table */
1620 for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
1621 {
1622 /* process all tuples stored in this chunk */
1623 size_t idx = 0;
1624
1625 while (idx < chunk->used)
1626 {
1627 HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1628 int bucketno;
1629 int batchno;
1630
1631 ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1632 &bucketno, &batchno);
1633
1634 /* add the tuple to the proper bucket */
1635 hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1636 hashtable->buckets.unshared[bucketno] = hashTuple;
1637
1638 /* advance index past the tuple */
1639 idx += MAXALIGN(HJTUPLE_OVERHEAD +
1640 HJTUPLE_MINTUPLE(hashTuple)->t_len);
1641 }
1642
1643 /* allow this loop to be cancellable */
1644 CHECK_FOR_INTERRUPTS();
1645 }
1646}
1647
1648static void
1649ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
1650{
1651 ParallelHashJoinState *pstate = hashtable->parallel_state;
1652 int i;
1653 HashMemoryChunk chunk;
1654 dsa_pointer chunk_s;
1655
1657
1658 /*
1659 * It's unlikely, but we need to be prepared for new participants to show
1660 * up while we're in the middle of this operation so we need to switch on
1661 * barrier phase here.
1662 */
1664 {
1666 /* Elect one participant to prepare to increase nbuckets. */
1668 WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
1669 {
1670 size_t size;
1671 dsa_pointer_atomic *buckets;
1672
1673 /* Double the size of the bucket array. */
1674 pstate->nbuckets *= 2;
1675 size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
1676 hashtable->batches[0].shared->size += size / 2;
1677 dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
1678 hashtable->batches[0].shared->buckets =
1679 dsa_allocate(hashtable->area, size);
1680 buckets = (dsa_pointer_atomic *)
1681 dsa_get_address(hashtable->area,
1682 hashtable->batches[0].shared->buckets);
1683 for (i = 0; i < pstate->nbuckets; ++i)
1685
1686 /* Put the chunk list onto the work queue. */
1687 pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
1688
1689 /* Clear the flag. */
1690 pstate->growth = PHJ_GROWTH_OK;
1691 }
1692 /* Fall through. */
1693
1695 /* Wait for the above to complete. */
1697 WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
1698 /* Fall through. */
1699
1701 /* Reinsert all tuples into the hash table. */
1704 while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
1705 {
1706 size_t idx = 0;
1707
1708 while (idx < chunk->used)
1709 {
1710 HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1711 dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
1712 int bucketno;
1713 int batchno;
1714
1715 ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1716 &bucketno, &batchno);
1717 Assert(batchno == 0);
1718
1719 /* add the tuple to the proper bucket */
1720 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1721 hashTuple, shared);
1722
1723 /* advance index past the tuple */
1724 idx += MAXALIGN(HJTUPLE_OVERHEAD +
1725 HJTUPLE_MINTUPLE(hashTuple)->t_len);
1726 }
1727
1728 /* allow this loop to be cancellable */
1729 CHECK_FOR_INTERRUPTS();
1730 }
1732 WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
1733 }
1734}
1735
1736/*
1737 * ExecHashTableInsert
1738 * insert a tuple into the hash table depending on the hash value
1739 * it may just go to a temp file for later batches
1740 *
1741 * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
1742 * tuple; the minimal case in particular is certain to happen while reloading
1743 * tuples from batch files. We could save some cycles in the regular-tuple
1744 * case by not forcing the slot contents into minimal form; not clear if it's
1745 * worth the messiness required.
1746 */
1747void
1748ExecHashTableInsert(HashJoinTable hashtable,
1749 TupleTableSlot *slot,
1750 uint32 hashvalue)
1751{
1752 bool shouldFree;
1753 MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1754 int bucketno;
1755 int batchno;
1756
1757 ExecHashGetBucketAndBatch(hashtable, hashvalue,
1758 &bucketno, &batchno);
1759
1760 /*
1761 * decide whether to put the tuple in the hash table or a temp file
1762 */
1763 if (batchno == hashtable->curbatch)
1764 {
1765 /*
1766 * put the tuple in hash table
1767 */
1768 HashJoinTuple hashTuple;
1769 int hashTupleSize;
1770 double ntuples = (hashtable->totalTuples - hashtable->skewTuples);
1771
1772 /* Create the HashJoinTuple */
1773 hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
1774 hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1775
1776 hashTuple->hashvalue = hashvalue;
1777 memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1778
1779 /*
1780 * We always reset the tuple-matched flag on insertion. This is okay
1781 * even when reloading a tuple from a batch file, since the tuple
1782 * could not possibly have been matched to an outer tuple before it
1783 * went into the batch file.
1784 */
1785 HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1786
1787 /* Push it onto the front of the bucket's list */
1788 hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1789 hashtable->buckets.unshared[bucketno] = hashTuple;
1790
1791 /*
1792 * Increase the (optimal) number of buckets if we just exceeded the
1793 * NTUP_PER_BUCKET threshold, but only when there's still a single
1794 * batch.
1795 */
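 /*
 * Editorial note: with NTUP_PER_BUCKET = 1 this doubles the target (e.g.
 * 1024 -> 2048) as soon as the number of non-skew tuples passes the
 * current nbuckets_optimal, so the deferred resize in MultiExecPrivateHash
 * keeps the average bucket chain near one tuple.
 */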
1796 if (hashtable->nbatch == 1 &&
1797 ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
1798 {
1799 /* Guard against integer overflow and alloc size overflow */
1800 if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
1801 hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
1802 {
1803 hashtable->nbuckets_optimal *= 2;
1804 hashtable->log2_nbuckets_optimal += 1;
1805 }
1806 }
1807
1808 /* Account for space used, and back off if we've used too much */
1809 hashtable->spaceUsed += hashTupleSize;
1810 if (hashtable->spaceUsed > hashtable->spacePeak)
1811 hashtable->spacePeak = hashtable->spaceUsed;
1812 if (hashtable->spaceUsed +
1813 hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
1814 > hashtable->spaceAllowed)
1815 ExecHashIncreaseNumBatches(hashtable);
1816 }
1817 else
1818 {
1819 /*
1820 * put the tuple into a temp file for later batches
1821 */
1822 Assert(batchno > hashtable->curbatch);
1823 ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(tuple),
1824 hashvalue,
1825 &hashtable->innerBatchFile[batchno],
1826 hashtable);
1827 }
1828
1829 if (shouldFree)
1830 heap_free_minimal_tuple(tuple);
1831}
1832
1833/*
1834 * ExecParallelHashTableInsert
1835 * insert a tuple into a shared hash table or shared batch tuplestore
1836 */
1837void
1838ExecParallelHashTableInsert(HashJoinTable hashtable,
1839 TupleTableSlot *slot,
1840 uint32 hashvalue)
1841{
1842 bool shouldFree;
1843 MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1844 dsa_pointer shared;
1845 int bucketno;
1846 int batchno;
1847
1848retry:
1849 ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1850
1851 if (batchno == 0)
1852 {
1853 HashJoinTuple hashTuple;
1854
1855 /* Try to load it into memory. */
1858 hashTuple = ExecParallelHashTupleAlloc(hashtable,
1859 HJTUPLE_OVERHEAD + tuple->t_len,
1860 &shared);
1861 if (hashTuple == NULL)
1862 goto retry;
1863
1864 /* Store the hash value in the HashJoinTuple header. */
1865 hashTuple->hashvalue = hashvalue;
1866 memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1867 HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1868
1869 /* Push it onto the front of the bucket's list */
1870 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1871 hashTuple, shared);
1872 }
1873 else
1874 {
1875 size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1876
1877 Assert(batchno > 0);
1878
1879 /* Try to preallocate space in the batch if necessary. */
1880 if (hashtable->batches[batchno].preallocated < tuple_size)
1881 {
1882 if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
1883 goto retry;
1884 }
1885
1886 Assert(hashtable->batches[batchno].preallocated >= tuple_size);
1887 hashtable->batches[batchno].preallocated -= tuple_size;
1888 sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
1889 tuple);
1890 }
1891 ++hashtable->batches[batchno].ntuples;
1892
1893 if (shouldFree)
1894 heap_free_minimal_tuple(tuple);
1895}
1896
1897/*
1898 * Insert a tuple into the current hash table. Unlike
1899 * ExecParallelHashTableInsert, this version is not prepared to send the tuple
1900 * to other batches or to run out of memory, and should only be called with
1901 * tuples that belong in the current batch once growth has been disabled.
1902 */
1903void
1904ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
1905 TupleTableSlot *slot,
1906 uint32 hashvalue)
1907{
1908 bool shouldFree;
1909 MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1910 HashJoinTuple hashTuple;
1911 dsa_pointer shared;
1912 int batchno;
1913 int bucketno;
1914
1915 ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1916 Assert(batchno == hashtable->curbatch);
1917 hashTuple = ExecParallelHashTupleAlloc(hashtable,
1918 HJTUPLE_OVERHEAD + tuple->t_len,
1919 &shared);
1920 hashTuple->hashvalue = hashvalue;
1921 memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1922 HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1923 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1924 hashTuple, shared);
1925
1926 if (shouldFree)
1927 heap_free_minimal_tuple(tuple);
1928}
1929
1930
1931/*
1932 * ExecHashGetBucketAndBatch
1933 * Determine the bucket number and batch number for a hash value
1934 *
1935 * Note: on-the-fly increases of nbatch must not change the bucket number
1936 * for a given hash code (since we don't move tuples to different hash
1937 * chains), and must only cause the batch number to remain the same or
1938 * increase. Our algorithm is
1939 * bucketno = hashvalue MOD nbuckets
1940 * batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
1941 * where nbuckets and nbatch are both expected to be powers of 2, so we can
1942 * do the computations by shifting and masking. (This assumes that all hash
1943 * functions are good about randomizing all their output bits, else we are
1944 * likely to have very skewed bucket or batch occupancy.)
1945 *
1946 * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
1947 * bucket count growth. Once we start batching, the value is fixed and does
1948 * not change over the course of the join (making it possible to compute batch
1949 * number the way we do here).
1950 *
1951 * nbatch is always a power of 2; we increase it only by doubling it. This
1952 * effectively adds one more bit to the top of the batchno. In very large
1953 * joins, we might run out of bits to add, so we do this by rotating the hash
1954 * value. This causes batchno to steal bits from bucketno when the number of
1955 * virtual buckets exceeds 2^32. It's better to have longer bucket chains
1956 * than to lose the ability to divide batches.
1957 */
1958void
1959ExecHashGetBucketAndBatch(HashJoinTable hashtable,
1960 uint32 hashvalue,
1961 int *bucketno,
1962 int *batchno)
1963{
1964 uint32 nbuckets = (uint32) hashtable->nbuckets;
1965 uint32 nbatch = (uint32) hashtable->nbatch;
1966
1967 if (nbatch > 1)
1968 {
1969 *bucketno = hashvalue & (nbuckets - 1);
1970 *batchno = pg_rotate_right32(hashvalue,
1971 hashtable->log2_nbuckets) & (nbatch - 1);
1972 }
1973 else
1974 {
1975 *bucketno = hashvalue & (nbuckets - 1);
1976 *batchno = 0;
1977 }
1978}
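/*
 * Editor's illustrative sketch (not part of nodeHash.c): a standalone
 * program demonstrating the bucketno/batchno computation described above.
 * The rotate helper is re-implemented locally; in the source this is
 * pg_rotate_right32().  The hash value and sizes are arbitrary examples.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t
rotate_right32(uint32_t word, int n)
{
	return (word >> n) | (word << (32 - n));
}

int
main(void)
{
	uint32_t	hashvalue = 0xDEADBEEF;
	uint32_t	nbuckets = 1024;	/* power of 2, log2 = 10 */
	int			log2_nbuckets = 10;
	uint32_t	nbatch = 4;			/* power of 2 */

	/* the low log2_nbuckets bits select the bucket ... */
	uint32_t	bucketno = hashvalue & (nbuckets - 1);
	/* ... and the next bits, rotated down, select the batch */
	uint32_t	batchno = rotate_right32(hashvalue, log2_nbuckets) & (nbatch - 1);

	printf("bucketno = %u, batchno = %u\n", bucketno, batchno);
	return 0;
}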
1979
1980/*
1981 * ExecScanHashBucket
1982 * scan a hash bucket for matches to the current outer tuple
1983 *
1984 * The current outer tuple must be stored in econtext->ecxt_outertuple.
1985 *
1986 * On success, the inner tuple is stored into hjstate->hj_CurTuple and
1987 * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
1988 * for the latter.
1989 */
1990bool
1991ExecScanHashBucket(HashJoinState *hjstate,
1992 ExprContext *econtext)
1993{
1994 ExprState *hjclauses = hjstate->hashclauses;
1995 HashJoinTable hashtable = hjstate->hj_HashTable;
1996 HashJoinTuple hashTuple = hjstate->hj_CurTuple;
1997 uint32 hashvalue = hjstate->hj_CurHashValue;
1998
1999 /*
2000 * hj_CurTuple is the address of the tuple last returned from the current
2001 * bucket, or NULL if it's time to start scanning a new bucket.
2002 *
2003 * If the tuple hashed to a skew bucket then scan the skew bucket
2004 * otherwise scan the standard hashtable bucket.
2005 */
2006 if (hashTuple != NULL)
2007 hashTuple = hashTuple->next.unshared;
2008 else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
2009 hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
2010 else
2011 hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
2012
2013 while (hashTuple != NULL)
2014 {
2015 if (hashTuple->hashvalue == hashvalue)
2016 {
2017 TupleTableSlot *inntuple;
2018
2019 /* insert hashtable's tuple into exec slot so ExecQual sees it */
2020 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2021 hjstate->hj_HashTupleSlot,
2022 false); /* do not pfree */
2023 econtext->ecxt_innertuple = inntuple;
2024
2025 if (ExecQualAndReset(hjclauses, econtext))
2026 {
2027 hjstate->hj_CurTuple = hashTuple;
2028 return true;
2029 }
2030 }
2031
2032 hashTuple = hashTuple->next.unshared;
2033 }
2034
2035 /*
2036 * no match
2037 */
2038 return false;
2039}
2040
2041/*
2042 * ExecParallelScanHashBucket
2043 * scan a hash bucket for matches to the current outer tuple
2044 *
2045 * The current outer tuple must be stored in econtext->ecxt_outertuple.
2046 *
2047 * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2048 * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2049 * for the latter.
2050 */
2051bool
2052ExecParallelScanHashBucket(HashJoinState *hjstate,
2053 ExprContext *econtext)
2054{
2055 ExprState *hjclauses = hjstate->hashclauses;
2056 HashJoinTable hashtable = hjstate->hj_HashTable;
2057 HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2058 uint32 hashvalue = hjstate->hj_CurHashValue;
2059
2060 /*
2061 * hj_CurTuple is the address of the tuple last returned from the current
2062 * bucket, or NULL if it's time to start scanning a new bucket.
2063 */
2064 if (hashTuple != NULL)
2065 hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2066 else
2067 hashTuple = ExecParallelHashFirstTuple(hashtable,
2068 hjstate->hj_CurBucketNo);
2069
2070 while (hashTuple != NULL)
2071 {
2072 if (hashTuple->hashvalue == hashvalue)
2073 {
2074 TupleTableSlot *inntuple;
2075
2076 /* insert hashtable's tuple into exec slot so ExecQual sees it */
2077 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2078 hjstate->hj_HashTupleSlot,
2079 false); /* do not pfree */
2080 econtext->ecxt_innertuple = inntuple;
2081
2082 if (ExecQualAndReset(hjclauses, econtext))
2083 {
2084 hjstate->hj_CurTuple = hashTuple;
2085 return true;
2086 }
2087 }
2088
2089 hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2090 }
2091
2092 /*
2093 * no match
2094 */
2095 return false;
2096}
2097
2098/*
2099 * ExecPrepHashTableForUnmatched
2100 * set up for a series of ExecScanHashTableForUnmatched calls
2101 */
2102void
2103ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
2104{
2105 /*----------
2106 * During this scan we use the HashJoinState fields as follows:
2107 *
2108 * hj_CurBucketNo: next regular bucket to scan
2109 * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
2110 * hj_CurTuple: last tuple returned, or NULL to start next bucket
2111 *----------
2112 */
2113 hjstate->hj_CurBucketNo = 0;
2114 hjstate->hj_CurSkewBucketNo = 0;
2115 hjstate->hj_CurTuple = NULL;
2116}
2117
2118/*
2119 * Decide if this process is allowed to run the unmatched scan. If so, the
2120 * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
2121 * Otherwise the batch is detached and false is returned.
2122 */
2123bool
2124ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
2125{
2126 HashJoinTable hashtable = hjstate->hj_HashTable;
2127 int curbatch = hashtable->curbatch;
2128 ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
2129
2130 Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
2131
2132 /*
2133 * It would not be deadlock-free to wait on the batch barrier, because it
2134 * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
2135 * already emitted tuples. Therefore, we'll hold a wait-free election:
2136 * only one process can continue to the next phase, and all others detach
2137 * from this batch. They can still do any work on other batches, if there
2138 * are any.
2139 */
2140 if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
2141 {
2142 /* This process considers the batch to be done. */
2143 hashtable->batches[hashtable->curbatch].done = true;
2144
2145 /* Make sure any temporary files are closed. */
2146 sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
2147 sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
2148
2149 /*
2150 * Track largest batch we've seen, which would normally happen in
2151 * ExecHashTableDetachBatch().
2152 */
2153 hashtable->spacePeak =
2154 Max(hashtable->spacePeak,
2155 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
2156 hashtable->curbatch = -1;
2157 return false;
2158 }
2159
2160 /* Now we are alone with this batch. */
2161 Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
2162
2163 /*
2164 * Has another process decided to give up early and command all processes
2165 * to skip the unmatched scan?
2166 */
2167 if (batch->skip_unmatched)
2168 {
2169 hashtable->batches[hashtable->curbatch].done = true;
2170 ExecHashTableDetachBatch(hashtable);
2171 return false;
2172 }
2173
2174 /* Now prepare the process local state, just as for non-parallel join. */
2175 ExecPrepHashTableForUnmatched(hjstate);
2176
2177 return true;
2178}
2179
2180/*
2181 * ExecScanHashTableForUnmatched
2182 * scan the hash table for unmatched inner tuples
2183 *
2184 * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2185 * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2186 * for the latter.
2187 */
2188bool
2189ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
2190{
2191 HashJoinTable hashtable = hjstate->hj_HashTable;
2192 HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2193
2194 for (;;)
2195 {
2196 /*
2197 * hj_CurTuple is the address of the tuple last returned from the
2198 * current bucket, or NULL if it's time to start scanning a new
2199 * bucket.
2200 */
2201 if (hashTuple != NULL)
2202 hashTuple = hashTuple->next.unshared;
2203 else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2204 {
2205 hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
2206 hjstate->hj_CurBucketNo++;
2207 }
2208 else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
2209 {
2210 int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
2211
2212 hashTuple = hashtable->skewBucket[j]->tuples;
2213 hjstate->hj_CurSkewBucketNo++;
2214 }
2215 else
2216 break; /* finished all buckets */
2217
2218 while (hashTuple != NULL)
2219 {
2220 if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2221 {
2222 TupleTableSlot *inntuple;
2223
2224 /* insert hashtable's tuple into exec slot */
2225 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2226 hjstate->hj_HashTupleSlot,
2227 false); /* do not pfree */
2228 econtext->ecxt_innertuple = inntuple;
2229
2230 /*
2231 * Reset temp memory each time; although this function doesn't
2232 * do any qual eval, the caller will, so let's keep it
2233 * parallel to ExecScanHashBucket.
2234 */
2235 ResetExprContext(econtext);
2236
2237 hjstate->hj_CurTuple = hashTuple;
2238 return true;
2239 }
2240
2241 hashTuple = hashTuple->next.unshared;
2242 }
2243
2244 /* allow this loop to be cancellable */
2245 CHECK_FOR_INTERRUPTS();
2246 }
2247
2248 /*
2249 * no more unmatched tuples
2250 */
2251 return false;
2252}
2253
2254/*
2255 * ExecParallelScanHashTableForUnmatched
2256 * scan the hash table for unmatched inner tuples, in parallel join
2257 *
2258 * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2259 * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2260 * for the latter.
2261 */
2262bool
2263ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
2264 ExprContext *econtext)
2265{
2266 HashJoinTable hashtable = hjstate->hj_HashTable;
2267 HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2268
2269 for (;;)
2270 {
2271 /*
2272 * hj_CurTuple is the address of the tuple last returned from the
2273 * current bucket, or NULL if it's time to start scanning a new
2274 * bucket.
2275 */
2276 if (hashTuple != NULL)
2277 hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2278 else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2279 hashTuple = ExecParallelHashFirstTuple(hashtable,
2280 hjstate->hj_CurBucketNo++);
2281 else
2282 break; /* finished all buckets */
2283
2284 while (hashTuple != NULL)
2285 {
2286 if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2287 {
2288 TupleTableSlot *inntuple;
2289
2290 /* insert hashtable's tuple into exec slot */
2291 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2292 hjstate->hj_HashTupleSlot,
2293 false); /* do not pfree */
2294 econtext->ecxt_innertuple = inntuple;
2295
2296 /*
2297 * Reset temp memory each time; although this function doesn't
2298 * do any qual eval, the caller will, so let's keep it
2299 * parallel to ExecScanHashBucket.
2300 */
2301 ResetExprContext(econtext);
2302
2303 hjstate->hj_CurTuple = hashTuple;
2304 return true;
2305 }
2306
2307 hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2308 }
2309
2310 /* allow this loop to be cancellable */
2311 CHECK_FOR_INTERRUPTS();
2312 }
2313
2314 /*
2315 * no more unmatched tuples
2316 */
2317 return false;
2318}
2319
2320/*
2321 * ExecHashTableReset
2322 *
2323 * reset hash table header for new batch
2324 */
2325void
2326ExecHashTableReset(HashJoinTable hashtable)
2327{
2328 MemoryContext oldcxt;
2329 int nbuckets = hashtable->nbuckets;
2330
2331 /*
2332 * Release all the hash buckets and tuples acquired in the prior pass, and
2333 * reinitialize the context for a new pass.
2334 */
2335 MemoryContextReset(hashtable->batchCxt);
2336 oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
2337
2338 /* Reallocate and reinitialize the hash bucket headers. */
2339 hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
2340
2341 hashtable->spaceUsed = 0;
2342
2343 MemoryContextSwitchTo(oldcxt);
2344
2345 /* Forget the chunks (the memory was freed by the context reset above). */
2346 hashtable->chunks = NULL;
2347}
2348
2349/*
2350 * ExecHashTableResetMatchFlags
2351 * Clear all the HeapTupleHeaderHasMatch flags in the table
2352 */
2353void
2354ExecHashTableResetMatchFlags(HashJoinTable hashtable)
2355{
2356 HashJoinTuple tuple;
2357 int i;
2358
2359 /* Reset all flags in the main table ... */
2360 for (i = 0; i < hashtable->nbuckets; i++)
2361 {
2362 for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
2363 tuple = tuple->next.unshared)
2364 HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2365 }
2366
2367 /* ... and the same for the skew buckets, if any */
2368 for (i = 0; i < hashtable->nSkewBuckets; i++)
2369 {
2370 int j = hashtable->skewBucketNums[i];
2371 HashSkewBucket *skewBucket = hashtable->skewBucket[j];
2372
2373 for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
2374 HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2375 }
2376}
2377
2378
2379void
2380ExecReScanHash(HashState *node)
2381{
2382 PlanState *outerPlan = outerPlanState(node);
2383
2384 /*
2385 * if chgParam of subnode is not null then plan will be re-scanned by
2386 * first ExecProcNode.
2387 */
2388 if (outerPlan->chgParam == NULL)
2389 ExecReScan(outerPlan);
2390}
2391
2392
2393/*
2394 * ExecHashBuildSkewHash
2395 *
2396 * Set up for skew optimization if we can identify the most common values
2397 * (MCVs) of the outer relation's join key. We make a skew hash bucket
2398 * for the hash value of each MCV, up to the number of slots allowed
2399 * based on available memory.
2400 */
2401static void
2402ExecHashBuildSkewHash(HashState *hashstate, HashJoinTable hashtable,
2403 Hash *node, int mcvsToUse)
2404{
2405 HeapTupleData *statsTuple;
2406 AttStatsSlot sslot;
2407
2408 /* Do nothing if planner didn't identify the outer relation's join key */
2409 if (!OidIsValid(node->skewTable))
2410 return;
2411 /* Also, do nothing if we don't have room for at least one skew bucket */
2412 if (mcvsToUse <= 0)
2413 return;
2414
2415 /*
2416 * Try to find the MCV statistics for the outer relation's join key.
2417 */
2418 statsTuple = SearchSysCache3(STATRELATTINH,
2419 ObjectIdGetDatum(node->skewTable),
2420 Int16GetDatum(node->skewColumn),
2421 BoolGetDatum(node->skewInherit));
2422 if (!HeapTupleIsValid(statsTuple))
2423 return;
2424
2425 if (get_attstatsslot(&sslot, statsTuple,
2426 STATISTIC_KIND_MCV, InvalidOid,
2427 ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
2428 {
2429 double frac;
2430 int nbuckets;
2431 int i;
2432
2433 if (mcvsToUse > sslot.nvalues)
2434 mcvsToUse = sslot.nvalues;
2435
2436 /*
2437 * Calculate the expected fraction of outer relation that will
2438 * participate in the skew optimization. If this isn't at least
2439 * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
2440 */
2441 frac = 0;
2442 for (i = 0; i < mcvsToUse; i++)
2443 frac += sslot.numbers[i];
2444 if (frac < SKEW_MIN_OUTER_FRACTION)
2445 {
2446 free_attstatsslot(&sslot);
2447 ReleaseSysCache(statsTuple);
2448 return;
2449 }
2450
2451 /*
2452 * Okay, set up the skew hashtable.
2453 *
2454 * skewBucket[] is an open addressing hashtable with a power of 2 size
2455 * that is greater than the number of MCV values. (This ensures there
2456 * will be at least one null entry, so searches will always
2457 * terminate.)
2458 *
2459 * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
2460 * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
2461 * since we limit pg_statistic entries to much less than that.
2462 */
2463 nbuckets = pg_nextpower2_32(mcvsToUse + 1);
2464 /* use two more bits just to help avoid collisions */
2465 nbuckets <<= 2;
2466
2467 hashtable->skewEnabled = true;
2468 hashtable->skewBucketLen = nbuckets;
2469
2470 /*
2471 * We allocate the bucket memory in the hashtable's batch context. It
2472 * is only needed during the first batch, and this ensures it will be
2473 * automatically removed once the first batch is done.
2474 */
2475 hashtable->skewBucket = (HashSkewBucket **)
2476 MemoryContextAllocZero(hashtable->batchCxt,
2477 nbuckets * sizeof(HashSkewBucket *));
2478 hashtable->skewBucketNums = (int *)
2479 MemoryContextAllocZero(hashtable->batchCxt,
2480 mcvsToUse * sizeof(int));
2481
2482 hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
2483 + mcvsToUse * sizeof(int);
2484 hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
2485 + mcvsToUse * sizeof(int);
2486 if (hashtable->spaceUsed > hashtable->spacePeak)
2487 hashtable->spacePeak = hashtable->spaceUsed;
2488
2489 /*
2490 * Create a skew bucket for each MCV hash value.
2491 *
2492 * Note: it is very important that we create the buckets in order of
2493 * decreasing MCV frequency. If we have to remove some buckets, they
2494 * must be removed in reverse order of creation (see notes in
2495 * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
2496 * be removed first.
2497 */
2498
2499 for (i = 0; i < mcvsToUse; i++)
2500 {
2501 uint32 hashvalue;
2502 int bucket;
2503
2504 hashvalue = DatumGetUInt32(FunctionCall1Coll(hashstate->skew_hashfunction,
2505 hashstate->skew_collation,
2506 sslot.values[i]));
2507
2508 /*
2509 * While we have not hit a hole in the hashtable and have not hit
2510 * the desired bucket, we have collided with some previous hash
2511 * value, so try the next bucket location. NB: this code must
2512 * match ExecHashGetSkewBucket.
2513 */
2514 bucket = hashvalue & (nbuckets - 1);
2515 while (hashtable->skewBucket[bucket] != NULL &&
2516 hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2517 bucket = (bucket + 1) & (nbuckets - 1);
2518
2519 /*
2520 * If we found an existing bucket with the same hashvalue, leave
2521 * it alone. It's okay for two MCVs to share a hashvalue.
2522 */
2523 if (hashtable->skewBucket[bucket] != NULL)
2524 continue;
2525
2526 /* Okay, create a new skew bucket for this hashvalue. */
2527 hashtable->skewBucket[bucket] = (HashSkewBucket *)
2528 MemoryContextAlloc(hashtable->batchCxt,
2529 sizeof(HashSkewBucket));
2530 hashtable->skewBucket[bucket]->hashvalue = hashvalue;
2531 hashtable->skewBucket[bucket]->tuples = NULL;
2532 hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
2533 hashtable->nSkewBuckets++;
2534 hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
2535 hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
2536 if (hashtable->spaceUsed > hashtable->spacePeak)
2537 hashtable->spacePeak = hashtable->spaceUsed;
2538 }
2539
2540 free_attstatsslot(&sslot);
2541 }
2542
2543 ReleaseSysCache(statsTuple);
2544}
2545
2546/*
2547 * ExecHashGetSkewBucket
2548 *
2549 * Returns the index of the skew bucket for this hashvalue,
2550 * or INVALID_SKEW_BUCKET_NO if the hashvalue is not
2551 * associated with any active skew bucket.
2552 */
2553int
2554ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
2555{
2556 int bucket;
2557
2558 /*
2559 * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
2560 * particular, this happens after the initial batch is done).
2561 */
2562 if (!hashtable->skewEnabled)
2563 return INVALID_SKEW_BUCKET_NO;
2564
2565 /*
2566 * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
2567 */
2568 bucket = hashvalue & (hashtable->skewBucketLen - 1);
2569
2570 /*
2571 * While we have not hit a hole in the hashtable and have not hit the
2572 * desired bucket, we have collided with some other hash value, so try the
2573 * next bucket location.
2574 */
2575 while (hashtable->skewBucket[bucket] != NULL &&
2576 hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2577 bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
2578
2579 /*
2580 * Found the desired bucket?
2581 */
2582 if (hashtable->skewBucket[bucket] != NULL)
2583 return bucket;
2584
2585 /*
2586 * There must not be any hashtable entry for this hash value.
2587 */
2588 return INVALID_SKEW_BUCKET_NO;
2589}
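/*
 * Editor's illustrative sketch (not part of nodeHash.c): how the skew
 * hashtable is sized and probed, as described in the two functions above.
 * nextpower2() stands in for pg_nextpower2_32(); all values are local to
 * the example.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t
nextpower2(uint32_t n)
{
	uint32_t	p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int
main(void)
{
	int			mcvsToUse = 100;
	/* power of 2 greater than the MCV count, then two extra bits */
	uint32_t	nbuckets = nextpower2(mcvsToUse + 1) << 2;
	uint32_t	hashvalue = 0xC0FFEE;
	/* skewBucketLen is a power of 2, so modulo is a mask */
	uint32_t	bucket = hashvalue & (nbuckets - 1);

	/*
	 * Open-addressing probe: on a collision, step to the next slot with the
	 * same mask; a NULL slot terminates the search.
	 */
	uint32_t	next_probe = (bucket + 1) & (nbuckets - 1);

	printf("skewBucketLen = %u, first probe = %u, next probe = %u\n",
		   nbuckets, bucket, next_probe);
	return 0;
}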
2590
2591/*
2592 * ExecHashSkewTableInsert
2593 *
2594 * Insert a tuple into the skew hashtable.
2595 *
2596 * This should generally match up with the current-batch case in
2597 * ExecHashTableInsert.
2598 */
2599static void
2600ExecHashSkewTableInsert(HashJoinTable hashtable,
2601 TupleTableSlot *slot,
2602 uint32 hashvalue,
2603 int bucketNumber)
2604{
2605 bool shouldFree;
2606 MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
2607 HashJoinTuple hashTuple;
2608 int hashTupleSize;
2609
2610 /* Create the HashJoinTuple */
2611 hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2612 hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
2613 hashTupleSize);
2614 hashTuple->hashvalue = hashvalue;
2615 memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
2616 HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
2617
2618 /* Push it onto the front of the skew bucket's list */
2619 hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
2620 hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
2621 Assert(hashTuple != hashTuple->next.unshared);
2622
2623 /* Account for space used, and back off if we've used too much */
2624 hashtable->spaceUsed += hashTupleSize;
2625 hashtable->spaceUsedSkew += hashTupleSize;
2626 if (hashtable->spaceUsed > hashtable->spacePeak)
2627 hashtable->spacePeak = hashtable->spaceUsed;
2628 while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
2629 ExecHashRemoveNextSkewBucket(hashtable);
2630
2631 /* Check we are not over the total spaceAllowed, either */
2632 if (hashtable->spaceUsed > hashtable->spaceAllowed)
2633 ExecHashIncreaseNumBatches(hashtable);
2634
2635 if (shouldFree)
2636 heap_free_minimal_tuple(tuple);
2637}
2638
2639/*
2640 * ExecHashRemoveNextSkewBucket
2641 *
2642 * Remove the least valuable skew bucket by pushing its tuples into
2643 * the main hash table.
2644 */
2645static void
2646ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
2647{
2648 int bucketToRemove;
2649 HashSkewBucket *bucket;
2650 uint32 hashvalue;
2651 int bucketno;
2652 int batchno;
2653 HashJoinTuple hashTuple;
2654
2655 /* Locate the bucket to remove */
2656 bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
2657 bucket = hashtable->skewBucket[bucketToRemove];
2658
2659 /*
2660 * Calculate which bucket and batch the tuples belong to in the main
2661 * hashtable. They all have the same hash value, so it's the same for all
2662 * of them. Also note that it's not possible for nbatch to increase while
2663 * we are processing the tuples.
2664 */
2665 hashvalue = bucket->hashvalue;
2666 ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
2667
2668 /* Process all tuples in the bucket */
2669 hashTuple = bucket->tuples;
2670 while (hashTuple != NULL)
2671 {
2672 HashJoinTuple nextHashTuple = hashTuple->next.unshared;
2673 MinimalTuple tuple;
2674 Size tupleSize;
2675
2676 /*
2677 * This code must agree with ExecHashTableInsert. We do not use
2678 * ExecHashTableInsert directly as ExecHashTableInsert expects a
2679 * TupleTableSlot while we already have HashJoinTuples.
2680 */
2681 tuple = HJTUPLE_MINTUPLE(hashTuple);
2682 tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2683
2684 /* Decide whether to put the tuple in the hash table or a temp file */
2685 if (batchno == hashtable->curbatch)
2686 {
2687 /* Move the tuple to the main hash table */
2688 HashJoinTuple copyTuple;
2689
2690 /*
2691 * We must copy the tuple into the dense storage, else it will not
2692 * be found by, eg, ExecHashIncreaseNumBatches.
2693 */
2694 copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
2695 memcpy(copyTuple, hashTuple, tupleSize);
2696 pfree(hashTuple);
2697
2698 copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
2699 hashtable->buckets.unshared[bucketno] = copyTuple;
2700
2701 /* We have reduced skew space, but overall space doesn't change */
2702 hashtable->spaceUsedSkew -= tupleSize;
2703 }
2704 else
2705 {
2706 /* Put the tuple into a temp file for later batches */
2707 Assert(batchno > hashtable->curbatch);
2708 ExecHashJoinSaveTuple(tuple, hashvalue,
2709 &hashtable->innerBatchFile[batchno],
2710 hashtable);
2711 pfree(hashTuple);
2712 hashtable->spaceUsed -= tupleSize;
2713 hashtable->spaceUsedSkew -= tupleSize;
2714 }
2715
2716 hashTuple = nextHashTuple;
2717
2718 /* allow this loop to be cancellable */
2719 CHECK_FOR_INTERRUPTS();
2720 }
2721
2722 /*
2723 * Free the bucket struct itself and reset the hashtable entry to NULL.
2724 *
2725 * NOTE: this is not nearly as simple as it looks on the surface, because
2726 * of the possibility of collisions in the hashtable. Suppose that hash
2727 * values A and B collide at a particular hashtable entry, and that A was
2728 * entered first so B gets shifted to a different table entry. If we were
2729 * to remove A first then ExecHashGetSkewBucket would mistakenly start
2730 * reporting that B is not in the hashtable, because it would hit the NULL
2731 * before finding B. However, we always remove entries in the reverse
2732 * order of creation, so this failure cannot happen.
2733 */
2734 hashtable->skewBucket[bucketToRemove] = NULL;
2735 hashtable->nSkewBuckets--;
2736 pfree(bucket);
2737 hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
2738 hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
2739
2740 /*
2741 * If we have removed all skew buckets then give up on skew optimization.
2742 * Release the arrays since they aren't useful any more.
2743 */
2744 if (hashtable->nSkewBuckets == 0)
2745 {
2746 hashtable->skewEnabled = false;
2747 pfree(hashtable->skewBucket);
2748 pfree(hashtable->skewBucketNums);
2749 hashtable->skewBucket = NULL;
2750 hashtable->skewBucketNums = NULL;
2751 hashtable->spaceUsed -= hashtable->spaceUsedSkew;
2752 hashtable->spaceUsedSkew = 0;
2753 }
2754}
2755
2756/*
2757 * Reserve space in the DSM segment for instrumentation data.
2758 */
2759void
2760ExecHashEstimate(HashState *node, ParallelContext *pcxt)
2761{
2762 size_t size;
2763
2764 /* don't need this if not instrumenting or no workers */
2765 if (!node->ps.instrument || pcxt->nworkers == 0)
2766 return;
2767
2768 size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
2769 size = add_size(size, offsetof(SharedHashInfo, hinstrument));
2770 shm_toc_estimate_chunk(&pcxt->estimator, size);
2771 shm_toc_estimate_keys(&pcxt->estimator, 1);
2772}
2773
2774/*
2775 * Set up a space in the DSM for all workers to record instrumentation data
2776 * about their hash table.
2777 */
2778void
2779ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
2780{
2781 size_t size;
2782
2783 /* don't need this if not instrumenting or no workers */
2784 if (!node->ps.instrument || pcxt->nworkers == 0)
2785 return;
2786
2787 size = offsetof(SharedHashInfo, hinstrument) +
2788 pcxt->nworkers * sizeof(HashInstrumentation);
2789 node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
2790
2791 /* Each per-worker area must start out as zeroes. */
2792 memset(node->shared_info, 0, size);
2793
2794 node->shared_info->num_workers = pcxt->nworkers;
2795 shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
2796 node->shared_info);
2797}
2798
2799/*
2800 * Locate the DSM space for hash table instrumentation data that we'll write
2801 * to at shutdown time.
2802 */
2803void
2804ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
2805{
2806 SharedHashInfo *shared_info;
2807
2808 /* don't need this if not instrumenting */
2809 if (!node->ps.instrument)
2810 return;
2811
2812 /*
2813 * Find our entry in the shared area, and set up a pointer to it so that
2814 * we'll accumulate stats there when shutting down or rebuilding the hash
2815 * table.
2816 */
2817 shared_info = (SharedHashInfo *)
2818 shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
2819 node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
2820}
2821
2822/*
2823 * Collect EXPLAIN stats if needed, saving them into DSM memory if
2824 * ExecHashInitializeWorker was called, or local storage if not. In the
2825 * parallel case, this must be done in ExecShutdownHash() rather than
2826 * ExecEndHash() because the latter runs after we've detached from the DSM
2827 * segment.
2828 */
2829void
2830ExecShutdownHash(HashState *node)
2831{
2832 /* Allocate save space if EXPLAIN'ing and we didn't do so already */
2833 if (node->ps.instrument && !node->hinstrument)
2834 node->hinstrument = palloc0_object(HashInstrumentation);
2835 /* Now accumulate data for the current (final) hash table */
2836 if (node->hinstrument && node->hashtable)
2837 ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
2838}
2839
2840/*
2841 * Retrieve instrumentation data from workers before the DSM segment is
2842 * detached, so that EXPLAIN can access it.
2843 */
2844void
2845ExecHashRetrieveInstrumentation(HashState *node)
2846{
2847 SharedHashInfo *shared_info = node->shared_info;
2848 size_t size;
2849
2850 if (shared_info == NULL)
2851 return;
2852
2853 /* Replace node->shared_info with a copy in backend-local memory. */
2854 size = offsetof(SharedHashInfo, hinstrument) +
2855 shared_info->num_workers * sizeof(HashInstrumentation);
2856 node->shared_info = palloc(size);
2857 memcpy(node->shared_info, shared_info, size);
2858}
2859
2860/*
2861 * Accumulate instrumentation data from 'hashtable' into an
2862 * initially-zeroed HashInstrumentation struct.
2863 *
2864 * This is used to merge information across successive hash table instances
2865 * within a single plan node. We take the maximum values of each interesting
2866 * number. The largest nbuckets and largest nbatch values might have occurred
2867 * in different instances, so there's some risk of confusion from reporting
2868 * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
2869 * issue if we don't report the largest values. Similarly, we want to report
2870 * the largest spacePeak regardless of whether it happened in the same
2871 * instance as the largest nbuckets or nbatch. All the instances should have
2872 * the same nbuckets_original and nbatch_original; but there's little value
2873 * in depending on that here, so handle them the same way.
2874 */
2875void
2876ExecHashAccumInstrumentation(HashInstrumentation *instrument,
2877 HashJoinTable hashtable)
2878{
2879 instrument->nbuckets = Max(instrument->nbuckets,
2880 hashtable->nbuckets);
2881 instrument->nbuckets_original = Max(instrument->nbuckets_original,
2882 hashtable->nbuckets_original);
2883 instrument->nbatch = Max(instrument->nbatch,
2884 hashtable->nbatch);
2885 instrument->nbatch_original = Max(instrument->nbatch_original,
2886 hashtable->nbatch_original);
2887 instrument->space_peak = Max(instrument->space_peak,
2888 hashtable->spacePeak);
2889}
2890
2891/*
2892 * Allocate 'size' bytes from the currently active HashMemoryChunk
2893 */
2894static void *
2895dense_alloc(HashJoinTable hashtable, Size size)
2896{
2897 HashMemoryChunk newChunk;
2898 char *ptr;
2899
2900 /* just in case the size is not already aligned properly */
2901 size = MAXALIGN(size);
2902
2903 /*
2904 * If tuple size is larger than threshold, allocate a separate chunk.
2905 */
2906 if (size > HASH_CHUNK_THRESHOLD)
2907 {
2908 /* allocate new chunk and put it at the beginning of the list */
2909 newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2910 HASH_CHUNK_HEADER_SIZE + size);
2911 newChunk->maxlen = size;
2912 newChunk->used = size;
2913 newChunk->ntuples = 1;
2914
2915 /*
2916 * Add this chunk to the list after the first existing chunk, so that
2917 * we don't lose the remaining space in the "current" chunk.
2918 */
2919 if (hashtable->chunks != NULL)
2920 {
2921 newChunk->next = hashtable->chunks->next;
2922 hashtable->chunks->next.unshared = newChunk;
2923 }
2924 else
2925 {
2926 newChunk->next.unshared = hashtable->chunks;
2927 hashtable->chunks = newChunk;
2928 }
2929
2930 return HASH_CHUNK_DATA(newChunk);
2931 }
2932
2933 /*
2934 * See if we have enough space for it in the current chunk (if any). If
2935 * not, allocate a fresh chunk.
2936 */
2937 if ((hashtable->chunks == NULL) ||
2938 (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
2939 {
2940 /* allocate new chunk and put it at the beginning of the list */
2941 newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2942 HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
2943
2944 newChunk->maxlen = HASH_CHUNK_SIZE;
2945 newChunk->used = size;
2946 newChunk->ntuples = 1;
2947
2948 newChunk->next.unshared = hashtable->chunks;
2949 hashtable->chunks = newChunk;
2950
2951 return HASH_CHUNK_DATA(newChunk);
2952 }
2953
2954 /* There is enough space in the current chunk, let's add the tuple */
2955 ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
2956 hashtable->chunks->used += size;
2957 hashtable->chunks->ntuples += 1;
2958
2959 /* return pointer to the start of the tuple memory */
2960 return ptr;
2961}
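/*
 * Editor's illustrative sketch (not part of nodeHash.c): a minimal,
 * self-contained version of the chunked ("dense") allocation idea above,
 * using malloc instead of a memory context.  The chunk and threshold sizes
 * are assumptions for the example, not the source's values.
 */
#include <stdlib.h>
#include <string.h>

#define CHUNK_SIZE		(32 * 1024)
#define CHUNK_THRESHOLD	(CHUNK_SIZE / 4)

typedef struct Chunk
{
	struct Chunk *next;
	size_t		maxlen;
	size_t		used;
	char		data[];			/* tuples are packed densely in here */
} Chunk;

static Chunk *chunks = NULL;	/* head of the chunk list */

static void *
dense_alloc_sketch(size_t size)
{
	Chunk	   *c;

	size = (size + 7) & ~(size_t) 7;	/* crude MAXALIGN substitute */

	/* oversized requests get a dedicated chunk, kept behind the current one */
	if (size > CHUNK_THRESHOLD)
	{
		c = malloc(sizeof(Chunk) + size);
		c->maxlen = c->used = size;
		if (chunks != NULL)
		{
			c->next = chunks->next;
			chunks->next = c;
		}
		else
		{
			c->next = NULL;
			chunks = c;
		}
		return c->data;
	}

	/* start a fresh chunk if the current one can't hold the request */
	if (chunks == NULL || chunks->maxlen - chunks->used < size)
	{
		c = malloc(sizeof(Chunk) + CHUNK_SIZE);
		c->maxlen = CHUNK_SIZE;
		c->used = 0;
		c->next = chunks;
		chunks = c;
	}

	/* carve the next 'size' bytes off the current chunk */
	chunks->used += size;
	return chunks->data + chunks->used - size;
}

int
main(void)
{
	char	   *p = dense_alloc_sketch(64);

	memset(p, 0, 64);
	return 0;
}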
2962
2963/*
2964 * Allocate space for a tuple in shared dense storage. This is equivalent to
2965 * dense_alloc but for Parallel Hash using shared memory.
2966 *
2967 * While loading a tuple into shared memory, we might run out of memory and
2968 * decide to repartition, or determine that the load factor is too high and
2969 * decide to expand the bucket array, or discover that another participant has
2970 * commanded us to help do that. Return NULL if number of buckets or batches
2971 * has changed, indicating that the caller must retry (considering the
2972 * possibility that the tuple no longer belongs in the same batch).
2973 */
2974static HashJoinTuple
2975ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
2976 dsa_pointer *shared)
2977{
2978 ParallelHashJoinState *pstate = hashtable->parallel_state;
2979 dsa_pointer chunk_shared;
2980 HashMemoryChunk chunk;
2981 Size chunk_size;
2982 HashJoinTuple result;
2983 int curbatch = hashtable->curbatch;
2984
2985 size = MAXALIGN(size);
2986
2987 /*
2988 * Fast path: if there is enough space in this backend's current chunk,
2989 * then we can allocate without any locking.
2990 */
2991 chunk = hashtable->current_chunk;
2992 if (chunk != NULL &&
2993 size <= HASH_CHUNK_THRESHOLD &&
2994 chunk->maxlen - chunk->used >= size)
2995 {
2996
2997 chunk_shared = hashtable->current_chunk_shared;
2998 Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
2999 *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
3000 result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
3001 chunk->used += size;
3002
3003 Assert(chunk->used <= chunk->maxlen);
3004 Assert(result == dsa_get_address(hashtable->area, *shared));
3005
3006 return result;
3007 }
3008
3009 /* Slow path: try to allocate a new chunk. */
3010 LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3011
3012 /*
3013 * Check if we need to help increase the number of buckets or batches.
3014 */
3015 if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3016 pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3017 {
3018 ParallelHashGrowth growth = pstate->growth;
3019
3020 hashtable->current_chunk = NULL;
3021 LWLockRelease(&pstate->lock);
3022
3023 /* Another participant has commanded us to help grow. */
3024 if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3025 ExecParallelHashIncreaseNumBatches(hashtable);
3026 else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3027 ExecParallelHashIncreaseNumBuckets(hashtable);
3028
3029 /* The caller must retry. */
3030 return NULL;
3031 }
3032
3033 /* Oversized tuples get their own chunk. */
3034 if (size > HASH_CHUNK_THRESHOLD)
3035 chunk_size = size + HASH_CHUNK_HEADER_SIZE;
3036 else
3037 chunk_size = HASH_CHUNK_SIZE;
3038
3039 /* Check if it's time to grow batches or buckets. */
3040 if (pstate->growth != PHJ_GROWTH_DISABLED)
3041 {
3042 Assert(curbatch == 0);
3043 Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
3044
3045 /*
3046 * Check if our space limit would be exceeded. To avoid choking on
3047 * very large tuples or very low hash_mem setting, we'll always allow
3048 * each backend to allocate at least one chunk.
3049 */
3050 if (hashtable->batches[0].at_least_one_chunk &&
3051 hashtable->batches[0].shared->size +
3052 chunk_size > pstate->space_allowed)
3053 {
3054 pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3055 hashtable->batches[0].shared->space_exhausted = true;
3056 LWLockRelease(&pstate->lock);
3057
3058 return NULL;
3059 }
3060
3061 /* Check if our load factor limit would be exceeded. */
3062 if (hashtable->nbatch == 1)
3063 {
3064 hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
3065 hashtable->batches[0].ntuples = 0;
3066 /* Guard against integer overflow and alloc size overflow */
3067 if (hashtable->batches[0].shared->ntuples + 1 >
3068 hashtable->nbuckets * NTUP_PER_BUCKET &&
3069 hashtable->nbuckets < (INT_MAX / 2) &&
3070 hashtable->nbuckets * 2 <=
3071 MaxAllocSize / sizeof(dsa_pointer_atomic))
3072 {
3073 pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
3074 LWLockRelease(&pstate->lock);
3075
3076 return NULL;
3077 }
3078 }
3079 }
3080
3081 /* We are cleared to allocate a new chunk. */
3082 chunk_shared = dsa_allocate(hashtable->area, chunk_size);
3083 hashtable->batches[curbatch].shared->size += chunk_size;
3084 hashtable->batches[curbatch].at_least_one_chunk = true;
3085
3086 /* Set up the chunk. */
3087 chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
3088 *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
3089 chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
3090 chunk->used = size;
3091
3092 /*
3093 * Push it onto the list of chunks, so that it can be found if we need to
3094 * increase the number of buckets or batches (batch 0 only) and later for
3095 * freeing the memory (all batches).
3096 */
3097 chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
3098 hashtable->batches[curbatch].shared->chunks = chunk_shared;
3099
3100 if (size <= HASH_CHUNK_THRESHOLD)
3101 {
3102 /*
3103 * Make this the current chunk so that we can use the fast path to
3104 * fill the rest of it up in future calls.
3105 */
3106 hashtable->current_chunk = chunk;
3107 hashtable->current_chunk_shared = chunk_shared;
3108 }
3109 LWLockRelease(&pstate->lock);
3110
3111 Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
3112 result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
3113
3114 return result;
3115}
3116
3117/*
3118 * One backend needs to set up the shared batch state including tuplestores.
3119 * Other backends will ensure they have correctly configured accessors by
3120 * calling ExecParallelHashEnsureBatchAccessors().
3121 */
3122static void
3123ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
3124{
3125 ParallelHashJoinState *pstate = hashtable->parallel_state;
3126 ParallelHashJoinBatch *batches;
3127 MemoryContext oldcxt;
3128 int i;
3129
3130 Assert(hashtable->batches == NULL);
3131
3132 /* Allocate space. */
3133 pstate->batches =
3134 dsa_allocate0(hashtable->area,
3135 EstimateParallelHashJoinBatch(hashtable) * nbatch);
3136 pstate->nbatch = nbatch;
3137 batches = dsa_get_address(hashtable->area, pstate->batches);
3138
3139 /*
3140 * Use hash join spill memory context to allocate accessors, including
3141 * buffers for the temporary files.
3142 */
3143 oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3144
3145 /* Allocate this backend's accessor array. */
3146 hashtable->nbatch = nbatch;
3147 hashtable->batches =
3149 palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3150 /* Set up the shared state, tuplestores and backend-local accessors. */
3151 for (i = 0; i < hashtable->nbatch; ++i)
3152 {
3153 ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3154 ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3155 char name[MAXPGPATH];
3156
3157 /*
3158 * All members of shared were zero-initialized. We just need to set
3159 * up the Barrier.
3160 */
3161 BarrierInit(&shared->batch_barrier, 0);
3162 if (i == 0)
3163 {
3164 /* Batch 0 doesn't need to be loaded. */
3165 BarrierAttach(&shared->batch_barrier);
3166 while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
3167 BarrierArriveAndWait(&shared->batch_barrier, 0);
3168 BarrierDetach(&shared->batch_barrier);
3169 }
3170
3171 /* Initialize accessor state. All members were zero-initialized. */
3172 accessor->shared = shared;
3173
3174 /* Initialize the shared tuplestores. */
3175 snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
3176 accessor->inner_tuples =
3177 sts_initialize(ParallelHashJoinBatchInner(shared),
3178 pstate->nparticipants,
3179 ParallelWorkerNumber + 1,
3180 sizeof(uint32),
3181 SHARED_TUPLESTORE_SINGLE_PASS,
3182 &pstate->fileset,
3183 name);
3184 snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
3185 accessor->outer_tuples =
3186 sts_initialize(ParallelHashJoinBatchOuter(shared,
3187 pstate->nparticipants),
3188 pstate->nparticipants,
3189 ParallelWorkerNumber + 1,
3190 sizeof(uint32),
3191 SHARED_TUPLESTORE_SINGLE_PASS,
3192 &pstate->fileset,
3193 name);
3194 }
3195
3196 MemoryContextSwitchTo(oldcxt);
3197}
3198
3199/*
3200 * Free the current set of ParallelHashJoinBatchAccessor objects.
3201 */
3202static void
3203ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
3204{
3205 int i;
3206
3207 for (i = 0; i < hashtable->nbatch; ++i)
3208 {
3209 /* Make sure no files are left open. */
3210 sts_end_write(hashtable->batches[i].inner_tuples);
3211 sts_end_write(hashtable->batches[i].outer_tuples);
3212 sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3213 sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3214 }
3215 pfree(hashtable->batches);
3216 hashtable->batches = NULL;
3217}
3218
3219/*
3220 * Make sure this backend has up-to-date accessors for the current set of
3221 * batches.
3222 */
3223static void
3224ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
3225{
3226 ParallelHashJoinState *pstate = hashtable->parallel_state;
3227 ParallelHashJoinBatch *batches;
3228 MemoryContext oldcxt;
3229 int i;
3230
3231 if (hashtable->batches != NULL)
3232 {
3233 if (hashtable->nbatch == pstate->nbatch)
3234 return;
3235 ExecParallelHashCloseBatchAccessors(hashtable);
3236 }
3237
3238 /*
3239 * We should never see a state where the batch-tracking array is freed,
3240 * because we should have given up sooner if we join when the build
3241 * barrier has reached the PHJ_BUILD_FREE phase.
3242 */
3243 Assert(DsaPointerIsValid(pstate->batches));
3244
3245 /*
3246 * Use hash join spill memory context to allocate accessors, including
3247 * buffers for the temporary files.
3248 */
3249 oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3250
3251 /* Allocate this backend's accessor array. */
3252 hashtable->nbatch = pstate->nbatch;
3253 hashtable->batches =
3254 palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3255
3256 /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
3257 batches = (ParallelHashJoinBatch *)
3258 dsa_get_address(hashtable->area, pstate->batches);
3259
3260 /* Set up the accessor array and attach to the tuplestores. */
3261 for (i = 0; i < hashtable->nbatch; ++i)
3262 {
3263 ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3264 ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3265
3266 accessor->shared = shared;
3267 accessor->preallocated = 0;
3268 accessor->done = false;
3269 accessor->outer_eof = false;
3270 accessor->inner_tuples =
3271 sts_attach(ParallelHashJoinBatchInner(shared),
3272 ParallelWorkerNumber + 1,
3273 &pstate->fileset);
3274 accessor->outer_tuples =
3275 sts_attach(ParallelHashJoinBatchOuter(shared,
3276 pstate->nparticipants),
3277 ParallelWorkerNumber + 1,
3278 &pstate->fileset);
3279 }
3280
3281 MemoryContextSwitchTo(oldcxt);
3282}
3283
3284/*
3285 * Allocate an empty shared memory hash table for a given batch.
3286 */
3287void
3288ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
3289{
3290 ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
3291 dsa_pointer_atomic *buckets;
3292 int nbuckets = hashtable->parallel_state->nbuckets;
3293 int i;
3294
3295 batch->buckets =
3296 dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
3297 buckets = (dsa_pointer_atomic *)
3298 dsa_get_address(hashtable->area, batch->buckets);
3299 for (i = 0; i < nbuckets; ++i)
3300 dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
3301}
3302
3303/*
3304 * If we are currently attached to a shared hash join batch, detach. If we
3305 * are last to detach, clean up.
3306 */
3307void
3308ExecHashTableDetachBatch(HashJoinTable hashtable)
3309{
3310 if (hashtable->parallel_state != NULL &&
3311 hashtable->curbatch >= 0)
3312 {
3313 int curbatch = hashtable->curbatch;
3314 ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
3315 bool attached = true;
3316
3317 /* Make sure any temporary files are closed. */
3318 sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
3319 sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
3320
3321 /* After attaching we always get at least to PHJ_BATCH_PROBE. */
3322 Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
3323 BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
3324
3325 /*
3326 * If we're abandoning the PHJ_BATCH_PROBE phase early without having
3327 * reached the end of it, it means the plan doesn't want any more
3328 * tuples, and it is happy to abandon any tuples buffered in this
3329 * process's subplans. For correctness, we can't allow any process to
3330 * execute the PHJ_BATCH_SCAN phase, because we will never have the
3331 * complete set of match bits. Therefore we skip emitting unmatched
3332 * tuples in all backends (if this is a full/right join), as if those
3333 * tuples were all due to be emitted by this process and it has
3334 * abandoned them too.
3335 */
3336 if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
3337 !hashtable->batches[curbatch].outer_eof)
3338 {
3339 /*
3340 * This flag may be written to by multiple backends during
3341 * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
3342 * phase so requires no extra locking.
3343 */
3344 batch->skip_unmatched = true;
3345 }
3346
3347 /*
3348 * Even if we aren't doing a full/right outer join, we'll step through
3349 * the PHJ_BATCH_SCAN phase just to maintain the invariant that
3350 * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
3351 */
3352 if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
3353 attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
3354 if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
3355 {
3356 /*
3357 * We are no longer attached to the batch barrier, but we're the
3358 * process that was chosen to free resources and it's safe to
3359 * assert the current phase. The ParallelHashJoinBatch can't go
3360 * away underneath us while we are attached to the build barrier,
3361 * making this access safe.
3362 */
3363 Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
3364
3365 /* Free shared chunks and buckets. */
3366 while (DsaPointerIsValid(batch->chunks))
3367 {
3368 HashMemoryChunk chunk =
3369 dsa_get_address(hashtable->area, batch->chunks);
3370 dsa_pointer next = chunk->next.shared;
3371
3372 dsa_free(hashtable->area, batch->chunks);
3373 batch->chunks = next;
3374 }
3375 if (DsaPointerIsValid(batch->buckets))
3376 {
3377 dsa_free(hashtable->area, batch->buckets);
3378 batch->buckets = InvalidDsaPointer;
3379 }
3380 }
3381
3382 /*
3383 * Track the largest batch we've been attached to. Though each
3384 * backend might see a different subset of batches, explain.c will
3385 * scan the results from all backends to find the largest value.
3386 */
3387 hashtable->spacePeak =
3388 Max(hashtable->spacePeak,
3389 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
3390
3391 /* Remember that we are not attached to a batch. */
3392 hashtable->curbatch = -1;
3393 }
3394}
3395
3396/*
3397 * Detach from all shared resources. If we are last to detach, clean up.
3398 */
3399void
3400ExecHashTableDetach(HashJoinTable hashtable)
3401{
3402 ParallelHashJoinState *pstate = hashtable->parallel_state;
3403
3404 /*
3405 * If we're involved in a parallel query, we must either have gotten all
3406 * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
3407 */
3408 Assert(!pstate ||
3409 BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
3410
3411 if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
3412 {
3413 int i;
3414
3415 /* Make sure any temporary files are closed. */
3416 if (hashtable->batches)
3417 {
3418 for (i = 0; i < hashtable->nbatch; ++i)
3419 {
3420 sts_end_write(hashtable->batches[i].inner_tuples);
3421 sts_end_write(hashtable->batches[i].outer_tuples);
3422 sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3423 sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3424 }
3425 }
3426
3427 /* If we're last to detach, clean up shared memory. */
3428 if (BarrierArriveAndDetach(&pstate->build_barrier))
3429 {
3430 /*
3431 * Late joining processes will see this state and give up
3432 * immediately.
3433 */
3434 Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
3435
3436 if (DsaPointerIsValid(pstate->batches))
3437 {
3438 dsa_free(hashtable->area, pstate->batches);
3439 pstate->batches = InvalidDsaPointer;
3440 }
3441 }
3442 }
3443 hashtable->parallel_state = NULL;
3444}
3445
3446/*
3447 * Get the first tuple in a given bucket identified by number.
3448 */
3449static inline HashJoinTuple
3450ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
3451{
3452 HashJoinTuple tuple;
3453 dsa_pointer p;
3454
3455 Assert(hashtable->parallel_state);
3456 p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
3457 tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
3458
3459 return tuple;
3460}
3461
3462/*
3463 * Get the next tuple in the same bucket as 'tuple'.
3464 */
3465static inline HashJoinTuple
3466ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
3467{
3468 HashJoinTuple next;
3469
3470 Assert(hashtable->parallel_state);
3471 next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
3472
3473 return next;
3474}
3475
3476/*
3477 * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
3478 */
3479static inline void
3480ExecParallelHashPushTuple(dsa_pointer_atomic *head,
3481 HashJoinTuple tuple,
3482 dsa_pointer tuple_shared)
3483{
3484 for (;;)
3485 {
3486 tuple->next.shared = dsa_pointer_atomic_read(head);
3487 if (dsa_pointer_atomic_compare_exchange(head,
3488 &tuple->next.shared,
3489 tuple_shared))
3490 break;
3491 }
3492}
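/*
 * Editor's illustrative sketch (not part of nodeHash.c): the same
 * compare-and-swap retry loop expressed with C11 atomics on ordinary
 * pointers, as an analogy for the dsa_pointer_atomic operations used above.
 */
#include <stdatomic.h>
#include <stddef.h>

typedef struct Node
{
	struct Node *next;
} Node;

static void
push_front(_Atomic(Node *) *head, Node *node)
{
	Node	   *old_head = atomic_load(head);

	do
	{
		/* link the new node in front of the current head ... */
		node->next = old_head;
		/* ... and retry if another thread moved the head meanwhile */
	} while (!atomic_compare_exchange_weak(head, &old_head, node));
}

int
main(void)
{
	static _Atomic(Node *) head = NULL;
	static Node n1, n2;

	push_front(&head, &n1);
	push_front(&head, &n2);
	return 0;
}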
3493
3494/*
3495 * Prepare to work on a given batch.
3496 */
3497void
3498ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
3499{
3500 Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
3501
3502 hashtable->curbatch = batchno;
3503 hashtable->buckets.shared = (dsa_pointer_atomic *)
3504 dsa_get_address(hashtable->area,
3505 hashtable->batches[batchno].shared->buckets);
3506 hashtable->nbuckets = hashtable->parallel_state->nbuckets;
3507 hashtable->log2_nbuckets = pg_ceil_log2_32(hashtable->nbuckets);
3508 hashtable->current_chunk = NULL;
3509 hashtable->current_chunk_shared = InvalidDsaPointer;
3510 hashtable->batches[batchno].at_least_one_chunk = false;
3511}
3512
3513/*
3514 * Take the next available chunk from the queue of chunks being worked on in
3515 * parallel. Return NULL if there are none left. Otherwise return a pointer
3516 * to the chunk, and set *shared to the DSA pointer to the chunk.
3517 */
3518static HashMemoryChunk
3519ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
3520{
3521 ParallelHashJoinState *pstate = hashtable->parallel_state;
3522 HashMemoryChunk chunk;
3523
3524 LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3525 if (DsaPointerIsValid(pstate->chunk_work_queue))
3526 {
3527 *shared = pstate->chunk_work_queue;
3528 chunk = (HashMemoryChunk)
3529 dsa_get_address(hashtable->area, *shared);
3530 pstate->chunk_work_queue = chunk->next.shared;
3531 }
3532 else
3533 chunk = NULL;
3534 LWLockRelease(&pstate->lock);
3535
3536 return chunk;
3537}
3538
3539/*
3540 * Increase the space preallocated in this backend for a given inner batch by
3541 * at least a given amount. This allows us to track whether a given batch
3542 * would fit in memory when loaded back in. Also increase the number of
3543 * batches or buckets if required.
3544 *
3545 * This maintains a running estimation of how much space will be taken when we
3546 * load the batch back into memory by simulating the way chunks will be handed
3547 * out to workers. It's not perfectly accurate because the tuples will be
3548 * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
3549 * it should be pretty close. It tends to overestimate by a fraction of a
3550 * chunk per worker since all workers gang up to preallocate during hashing,
3551 * but workers tend to reload batches alone if there are enough to go around,
3552 * leaving fewer partially filled chunks. This effect is bounded by
3553 * nparticipants.
3554 *
3555 * Return false if the number of batches or buckets has changed, and the
3556 * caller should reconsider which batch a given tuple now belongs in and call
3557 * again.
3558 */
3559static bool
3560ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
3561{
3562 ParallelHashJoinState *pstate = hashtable->parallel_state;
3563 ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
3564 size_t want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
3565
3566 Assert(batchno > 0);
3567 Assert(batchno < hashtable->nbatch);
3568 Assert(size == MAXALIGN(size));
3569
3570 LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3571
3572 /* Has another participant commanded us to help grow? */
3573 if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3574 pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3575 {
3576 ParallelHashGrowth growth = pstate->growth;
3577
3578 LWLockRelease(&pstate->lock);
3579 if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3580 ExecParallelHashIncreaseNumBatches(hashtable);
3581 else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3582 ExecParallelHashIncreaseNumBuckets(hashtable);
3583
3584 return false;
3585 }
3586
3587 if (pstate->growth != PHJ_GROWTH_DISABLED &&
3588 batch->at_least_one_chunk &&
3589 (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
3590 > pstate->space_allowed))
3591 {
3592 /*
3593 * We have determined that this batch would exceed the space budget if
3594 * loaded into memory. Command all participants to help repartition.
3595 */
3596 batch->shared->space_exhausted = true;
3597 pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3598 LWLockRelease(&pstate->lock);
3599
3600 return false;
3601 }
3602
3603 batch->at_least_one_chunk = true;
3604 batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
3605 batch->preallocated = want;
3606 LWLockRelease(&pstate->lock);
3607
3608 return true;
3609}
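/*
 * Editor's illustrative sketch (not part of nodeHash.c): the arithmetic of
 * the preallocation estimate above, with assumed chunk geometry.  Each
 * reservation rounds the request up to a full chunk's worth of payload and
 * charges the batch for payload plus header, mimicking how chunks will be
 * handed out when the batch is later loaded back into memory.
 */
#include <stdio.h>

#define CHUNK_SIZE			(32 * 1024)
#define CHUNK_HEADER_SIZE	24		/* assumed header size for the example */

int
main(void)
{
	size_t		estimated_size = 0;	/* per-batch running estimate */
	size_t		preallocated = 0;	/* this backend's unused reservation */
	size_t		tuple_size = 200;	/* MAXALIGNed tuple being written */

	if (preallocated < tuple_size)
	{
		/* reserve at least one chunk's payload, more for oversized tuples */
		size_t		want = tuple_size > CHUNK_SIZE - CHUNK_HEADER_SIZE ?
			tuple_size : CHUNK_SIZE - CHUNK_HEADER_SIZE;

		estimated_size += want + CHUNK_HEADER_SIZE;
		preallocated = want;
	}
	preallocated -= tuple_size;

	printf("estimated_size = %zu, reservation left = %zu\n",
		   estimated_size, preallocated);
	return 0;
}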
3610
3611/*
3612 * Calculate the limit on how much memory can be used by Hash and similar
3613 * plan types. This is work_mem times hash_mem_multiplier, and is
3614 * expressed in bytes.
3615 *
3616 * Exported for use by the planner, as well as other hash-like executor
3617 * nodes. This is a rather random place for this, but there is no better
3618 * place.
3619 */
3620size_t
3621get_hash_memory_limit(void)
3622{
3623 double mem_limit;
3624
3625 /* Do initial calculation in double arithmetic */
3626 mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
3627
3628 /* Clamp in case it doesn't fit in size_t */
3629 mem_limit = Min(mem_limit, (double) SIZE_MAX);
3630
3631 return (size_t) mem_limit;
3632}
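/*
 * Editor's illustrative sketch (not part of nodeHash.c): the same
 * computation as get_hash_memory_limit() with example GUC values plugged
 * in.  work_mem is expressed in kilobytes, so the result is scaled by 1024.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	int			work_mem_kb = 4096;		/* work_mem = 4MB */
	double		hash_mem_multiplier = 2.0;
	double		mem_limit;

	mem_limit = (double) work_mem_kb * hash_mem_multiplier * 1024.0;
	if (mem_limit > (double) SIZE_MAX)
		mem_limit = (double) SIZE_MAX;

	/* 4096 kB * 2.0 * 1024 = 8388608 bytes (8MB) */
	printf("hash memory limit = %zu bytes\n", (size_t) mem_limit);
	return 0;
}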
Datum idx(PG_FUNCTION_ARGS)
Definition: _int_op.c:262
int ParallelWorkerNumber
Definition: parallel.c:115
void PrepareTempTablespaces(void)
Definition: tablespace.c:1331
bool BarrierArriveAndDetachExceptLast(Barrier *barrier)
Definition: barrier.c:213
bool BarrierArriveAndDetach(Barrier *barrier)
Definition: barrier.c:203
int BarrierAttach(Barrier *barrier)
Definition: barrier.c:236
void BarrierInit(Barrier *barrier, int participants)
Definition: barrier.c:100
int BarrierPhase(Barrier *barrier)
Definition: barrier.c:265
bool BarrierArriveAndWait(Barrier *barrier, uint32 wait_event_info)
Definition: barrier.c:125
bool BarrierDetach(Barrier *barrier)
Definition: barrier.c:256
static int32 next
Definition: blutils.c:224
void BufFileClose(BufFile *file)
Definition: buffile.c:412
#define Min(x, y)
Definition: c.h:1008
#define MAXALIGN(LEN)
Definition: c.h:815
#define Max(x, y)
Definition: c.h:1002
uint32_t uint32
Definition: c.h:543
#define OidIsValid(objectId)
Definition: c.h:779
size_t Size
Definition: c.h:615
void * dsa_get_address(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:957
void dsa_free(dsa_area *area, dsa_pointer dp)
Definition: dsa.c:841
#define dsa_allocate0(area, size)
Definition: dsa.h:113
uint64 dsa_pointer
Definition: dsa.h:62
#define dsa_pointer_atomic_init
Definition: dsa.h:64
#define dsa_allocate(area, size)
Definition: dsa.h:109
#define dsa_pointer_atomic_write
Definition: dsa.h:66
#define InvalidDsaPointer
Definition: dsa.h:78
#define dsa_pointer_atomic_compare_exchange
Definition: dsa.h:68
#define dsa_pointer_atomic_read
Definition: dsa.h:65
pg_atomic_uint64 dsa_pointer_atomic
Definition: dsa.h:63
#define DsaPointerIsValid(x)
Definition: dsa.h:106
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
void ExecReScan(PlanState *node)
Definition: execAmi.c:77
void ExecEndNode(PlanState *node)
Definition: execProcnode.c:562
PlanState * ExecInitNode(Plan *node, EState *estate, int eflags)
Definition: execProcnode.c:142
MinimalTuple ExecFetchSlotMinimalTuple(TupleTableSlot *slot, bool *shouldFree)
Definition: execTuples.c:1881
TupleTableSlot * ExecStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1635
void ExecInitResultTupleSlotTL(PlanState *planstate, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1988
const TupleTableSlotOps TTSOpsMinimalTuple
Definition: execTuples.c:86
void ExecAssignExprContext(EState *estate, PlanState *planstate)
Definition: execUtils.c:485
#define outerPlanState(node)
Definition: execnodes.h:1261
struct HashJoinTupleData * HashJoinTuple
Definition: execnodes.h:2256
struct HashInstrumentation HashInstrumentation
#define EXEC_FLAG_BACKWARD
Definition: executor.h:69
#define ResetExprContext(econtext)
Definition: executor.h:650
static bool ExecQualAndReset(ExprState *state, ExprContext *econtext)
Definition: executor.h:546
static TupleTableSlot * ExecProcNode(PlanState *node)
Definition: executor.h:314
static Datum ExecEvalExprSwitchContext(ExprState *state, ExprContext *econtext, bool *isNull)
Definition: executor.h:436
#define EXEC_FLAG_MARK
Definition: executor.h:70
#define palloc_object(type)
Definition: fe_memutils.h:74
#define MaxAllocSize
Definition: fe_memutils.h:22
#define repalloc_array(pointer, type, count)
Definition: fe_memutils.h:78
#define palloc0_array(type, count)
Definition: fe_memutils.h:77
#define palloc0_object(type)
Definition: fe_memutils.h:75
Datum FunctionCall1Coll(FmgrInfo *flinfo, Oid collation, Datum arg1)
Definition: fmgr.c:1130
double hash_mem_multiplier
Definition: globals.c:132
int work_mem
Definition: globals.c:131
Assert(PointerIsAligned(start, uint64))
#define PHJ_GROW_BATCHES_REPARTITION
Definition: hashjoin.h:287
struct HashMemoryChunkData * HashMemoryChunk
Definition: hashjoin.h:148
#define PHJ_BATCH_SCAN
Definition: hashjoin.h:281
#define HASH_CHUNK_DATA(hc)
Definition: hashjoin.h:152
#define PHJ_BATCH_PROBE
Definition: hashjoin.h:280
#define PHJ_GROW_BUCKETS_REINSERT
Definition: hashjoin.h:295
#define SKEW_MIN_OUTER_FRACTION
Definition: hashjoin.h:122
#define PHJ_GROW_BUCKETS_ELECT
Definition: hashjoin.h:293
#define HJTUPLE_OVERHEAD
Definition: hashjoin.h:90
#define PHJ_GROW_BUCKETS_PHASE(n)
Definition: hashjoin.h:296
#define PHJ_GROW_BATCHES_ELECT
Definition: hashjoin.h:285
#define ParallelHashJoinBatchInner(batch)
Definition: hashjoin.h:182
#define PHJ_BUILD_FREE
Definition: hashjoin.h:274
#define PHJ_BUILD_HASH_INNER
Definition: hashjoin.h:271
#define NthParallelHashJoinBatch(base, n)
Definition: hashjoin.h:198
#define HASH_CHUNK_THRESHOLD
Definition: hashjoin.h:154
#define PHJ_BUILD_HASH_OUTER
Definition: hashjoin.h:272
#define HJTUPLE_MINTUPLE(hjtup)
Definition: hashjoin.h:91
#define SKEW_BUCKET_OVERHEAD
Definition: hashjoin.h:119
#define PHJ_GROW_BATCHES_DECIDE
Definition: hashjoin.h:288
#define PHJ_GROW_BATCHES_REALLOCATE
Definition: hashjoin.h:286
#define HASH_CHUNK_HEADER_SIZE
Definition: hashjoin.h:151
#define PHJ_GROW_BATCHES_FINISH
Definition: hashjoin.h:289
#define ParallelHashJoinBatchOuter(batch, nparticipants)
Definition: hashjoin.h:187
#define SKEW_HASH_MEM_PERCENT
Definition: hashjoin.h:121
#define PHJ_BUILD_ALLOCATE
Definition: hashjoin.h:270
#define PHJ_GROW_BUCKETS_REALLOCATE
Definition: hashjoin.h:294
#define PHJ_BATCH_FREE
Definition: hashjoin.h:282
#define PHJ_GROW_BATCHES_PHASE(n)
Definition: hashjoin.h:290
#define HASH_CHUNK_SIZE
Definition: hashjoin.h:150
ParallelHashGrowth
Definition: hashjoin.h:231
@ PHJ_GROWTH_NEED_MORE_BUCKETS
Definition: hashjoin.h:235
@ PHJ_GROWTH_OK
Definition: hashjoin.h:233
@ PHJ_GROWTH_NEED_MORE_BATCHES
Definition: hashjoin.h:237
@ PHJ_GROWTH_DISABLED
Definition: hashjoin.h:239
#define PHJ_BUILD_RUN
Definition: hashjoin.h:273
#define INVALID_SKEW_BUCKET_NO
Definition: hashjoin.h:120
#define PHJ_BUILD_ELECT
Definition: hashjoin.h:269
#define EstimateParallelHashJoinBatch(hashtable)
Definition: hashjoin.h:193
void heap_free_minimal_tuple(MinimalTuple mtup)
Definition: heaptuple.c:1529
#define HeapTupleIsValid(tuple)
Definition: htup.h:78
#define SizeofMinimalTupleHeader
Definition: htup_details.h:699
static void HeapTupleHeaderClearMatch(MinimalTupleData *tup)
Definition: htup_details.h:718
static bool HeapTupleHeaderHasMatch(const MinimalTupleData *tup)
Definition: htup_details.h:706
void InstrStartNode(Instrumentation *instr)
Definition: instrument.c:68
void InstrStopNode(Instrumentation *instr, double nTuples)
Definition: instrument.c:84
void free_attstatsslot(AttStatsSlot *sslot)
Definition: lsyscache.c:3511
bool get_attstatsslot(AttStatsSlot *sslot, HeapTuple statstuple, int reqkind, Oid reqop, int flags)
Definition: lsyscache.c:3401
#define ATTSTATSSLOT_NUMBERS
Definition: lsyscache.h:44
#define ATTSTATSSLOT_VALUES
Definition: lsyscache.h:43
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1174
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1894
@ LW_EXCLUSIVE
Definition: lwlock.h:112
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1229
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:400
void * MemoryContextAllocZero(MemoryContext context, Size size)
Definition: mcxt.c:1263
void pfree(void *pointer)
Definition: mcxt.c:1594
void * palloc(Size size)
Definition: mcxt.c:1365
MemoryContext CurrentMemoryContext
Definition: mcxt.c:160
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:469
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:123
static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
Definition: nodeHash.c:1586
static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
Definition: nodeHash.c:2646
void ExecParallelHashTableInsert(HashJoinTable hashtable, TupleTableSlot *slot, uint32 hashvalue)
Definition: nodeHash.c:1838
static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
Definition: nodeHash.c:3560
void ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
Definition: nodeHash.c:3498
static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
Definition: nodeHash.c:1649
static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
Definition: nodeHash.c:3224
void ExecHashTableReset(HashJoinTable hashtable)
Definition: nodeHash.c:2326
static void ExecHashBuildSkewHash(HashState *hashstate, HashJoinTable hashtable, Hash *node, int mcvsToUse)
Definition: nodeHash.c:2402
static HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
Definition: nodeHash.c:3450
void ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
Definition: nodeHash.c:2779
static bool ExecHashIncreaseBatchSize(HashJoinTable hashtable)
Definition: nodeHash.c:997
bool ExecParallelScanHashBucket(HashJoinState *hjstate, ExprContext *econtext)
Definition: nodeHash.c:2052
static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size, dsa_pointer *shared)
Definition: nodeHash.c:2975
static void * dense_alloc(HashJoinTable hashtable, Size size)
Definition: nodeHash.c:2895
static void MultiExecParallelHash(HashState *node)
Definition: nodeHash.c:218
void ExecHashAccumInstrumentation(HashInstrumentation *instrument, HashJoinTable hashtable)
Definition: nodeHash.c:2876
static void MultiExecPrivateHash(HashState *node)
Definition: nodeHash.c:137
void ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
Definition: nodeHash.c:2804
static void ExecParallelHashPushTuple(dsa_pointer_atomic *head, HashJoinTuple tuple, dsa_pointer tuple_shared)
Definition: nodeHash.c:3480
Node * MultiExecHash(HashState *node)
Definition: nodeHash.c:104
HashState * ExecInitHash(Hash *node, EState *estate, int eflags)
Definition: nodeHash.c:369
void ExecHashTableDetachBatch(HashJoinTable hashtable)
Definition: nodeHash.c:3308
void ExecHashEstimate(HashState *node, ParallelContext *pcxt)
Definition: nodeHash.c:2760
void ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, bool try_combined_hash_mem, int parallel_workers, size_t *space_allowed, int *numbuckets, int *numbatches, int *num_skew_mcvs)
Definition: nodeHash.c:657
void ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
Definition: nodeHash.c:2103
void ExecHashTableDetach(HashJoinTable hashtable)
Definition: nodeHash.c:3400
bool ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
Definition: nodeHash.c:2263
void ExecHashTableDestroy(HashJoinTable hashtable)
Definition: nodeHash.c:955
HashJoinTable ExecHashTableCreate(HashState *state)
Definition: nodeHash.c:445
#define NTUP_PER_BUCKET
Definition: nodeHash.c:654
int ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
Definition: nodeHash.c:2554
static void ExecHashIncreaseNumBatches(HashJoinTable hashtable)
Definition: nodeHash.c:1029
size_t get_hash_memory_limit(void)
Definition: nodeHash.c:3621
bool ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
Definition: nodeHash.c:2189
static void ExecHashSkewTableInsert(HashJoinTable hashtable, TupleTableSlot *slot, uint32 hashvalue, int bucketNumber)
Definition: nodeHash.c:2600
static void ExecParallelHashRepartitionRest(HashJoinTable hashtable)
Definition: nodeHash.c:1496
static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
Definition: nodeHash.c:3123
void ExecHashTableResetMatchFlags(HashJoinTable hashtable)
Definition: nodeHash.c:2354
static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
Definition: nodeHash.c:3203
static HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
Definition: nodeHash.c:3466
void ExecEndHash(HashState *node)
Definition: nodeHash.c:426
void ExecShutdownHash(HashState *node)
Definition: nodeHash.c:2830
void ExecHashTableInsert(HashJoinTable hashtable, TupleTableSlot *slot, uint32 hashvalue)
Definition: nodeHash.c:1748
static TupleTableSlot * ExecHash(PlanState *pstate)
Definition: nodeHash.c:90
void ExecHashGetBucketAndBatch(HashJoinTable hashtable, uint32 hashvalue, int *bucketno, int *batchno)
Definition: nodeHash.c:1959
static void ExecParallelHashMergeCounters(HashJoinTable hashtable)
Definition: nodeHash.c:1556
void ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
Definition: nodeHash.c:3288
bool ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
Definition: nodeHash.c:2124
void ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable, TupleTableSlot *slot, uint32 hashvalue)
Definition: nodeHash.c:1904
static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
Definition: nodeHash.c:3519
void ExecReScanHash(HashState *node)
Definition: nodeHash.c:2380
bool ExecScanHashBucket(HashJoinState *hjstate, ExprContext *econtext)
Definition: nodeHash.c:1991
static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
Definition: nodeHash.c:1429
static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
Definition: nodeHash.c:1197
void ExecHashRetrieveInstrumentation(HashState *node)
Definition: nodeHash.c:2845
void ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue, BufFile **fileptr, HashJoinTable hashtable)
#define makeNode(_type_)
Definition: nodes.h:161
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
#define repalloc0_array(pointer, type, oldcount, count)
Definition: palloc.h:109
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:189
static uint32 pg_rotate_right32(uint32 word, int n)
Definition: pg_bitutils.h:422
#define pg_nextpower2_size_t
Definition: pg_bitutils.h:441
static uint32 pg_ceil_log2_32(uint32 num)
Definition: pg_bitutils.h:258
static uint32 pg_prevpower2_32(uint32 num)
Definition: pg_bitutils.h:235
#define pg_prevpower2_size_t
Definition: pg_bitutils.h:442
#define MAXPGPATH
#define NIL
Definition: pg_list.h:68
#define outerPlan(node)
Definition: plannodes.h:261
#define snprintf
Definition: port.h:239
#define printf(...)
Definition: port.h:245
static uint32 DatumGetUInt32(Datum X)
Definition: postgres.h:232
static Datum Int16GetDatum(int16 X)
Definition: postgres.h:182
static Datum BoolGetDatum(bool X)
Definition: postgres.h:112
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:262
uint64_t Datum
Definition: postgres.h:70
#define InvalidOid
Definition: postgres_ext.h:37
SharedTuplestoreAccessor * sts_attach(SharedTuplestore *sts, int my_participant_number, SharedFileSet *fileset)
MinimalTuple sts_parallel_scan_next(SharedTuplestoreAccessor *accessor, void *meta_data)
void sts_end_write(SharedTuplestoreAccessor *accessor)
SharedTuplestoreAccessor * sts_initialize(SharedTuplestore *sts, int participants, int my_participant_number, size_t meta_data_size, int flags, SharedFileSet *fileset, const char *name)
void sts_end_parallel_scan(SharedTuplestoreAccessor *accessor)
void sts_puttuple(SharedTuplestoreAccessor *accessor, void *meta_data, MinimalTuple tuple)
void sts_begin_parallel_scan(SharedTuplestoreAccessor *accessor)
#define SHARED_TUPLESTORE_SINGLE_PASS
void * shm_toc_allocate(shm_toc *toc, Size nbytes)
Definition: shm_toc.c:88
void shm_toc_insert(shm_toc *toc, uint64 key, void *address)
Definition: shm_toc.c:171
void * shm_toc_lookup(shm_toc *toc, uint64 key, bool noError)
Definition: shm_toc.c:232
#define shm_toc_estimate_chunk(e, sz)
Definition: shm_toc.h:51
#define shm_toc_estimate_keys(e, cnt)
Definition: shm_toc.h:53
Size add_size(Size s1, Size s2)
Definition: shmem.c:494
Size mul_size(Size s1, Size s2)
Definition: shmem.c:511
Datum * values
Definition: lsyscache.h:54
float4 * numbers
Definition: lsyscache.h:57
TupleTableSlot * ecxt_innertuple
Definition: execnodes.h:275
TupleTableSlot * ecxt_outertuple
Definition: execnodes.h:277
HashJoinTuple hj_CurTuple
Definition: execnodes.h:2268
int hj_CurSkewBucketNo
Definition: execnodes.h:2267
ExprState * hashclauses
Definition: execnodes.h:2262
uint32 hj_CurHashValue
Definition: execnodes.h:2265
int hj_CurBucketNo
Definition: execnodes.h:2266
HashJoinTable hj_HashTable
Definition: execnodes.h:2264
TupleTableSlot * hj_HashTupleSlot
Definition: execnodes.h:2270
struct HashJoinTupleData ** unshared
Definition: hashjoin.h:311
HashMemoryChunk chunks
Definition: hashjoin.h:355
union HashJoinTableData::@109 buckets
ParallelHashJoinBatchAccessor * batches
Definition: hashjoin.h:361
MemoryContext hashCxt
Definition: hashjoin.h:350
double totalTuples
Definition: hashjoin.h:330
double partialTuples
Definition: hashjoin.h:331
ParallelHashJoinState * parallel_state
Definition: hashjoin.h:360
MemoryContext spillCxt
Definition: hashjoin.h:352
HashMemoryChunk current_chunk
Definition: hashjoin.h:358
Size spaceAllowedSkew
Definition: hashjoin.h:348
int * skewBucketNums
Definition: hashjoin.h:320
BufFile ** innerBatchFile
Definition: hashjoin.h:341
int log2_nbuckets_optimal
Definition: hashjoin.h:305
dsa_pointer_atomic * shared
Definition: hashjoin.h:313
dsa_area * area
Definition: hashjoin.h:359
BufFile ** outerBatchFile
Definition: hashjoin.h:342
dsa_pointer current_chunk_shared
Definition: hashjoin.h:362
MemoryContext batchCxt
Definition: hashjoin.h:351
double skewTuples
Definition: hashjoin.h:332
HashSkewBucket ** skewBucket
Definition: hashjoin.h:317
dsa_pointer shared
Definition: hashjoin.h:84
uint32 hashvalue
Definition: hashjoin.h:86
struct HashJoinTupleData * unshared
Definition: hashjoin.h:83
union HashJoinTupleData::@107 next
union HashMemoryChunkData::@108 next
struct HashMemoryChunkData * unshared
Definition: hashjoin.h:137
dsa_pointer shared
Definition: hashjoin.h:138
HashJoinTuple tuples
Definition: hashjoin.h:116
uint32 hashvalue
Definition: hashjoin.h:115
struct ParallelHashJoinState * parallel_state
Definition: execnodes.h:2840
HashJoinTable hashtable
Definition: execnodes.h:2818
SharedHashInfo * shared_info
Definition: execnodes.h:2830
ExprState * hash_expr
Definition: execnodes.h:2819
Oid skew_collation
Definition: execnodes.h:2822
FmgrInfo * skew_hashfunction
Definition: execnodes.h:2821
PlanState ps
Definition: execnodes.h:2817
HashInstrumentation * hinstrument
Definition: execnodes.h:2837
AttrNumber skewColumn
Definition: plannodes.h:1413
Oid skewTable
Definition: plannodes.h:1411
bool skewInherit
Definition: plannodes.h:1415
Cardinality rows_total
Definition: plannodes.h:1418
Plan plan
Definition: plannodes.h:1402
Definition: nodes.h:135
shm_toc_estimator estimator
Definition: parallel.h:41
shm_toc * toc
Definition: parallel.h:44
SharedTuplestoreAccessor * outer_tuples
Definition: hashjoin.h:221
ParallelHashJoinBatch * shared
Definition: hashjoin.h:209
SharedTuplestoreAccessor * inner_tuples
Definition: hashjoin.h:220
dsa_pointer chunks
Definition: hashjoin.h:167
dsa_pointer buckets
Definition: hashjoin.h:164
Barrier grow_batches_barrier
Definition: hashjoin.h:261
dsa_pointer old_batches
Definition: hashjoin.h:249
dsa_pointer chunk_work_queue
Definition: hashjoin.h:254
Barrier grow_buckets_barrier
Definition: hashjoin.h:262
ParallelHashGrowth growth
Definition: hashjoin.h:253
SharedFileSet fileset
Definition: hashjoin.h:265
dsa_pointer batches
Definition: hashjoin.h:248
Instrumentation * instrument
Definition: execnodes.h:1175
Plan * plan
Definition: execnodes.h:1165
EState * state
Definition: execnodes.h:1167
ExprContext * ps_ExprContext
Definition: execnodes.h:1204
ProjectionInfo * ps_ProjInfo
Definition: execnodes.h:1205
ExecProcNodeMtd ExecProcNode
Definition: execnodes.h:1171
bool parallel_aware
Definition: plannodes.h:213
List * qual
Definition: plannodes.h:231
int plan_width
Definition: plannodes.h:207
Cardinality plan_rows
Definition: plannodes.h:205
int plan_node_id
Definition: plannodes.h:227
HashInstrumentation hinstrument[FLEXIBLE_ARRAY_MEMBER]
Definition: execnodes.h:2808
void ReleaseSysCache(HeapTuple tuple)
Definition: syscache.c:264
HeapTuple SearchSysCache3(int cacheId, Datum key1, Datum key2, Datum key3)
Definition: syscache.c:240
#define TupIsNull(slot)
Definition: tuptable.h:309
const char * name