PostgreSQL Source Code git master
nodeSetOp.c
Go to the documentation of this file.
/*-------------------------------------------------------------------------
 *
 * nodeSetOp.c
 *	  Routines to handle INTERSECT and EXCEPT selection
 *
 * The input of a SetOp node consists of two relations (outer and inner)
 * with identical column sets.  In EXCEPT queries the outer relation is
 * always the left side, while in INTERSECT cases the planner tries to
 * make the outer relation be the smaller of the two inputs.
 *
 * In SETOP_SORTED mode, each input has been sorted according to all the
 * grouping columns.  The SetOp node essentially performs a merge join on
 * the grouping columns, except that it is only interested in counting how
 * many tuples from each input match.  Then it is a simple matter to emit
 * the output demanded by the SQL spec for INTERSECT, INTERSECT ALL, EXCEPT,
 * or EXCEPT ALL.
 *
 * In SETOP_HASHED mode, the inputs are delivered in no particular order.
 * We read the outer relation and build a hash table in memory with one entry
 * for each group of identical tuples, counting the number of tuples in the
 * group.  Then we read the inner relation and count the number of tuples
 * matching each outer group.  (We can disregard any tuples appearing only
 * in the inner relation, since they cannot result in any output.)  After
 * seeing all the input, we scan the hashtable and generate the correct
 * output using those counts.
 *
 * This node type is not used for UNION or UNION ALL, since those can be
 * implemented more cheaply (there's no need to count the number of
 * matching tuples).
 *
 * Note that SetOp does no qual checking nor projection.  The delivered
 * output tuples are just copies of the first-to-arrive tuple in each
 * input group.
 *
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeSetOp.c
 *
 *-------------------------------------------------------------------------
 */
45
#include "postgres.h"

#include "access/htup_details.h"
#include "executor/executor.h"
#include "executor/nodeSetOp.h"
#include "miscadmin.h"
#include "utils/memutils.h"
53
54
55/*
56 * SetOpStatePerGroupData - per-group working state
57 *
58 * In SETOP_SORTED mode, we need only one of these structs, and it's just a
59 * local in setop_retrieve_sorted. In SETOP_HASHED mode, the hash table
60 * contains one of these for each tuple group.
61 */
63{
64 int64 numLeft; /* number of left-input dups in group */
65 int64 numRight; /* number of right-input dups in group */
67
69
70
72static void setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
73 SetOpState *setopstate);
75 SetOpState *setopstate);
76static void setop_fill_hash_table(SetOpState *setopstate);
78
79
80/*
81 * Initialize the hash table to empty.
82 */
83static void
85{
86 SetOp *node = (SetOp *) setopstate->ps.plan;
87 ExprContext *econtext = setopstate->ps.ps_ExprContext;
88 TupleDesc desc = ExecGetResultType(outerPlanState(setopstate));
89
91
92 /*
93 * If both child plans deliver the same fixed tuple slot type, we can tell
94 * BuildTupleHashTable to expect that slot type as input. Otherwise,
95 * we'll pass NULL denoting that any slot type is possible.
96 */
97 setopstate->hashtable = BuildTupleHashTable(&setopstate->ps,
98 desc,
99 ExecGetCommonChildSlotOps(&setopstate->ps),
100 node->numCols,
101 node->cmpColIdx,
102 setopstate->eqfuncoids,
103 setopstate->hashfunctions,
104 node->cmpCollations,
105 node->numGroups,
107 setopstate->ps.state->es_query_cxt,
108 setopstate->tuplesContext,
109 econtext->ecxt_per_tuple_memory,
110 false);
111}
112
113/* Planner support routine to estimate space needed for hash table */
114Size
115EstimateSetOpHashTableSpace(double nentries, Size tupleWidth)
116{
117 return EstimateTupleHashTableSpace(nentries,
118 tupleWidth,
119 sizeof(SetOpStatePerGroupData));
120}
121
122/*
123 * We've completed processing a tuple group. Decide how many copies (if any)
124 * of its representative row to emit, and store the count into numOutput.
125 * This logic is straight from the SQL92 specification.
126 */
127static void
129{
130 SetOp *plannode = (SetOp *) setopstate->ps.plan;
131
132 switch (plannode->cmd)
133 {
135 if (pergroup->numLeft > 0 && pergroup->numRight > 0)
136 setopstate->numOutput = 1;
137 else
138 setopstate->numOutput = 0;
139 break;
141 setopstate->numOutput =
142 (pergroup->numLeft < pergroup->numRight) ?
143 pergroup->numLeft : pergroup->numRight;
144 break;
145 case SETOPCMD_EXCEPT:
146 if (pergroup->numLeft > 0 && pergroup->numRight == 0)
147 setopstate->numOutput = 1;
148 else
149 setopstate->numOutput = 0;
150 break;
152 setopstate->numOutput =
153 (pergroup->numLeft < pergroup->numRight) ?
154 0 : (pergroup->numLeft - pergroup->numRight);
155 break;
156 default:
157 elog(ERROR, "unrecognized set op: %d", (int) plannode->cmd);
158 break;
159 }
160}
161
162
163/* ----------------------------------------------------------------
164 * ExecSetOp
165 * ----------------------------------------------------------------
166 */
167static TupleTableSlot * /* return: a tuple or NULL */
169{
170 SetOpState *node = castNode(SetOpState, pstate);
171 SetOp *plannode = (SetOp *) node->ps.plan;
172 TupleTableSlot *resultTupleSlot = node->ps.ps_ResultTupleSlot;
173
175
176 /*
177 * If the previously-returned tuple needs to be returned more than once,
178 * keep returning it.
179 */
180 if (node->numOutput > 0)
181 {
182 node->numOutput--;
183 return resultTupleSlot;
184 }
185
186 /* Otherwise, we're done if we are out of groups */
187 if (node->setop_done)
188 return NULL;
189
190 /* Fetch the next tuple group according to the correct strategy */
191 if (plannode->strategy == SETOP_HASHED)
192 {
193 if (!node->table_filled)
195 return setop_retrieve_hash_table(node);
196 }
197 else
198 return setop_retrieve_sorted(node);
199}
200
201/*
202 * ExecSetOp for non-hashed case
203 */
204static TupleTableSlot *
206{
209 TupleTableSlot *resultTupleSlot;
210
211 /*
212 * get state info from node
213 */
214 outerPlan = outerPlanState(setopstate);
215 innerPlan = innerPlanState(setopstate);
216 resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;
217
218 /*
219 * If first time through, establish the invariant that setop_load_group
220 * expects: each side's nextTupleSlot is the next output from the child
221 * plan, or empty if there is no more output from it.
222 */
223 if (setopstate->need_init)
224 {
225 setopstate->need_init = false;
226
228
229 /*
230 * If the outer relation is empty, then we will emit nothing, and we
231 * don't need to read the inner relation at all.
232 */
233 if (TupIsNull(setopstate->leftInput.nextTupleSlot))
234 {
235 setopstate->setop_done = true;
236 return NULL;
237 }
238
240
241 /* Set flags that we've not completed either side's group */
242 setopstate->leftInput.needGroup = true;
243 setopstate->rightInput.needGroup = true;
244 }
245
246 /*
247 * We loop retrieving groups until we find one we should return
248 */
249 while (!setopstate->setop_done)
250 {
251 int cmpresult;
252 SetOpStatePerGroupData pergroup;
253
254 /*
255 * Fetch the rest of the current outer group, if we didn't already.
256 */
257 if (setopstate->leftInput.needGroup)
258 setop_load_group(&setopstate->leftInput, outerPlan, setopstate);
259
260 /*
261 * If no more outer groups, we're done, and don't need to look at any
262 * more of the inner relation.
263 */
264 if (setopstate->leftInput.numTuples == 0)
265 {
266 setopstate->setop_done = true;
267 break;
268 }
269
270 /*
271 * Fetch the rest of the current inner group, if we didn't already.
272 */
273 if (setopstate->rightInput.needGroup)
274 setop_load_group(&setopstate->rightInput, innerPlan, setopstate);
275
276 /*
277 * Determine whether we have matching groups on both sides (this is
278 * basically like the core logic of a merge join).
279 */
280 if (setopstate->rightInput.numTuples == 0)
281 cmpresult = -1; /* as though left input is lesser */
282 else
283 cmpresult = setop_compare_slots(setopstate->leftInput.firstTupleSlot,
284 setopstate->rightInput.firstTupleSlot,
285 setopstate);
286
287 if (cmpresult < 0)
288 {
289 /* Left group is first, and has no right matches */
290 pergroup.numLeft = setopstate->leftInput.numTuples;
291 pergroup.numRight = 0;
292 /* We'll need another left group next time */
293 setopstate->leftInput.needGroup = true;
294 }
295 else if (cmpresult == 0)
296 {
297 /* We have matching groups */
298 pergroup.numLeft = setopstate->leftInput.numTuples;
299 pergroup.numRight = setopstate->rightInput.numTuples;
300 /* We'll need to read from both sides next time */
301 setopstate->leftInput.needGroup = true;
302 setopstate->rightInput.needGroup = true;
303 }
304 else
305 {
306 /* Right group has no left matches, so we can ignore it */
307 setopstate->rightInput.needGroup = true;
308 continue;
309 }
310
311 /*
312 * Done scanning these input tuple groups. See if we should emit any
313 * copies of result tuple, and if so return the first copy. (Note
314 * that the result tuple is the same as the left input's firstTuple
315 * slot.)
316 */
317 set_output_count(setopstate, &pergroup);
318
319 if (setopstate->numOutput > 0)
320 {
321 setopstate->numOutput--;
322 return resultTupleSlot;
323 }
324 }
325
326 /* No more groups */
327 ExecClearTuple(resultTupleSlot);
328 return NULL;
329}
330
331/*
332 * Load next group of tuples from one child plan or the other.
333 *
334 * On entry, we've already read the first tuple of the next group
335 * (if there is one) into input->nextTupleSlot. This invariant
336 * is maintained on exit.
337 */
338static void
340 SetOpState *setopstate)
341{
342 input->needGroup = false;
343
344 /* If we've exhausted this child plan, report an empty group */
345 if (TupIsNull(input->nextTupleSlot))
346 {
347 ExecClearTuple(input->firstTupleSlot);
348 input->numTuples = 0;
349 return;
350 }
351
352 /* Make a local copy of the first tuple for comparisons */
354 input->firstTupleSlot,
355 true);
356 /* and count it */
357 input->numTuples = 1;
358
359 /* Scan till we find the end-of-group */
360 for (;;)
361 {
362 int cmpresult;
363
364 /* Get next input tuple, if there is one */
365 input->nextTupleSlot = ExecProcNode(inputPlan);
366 if (TupIsNull(input->nextTupleSlot))
367 break;
368
369 /* There is; does it belong to same group as firstTuple? */
370 cmpresult = setop_compare_slots(input->firstTupleSlot,
371 input->nextTupleSlot,
372 setopstate);
373 Assert(cmpresult <= 0); /* else input is mis-sorted */
374 if (cmpresult != 0)
375 break;
376
377 /* Still in same group, so count this tuple */
378 input->numTuples++;
379 }
380}
381
382/*
383 * Compare the tuples in the two given slots.
384 */
385static int
387 SetOpState *setopstate)
388{
389 /* We'll often need to fetch all the columns, so just do it */
392 for (int nkey = 0; nkey < setopstate->numCols; nkey++)
393 {
394 SortSupport sortKey = setopstate->sortKeys + nkey;
395 AttrNumber attno = sortKey->ssup_attno;
396 Datum datum1 = s1->tts_values[attno - 1],
397 datum2 = s2->tts_values[attno - 1];
398 bool isNull1 = s1->tts_isnull[attno - 1],
399 isNull2 = s2->tts_isnull[attno - 1];
400 int compare;
401
402 compare = ApplySortComparator(datum1, isNull1,
403 datum2, isNull2,
404 sortKey);
405 if (compare != 0)
406 return compare;
407 }
408 return 0;
409}
410
411/*
412 * ExecSetOp for hashed case: phase 1, read inputs and build hash table
413 */
414static void
416{
419 ExprContext *econtext = setopstate->ps.ps_ExprContext;
420 bool have_tuples = false;
421
422 /*
423 * get state info from node
424 */
425 outerPlan = outerPlanState(setopstate);
426 innerPlan = innerPlanState(setopstate);
427
428 /*
429 * Process each outer-plan tuple, and then fetch the next one, until we
430 * exhaust the outer plan.
431 */
432 for (;;)
433 {
434 TupleTableSlot *outerslot;
435 TupleHashTable hashtable = setopstate->hashtable;
436 TupleHashEntryData *entry;
437 SetOpStatePerGroup pergroup;
438 bool isnew;
439
440 outerslot = ExecProcNode(outerPlan);
441 if (TupIsNull(outerslot))
442 break;
443 have_tuples = true;
444
445 /* Find or build hashtable entry for this tuple's group */
446 entry = LookupTupleHashEntry(hashtable,
447 outerslot,
448 &isnew, NULL);
449
450 pergroup = TupleHashEntryGetAdditional(hashtable, entry);
451 /* If new tuple group, initialize counts to zero */
452 if (isnew)
453 {
454 pergroup->numLeft = 0;
455 pergroup->numRight = 0;
456 }
457
458 /* Advance the counts */
459 pergroup->numLeft++;
460
461 /* Must reset expression context after each hashtable lookup */
462 ResetExprContext(econtext);
463 }
464
465 /*
466 * If the outer relation is empty, then we will emit nothing, and we don't
467 * need to read the inner relation at all.
468 */
469 if (have_tuples)
470 {
471 /*
472 * Process each inner-plan tuple, and then fetch the next one, until
473 * we exhaust the inner plan.
474 */
475 for (;;)
476 {
477 TupleTableSlot *innerslot;
478 TupleHashTable hashtable = setopstate->hashtable;
479 TupleHashEntryData *entry;
480
481 innerslot = ExecProcNode(innerPlan);
482 if (TupIsNull(innerslot))
483 break;
484
485 /* For tuples not seen previously, do not make hashtable entry */
486 entry = LookupTupleHashEntry(hashtable,
487 innerslot,
488 NULL, NULL);
489
490 /* Advance the counts if entry is already present */
491 if (entry)
492 {
493 SetOpStatePerGroup pergroup = TupleHashEntryGetAdditional(hashtable, entry);
494
495 pergroup->numRight++;
496 }
497
498 /* Must reset expression context after each hashtable lookup */
499 ResetExprContext(econtext);
500 }
501 }
502
503 setopstate->table_filled = true;
504 /* Initialize to walk the hash table */
505 ResetTupleHashIterator(setopstate->hashtable, &setopstate->hashiter);
506}
507
508/*
509 * ExecSetOp for hashed case: phase 2, retrieving groups from hash table
510 */
511static TupleTableSlot *
513{
514 TupleHashEntry entry;
515 TupleTableSlot *resultTupleSlot;
516
517 /*
518 * get state info from node
519 */
520 resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;
521
522 /*
523 * We loop retrieving groups until we find one we should return
524 */
525 while (!setopstate->setop_done)
526 {
527 TupleHashTable hashtable = setopstate->hashtable;
528 SetOpStatePerGroup pergroup;
529
531
532 /*
533 * Find the next entry in the hash table
534 */
535 entry = ScanTupleHashTable(hashtable, &setopstate->hashiter);
536 if (entry == NULL)
537 {
538 /* No more entries in hashtable, so done */
539 setopstate->setop_done = true;
540 return NULL;
541 }
542
543 /*
544 * See if we should emit any copies of this tuple, and if so return
545 * the first copy.
546 */
547 pergroup = TupleHashEntryGetAdditional(hashtable, entry);
548 set_output_count(setopstate, pergroup);
549
550 if (setopstate->numOutput > 0)
551 {
552 setopstate->numOutput--;
554 resultTupleSlot,
555 false);
556 }
557 }
558
559 /* No more groups */
560 ExecClearTuple(resultTupleSlot);
561 return NULL;
562}
563
564/* ----------------------------------------------------------------
565 * ExecInitSetOp
566 *
567 * This initializes the setop node state structures and
568 * the node's subplan.
569 * ----------------------------------------------------------------
570 */
572ExecInitSetOp(SetOp *node, EState *estate, int eflags)
573{
574 SetOpState *setopstate;
575
576 /* check for unsupported flags */
578
579 /*
580 * create state structure
581 */
582 setopstate = makeNode(SetOpState);
583 setopstate->ps.plan = (Plan *) node;
584 setopstate->ps.state = estate;
585 setopstate->ps.ExecProcNode = ExecSetOp;
586
587 setopstate->setop_done = false;
588 setopstate->numOutput = 0;
589 setopstate->numCols = node->numCols;
590 setopstate->need_init = true;
591
592 /*
593 * create expression context
594 */
595 ExecAssignExprContext(estate, &setopstate->ps);
596
597 /*
598 * If hashing, we also need a longer-lived context to store the hash
599 * table. The table can't just be kept in the per-query context because
600 * we want to be able to throw it away in ExecReScanSetOp. We can use a
601 * BumpContext to save storage, because we will have no need to delete
602 * individual table entries.
603 */
604 if (node->strategy == SETOP_HASHED)
605 setopstate->tuplesContext =
607 "SetOp hashed tuples",
609
610 /*
611 * initialize child nodes
612 *
613 * If we are hashing then the child plans do not need to handle REWIND
614 * efficiently; see ExecReScanSetOp.
615 */
616 if (node->strategy == SETOP_HASHED)
617 eflags &= ~EXEC_FLAG_REWIND;
618 outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate, eflags);
619 innerPlanState(setopstate) = ExecInitNode(innerPlan(node), estate, eflags);
620
621 /*
622 * Initialize locally-allocated slots. In hashed mode, we just need a
623 * result slot. In sorted mode, we need one first-tuple-of-group slot for
624 * each input; we use the result slot for the left input's slot and create
625 * another for the right input. (Note: the nextTupleSlot slots are not
626 * ours, but just point to the last slot returned by the input plan node.)
627 */
629 if (node->strategy != SETOP_HASHED)
630 {
631 setopstate->leftInput.firstTupleSlot =
632 setopstate->ps.ps_ResultTupleSlot;
633 setopstate->rightInput.firstTupleSlot =
635 setopstate->ps.ps_ResultTupleDesc,
637 }
638
639 /* Setop nodes do no projections. */
640 setopstate->ps.ps_ProjInfo = NULL;
641
642 /*
643 * Precompute fmgr lookup data for inner loop. We need equality and
644 * hashing functions to do it by hashing, while for sorting we need
645 * SortSupport data.
646 */
647 if (node->strategy == SETOP_HASHED)
649 node->cmpOperators,
650 &setopstate->eqfuncoids,
651 &setopstate->hashfunctions);
652 else
653 {
654 int nkeys = node->numCols;
655
656 setopstate->sortKeys = (SortSupport)
657 palloc0(nkeys * sizeof(SortSupportData));
658 for (int i = 0; i < nkeys; i++)
659 {
660 SortSupport sortKey = setopstate->sortKeys + i;
661
663 sortKey->ssup_collation = node->cmpCollations[i];
664 sortKey->ssup_nulls_first = node->cmpNullsFirst[i];
665 sortKey->ssup_attno = node->cmpColIdx[i];
666 /* abbreviated key conversion is not useful here */
667 sortKey->abbreviate = false;
668
669 PrepareSortSupportFromOrderingOp(node->cmpOperators[i], sortKey);
670 }
671 }
672
673 /* Create a hash table if needed */
674 if (node->strategy == SETOP_HASHED)
675 {
676 build_hash_table(setopstate);
677 setopstate->table_filled = false;
678 }
679
680 return setopstate;
681}
682
683/* ----------------------------------------------------------------
684 * ExecEndSetOp
685 *
686 * This shuts down the subplans and frees resources allocated
687 * to this node.
688 * ----------------------------------------------------------------
689 */
690void
692{
693 /* free subsidiary stuff including hashtable data */
694 if (node->tuplesContext)
696
699}
700
701
702void
704{
707
709 node->setop_done = false;
710 node->numOutput = 0;
711
712 if (((SetOp *) node->ps.plan)->strategy == SETOP_HASHED)
713 {
714 /*
715 * In the hashed case, if we haven't yet built the hash table then we
716 * can just return; nothing done yet, so nothing to undo. If subnode's
717 * chgParam is not NULL then it will be re-scanned by ExecProcNode,
718 * else no reason to re-scan it at all.
719 */
720 if (!node->table_filled)
721 return;
722
723 /*
724 * If we do have the hash table and the subplans do not have any
725 * parameter changes, then we can just rescan the existing hash table;
726 * no need to build it again.
727 */
728 if (outerPlan->chgParam == NULL && innerPlan->chgParam == NULL)
729 {
731 return;
732 }
733
734 /* Else, we must rebuild the hashtable */
736 node->table_filled = false;
737 }
738 else
739 {
740 /* Need to re-read first input from each side */
741 node->need_init = true;
742 }
743
744 /*
745 * if chgParam of subnode is not null then plan will be re-scanned by
746 * first ExecProcNode.
747 */
748 if (outerPlan->chgParam == NULL)
750 if (innerPlan->chgParam == NULL)
752}
int16 AttrNumber
Definition: attnum.h:21
MemoryContext BumpContextCreate(MemoryContext parent, const char *name, Size minContextSize, Size initBlockSize, Size maxBlockSize)
Definition: bump.c:133
int64_t int64
Definition: c.h:540
size_t Size
Definition: c.h:615
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
void ExecReScan(PlanState *node)
Definition: execAmi.c:77
void execTuplesHashPrepare(int numCols, const Oid *eqOperators, Oid **eqFuncOids, FmgrInfo **hashFunctions)
Definition: execGrouping.c:100
TupleHashTable BuildTupleHashTable(PlanState *parent, TupleDesc inputDesc, const TupleTableSlotOps *inputOps, int numCols, AttrNumber *keyColIdx, const Oid *eqfuncoids, FmgrInfo *hashfunctions, Oid *collations, double nelements, Size additionalsize, MemoryContext metacxt, MemoryContext tuplescxt, MemoryContext tempcxt, bool use_variable_hash_iv)
Definition: execGrouping.c:184
TupleHashEntry LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, bool *isnew, uint32 *hash)
Definition: execGrouping.c:382
Size EstimateTupleHashTableSpace(double nentries, Size tupleWidth, Size additionalsize)
Definition: execGrouping.c:321
void ResetTupleHashTable(TupleHashTable hashtable)
Definition: execGrouping.c:302
void ExecEndNode(PlanState *node)
Definition: execProcnode.c:562
PlanState * ExecInitNode(Plan *node, EState *estate, int eflags)
Definition: execProcnode.c:142
TupleTableSlot * ExecStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot, bool shouldFree)
Definition: execTuples.c:1635
TupleTableSlot * ExecInitExtraTupleSlot(EState *estate, TupleDesc tupledesc, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:2020
void ExecInitResultTupleSlotTL(PlanState *planstate, const TupleTableSlotOps *tts_ops)
Definition: execTuples.c:1988
const TupleTableSlotOps TTSOpsMinimalTuple
Definition: execTuples.c:86
TupleDesc ExecGetResultType(PlanState *planstate)
Definition: execUtils.c:495
void ExecAssignExprContext(EState *estate, PlanState *planstate)
Definition: execUtils.c:485
const TupleTableSlotOps * ExecGetCommonChildSlotOps(PlanState *ps)
Definition: execUtils.c:563
#define outerPlanState(node)
Definition: execnodes.h:1261
#define ScanTupleHashTable(htable, iter)
Definition: execnodes.h:900
#define ResetTupleHashIterator(htable, iter)
Definition: execnodes.h:898
#define innerPlanState(node)
Definition: execnodes.h:1260
static MinimalTuple TupleHashEntryGetTuple(TupleHashEntry entry)
Definition: executor.h:178
#define EXEC_FLAG_BACKWARD
Definition: executor.h:69
static void * TupleHashEntryGetAdditional(TupleHashTable hashtable, TupleHashEntry entry)
Definition: executor.h:192
#define ResetExprContext(econtext)
Definition: executor.h:650
static TupleTableSlot * ExecProcNode(PlanState *node)
Definition: executor.h:314
#define EXEC_FLAG_MARK
Definition: executor.h:70
static int compare(const void *arg1, const void *arg2)
Definition: geqo_pool.c:145
Assert(PointerIsAligned(start, uint64))
FILE * input
int i
Definition: isn.c:77
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:81
void * palloc0(Size size)
Definition: mcxt.c:1395
MemoryContext CurrentMemoryContext
Definition: mcxt.c:160
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:469
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:123
Size EstimateSetOpHashTableSpace(double nentries, Size tupleWidth)
Definition: nodeSetOp.c:115
static void setop_fill_hash_table(SetOpState *setopstate)
Definition: nodeSetOp.c:415
static void setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan, SetOpState *setopstate)
Definition: nodeSetOp.c:339
static void build_hash_table(SetOpState *setopstate)
Definition: nodeSetOp.c:84
void ExecEndSetOp(SetOpState *node)
Definition: nodeSetOp.c:691
static void set_output_count(SetOpState *setopstate, SetOpStatePerGroup pergroup)
Definition: nodeSetOp.c:128
static TupleTableSlot * ExecSetOp(PlanState *pstate)
Definition: nodeSetOp.c:168
static TupleTableSlot * setop_retrieve_hash_table(SetOpState *setopstate)
Definition: nodeSetOp.c:512
static int setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2, SetOpState *setopstate)
Definition: nodeSetOp.c:386
static TupleTableSlot * setop_retrieve_sorted(SetOpState *setopstate)
Definition: nodeSetOp.c:205
struct SetOpStatePerGroupData SetOpStatePerGroupData
SetOpState * ExecInitSetOp(SetOp *node, EState *estate, int eflags)
Definition: nodeSetOp.c:572
SetOpStatePerGroupData * SetOpStatePerGroup
Definition: nodeSetOp.c:68
void ExecReScanSetOp(SetOpState *node)
Definition: nodeSetOp.c:703
@ SETOPCMD_EXCEPT
Definition: nodes.h:410
@ SETOPCMD_EXCEPT_ALL
Definition: nodes.h:411
@ SETOPCMD_INTERSECT_ALL
Definition: nodes.h:409
@ SETOPCMD_INTERSECT
Definition: nodes.h:408
@ SETOP_HASHED
Definition: nodes.h:417
#define makeNode(_type_)
Definition: nodes.h:161
#define castNode(_type_, nodeptr)
Definition: nodes.h:182
#define innerPlan(node)
Definition: plannodes.h:260
#define outerPlan(node)
Definition: plannodes.h:261
uint64_t Datum
Definition: postgres.h:70
char * s1
char * s2
void PrepareSortSupportFromOrderingOp(Oid orderingOp, SortSupport ssup)
Definition: sortsupport.c:134
struct SortSupportData * SortSupport
Definition: sortsupport.h:58
static int ApplySortComparator(Datum datum1, bool isNull1, Datum datum2, bool isNull2, SortSupport ssup)
Definition: sortsupport.h:200
MemoryContext es_query_cxt
Definition: execnodes.h:710
Plan * plan
Definition: execnodes.h:1165
EState * state
Definition: execnodes.h:1167
TupleDesc ps_ResultTupleDesc
Definition: execnodes.h:1202
ExprContext * ps_ExprContext
Definition: execnodes.h:1204
TupleTableSlot * ps_ResultTupleSlot
Definition: execnodes.h:1203
ProjectionInfo * ps_ProjInfo
Definition: execnodes.h:1205
ExecProcNodeMtd ExecProcNode
Definition: execnodes.h:1171
TupleTableSlot * nextTupleSlot
Definition: execnodes.h:2854
TupleTableSlot * firstTupleSlot
Definition: execnodes.h:2852
bool need_init
Definition: execnodes.h:2869
SortSupport sortKeys
Definition: execnodes.h:2866
MemoryContext tuplesContext
Definition: execnodes.h:2875
TupleHashIterator hashiter
Definition: execnodes.h:2877
bool table_filled
Definition: execnodes.h:2876
SetOpStatePerInput rightInput
Definition: execnodes.h:2868
PlanState ps
Definition: execnodes.h:2860
Oid * eqfuncoids
Definition: execnodes.h:2872
TupleHashTable hashtable
Definition: execnodes.h:2874
FmgrInfo * hashfunctions
Definition: execnodes.h:2873
SetOpStatePerInput leftInput
Definition: execnodes.h:2867
int64 numOutput
Definition: execnodes.h:2862
bool setop_done
Definition: execnodes.h:2861
SetOpStrategy strategy
Definition: plannodes.h:1433
SetOpCmd cmd
Definition: plannodes.h:1430
int numCols
Definition: plannodes.h:1436
Cardinality numGroups
Definition: plannodes.h:1449
AttrNumber ssup_attno
Definition: sortsupport.h:81
bool ssup_nulls_first
Definition: sortsupport.h:75
MemoryContext ssup_cxt
Definition: sortsupport.h:66
static MinimalTuple ExecCopySlotMinimalTuple(TupleTableSlot *slot)
Definition: tuptable.h:495
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:457
#define TupIsNull(slot)
Definition: tuptable.h:309
static void slot_getallattrs(TupleTableSlot *slot)
Definition: tuptable.h:371