Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeSetOp.c
4 : * Routines to handle INTERSECT and EXCEPT selection
5 : *
6 : * The input of a SetOp node consists of two relations (outer and inner)
7 : * with identical column sets. In EXCEPT queries the outer relation is
8 : * always the left side, while in INTERSECT cases the planner tries to
9 : * make the outer relation be the smaller of the two inputs.
10 : *
11 : * In SETOP_SORTED mode, each input has been sorted according to all the
12 : * grouping columns. The SetOp node essentially performs a merge join on
13 : * the grouping columns, except that it is only interested in counting how
14 : * many tuples from each input match. Then it is a simple matter to emit
15 : * the output demanded by the SQL spec for INTERSECT, INTERSECT ALL, EXCEPT,
16 : * or EXCEPT ALL.
17 : *
18 : * In SETOP_HASHED mode, the inputs are delivered in no particular order.
19 : * We read the outer relation and build a hash table in memory with one entry
20 : * for each group of identical tuples, counting the number of tuples in the
21 : * group. Then we read the inner relation and count the number of tuples
22 : * matching each outer group. (We can disregard any tuples appearing only
23 : * in the inner relation, since they cannot result in any output.) After
24 : * seeing all the input, we scan the hashtable and generate the correct
25 : * output using those counts.
26 : *
27 : * This node type is not used for UNION or UNION ALL, since those can be
28 : * implemented more cheaply (there's no need to count the number of
29 : * matching tuples).
30 : *
31 : * Note that SetOp does no qual checking nor projection. The delivered
32 : * output tuples are just copies of the first-to-arrive tuple in each
33 : * input group.
34 : *
35 : *
36 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
37 : * Portions Copyright (c) 1994, Regents of the University of California
38 : *
39 : *
40 : * IDENTIFICATION
41 : * src/backend/executor/nodeSetOp.c
42 : *
43 : *-------------------------------------------------------------------------
44 : */
45 :
46 : #include "postgres.h"
47 :
48 : #include "access/htup_details.h"
49 : #include "executor/executor.h"
50 : #include "executor/nodeSetOp.h"
51 : #include "miscadmin.h"
52 : #include "utils/memutils.h"
53 :
54 :
/*
 * SetOpStatePerGroupData - per-group working state
 *
 * In SETOP_SORTED mode, we need only one of these structs, and it's just a
 * local in setop_retrieve_sorted.  In SETOP_HASHED mode, the hash table
 * contains one of these for each tuple group.
 */
typedef struct SetOpStatePerGroupData
{
	int64		numLeft;		/* number of left-input dups in group */
	int64		numRight;		/* number of right-input dups in group */
} SetOpStatePerGroupData;

/* Convenience pointer type; hashtable entries' "additional" data is one */
typedef SetOpStatePerGroupData *SetOpStatePerGroup;
69 :
70 :
71 : static TupleTableSlot *setop_retrieve_sorted(SetOpState *setopstate);
72 : static void setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
73 : SetOpState *setopstate);
74 : static int setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2,
75 : SetOpState *setopstate);
76 : static void setop_fill_hash_table(SetOpState *setopstate);
77 : static TupleTableSlot *setop_retrieve_hash_table(SetOpState *setopstate);
78 :
79 :
/*
 * Initialize the hash table to empty.
 *
 * The table is keyed on the node's numCols grouping columns (cmpColIdx),
 * sized for the planner's estimate of node->numGroups groups, and each
 * entry carries a SetOpStatePerGroupData as its "additional" payload.
 * Entries live in setopstate->tableContext so that ExecReScanSetOp can
 * discard them wholesale.  Used only in SETOP_HASHED mode.
 */
static void
build_hash_table(SetOpState *setopstate)
{
	SetOp	   *node = (SetOp *) setopstate->ps.plan;
	ExprContext *econtext = setopstate->ps.ps_ExprContext;
	TupleDesc	desc = ExecGetResultType(outerPlanState(setopstate));

	Assert(node->strategy == SETOP_HASHED);
	Assert(node->numGroups > 0);

	/*
	 * If both child plans deliver the same fixed tuple slot type, we can tell
	 * BuildTupleHashTable to expect that slot type as input.  Otherwise,
	 * we'll pass NULL denoting that any slot type is possible.
	 */
	setopstate->hashtable = BuildTupleHashTable(&setopstate->ps,
												desc,
												ExecGetCommonChildSlotOps(&setopstate->ps),
												node->numCols,
												node->cmpColIdx,
												setopstate->eqfuncoids,
												setopstate->hashfunctions,
												node->cmpCollations,
												node->numGroups,
												sizeof(SetOpStatePerGroupData),
												setopstate->ps.state->es_query_cxt,
												setopstate->tableContext,
												econtext->ecxt_per_tuple_memory,
												false);
}
113 :
114 : /*
115 : * We've completed processing a tuple group. Decide how many copies (if any)
116 : * of its representative row to emit, and store the count into numOutput.
117 : * This logic is straight from the SQL92 specification.
118 : */
119 : static void
120 442720 : set_output_count(SetOpState *setopstate, SetOpStatePerGroup pergroup)
121 : {
122 442720 : SetOp *plannode = (SetOp *) setopstate->ps.plan;
123 :
124 442720 : switch (plannode->cmd)
125 : {
126 60360 : case SETOPCMD_INTERSECT:
127 60360 : if (pergroup->numLeft > 0 && pergroup->numRight > 0)
128 60228 : setopstate->numOutput = 1;
129 : else
130 132 : setopstate->numOutput = 0;
131 60360 : break;
132 36 : case SETOPCMD_INTERSECT_ALL:
133 36 : setopstate->numOutput =
134 36 : (pergroup->numLeft < pergroup->numRight) ?
135 36 : pergroup->numLeft : pergroup->numRight;
136 36 : break;
137 370240 : case SETOPCMD_EXCEPT:
138 370240 : if (pergroup->numLeft > 0 && pergroup->numRight == 0)
139 1422 : setopstate->numOutput = 1;
140 : else
141 368818 : setopstate->numOutput = 0;
142 370240 : break;
143 12084 : case SETOPCMD_EXCEPT_ALL:
144 12084 : setopstate->numOutput =
145 12084 : (pergroup->numLeft < pergroup->numRight) ?
146 12084 : 0 : (pergroup->numLeft - pergroup->numRight);
147 12084 : break;
148 0 : default:
149 0 : elog(ERROR, "unrecognized set op: %d", (int) plannode->cmd);
150 : break;
151 : }
152 442720 : }
153 :
154 :
155 : /* ----------------------------------------------------------------
156 : * ExecSetOp
157 : * ----------------------------------------------------------------
158 : */
159 : static TupleTableSlot * /* return: a tuple or NULL */
160 63554 : ExecSetOp(PlanState *pstate)
161 : {
162 63554 : SetOpState *node = castNode(SetOpState, pstate);
163 63554 : SetOp *plannode = (SetOp *) node->ps.plan;
164 63554 : TupleTableSlot *resultTupleSlot = node->ps.ps_ResultTupleSlot;
165 :
166 63554 : CHECK_FOR_INTERRUPTS();
167 :
168 : /*
169 : * If the previously-returned tuple needs to be returned more than once,
170 : * keep returning it.
171 : */
172 63554 : if (node->numOutput > 0)
173 : {
174 48 : node->numOutput--;
175 48 : return resultTupleSlot;
176 : }
177 :
178 : /* Otherwise, we're done if we are out of groups */
179 63506 : if (node->setop_done)
180 0 : return NULL;
181 :
182 : /* Fetch the next tuple group according to the correct strategy */
183 63506 : if (plannode->strategy == SETOP_HASHED)
184 : {
185 31880 : if (!node->table_filled)
186 932 : setop_fill_hash_table(node);
187 31880 : return setop_retrieve_hash_table(node);
188 : }
189 : else
190 31626 : return setop_retrieve_sorted(node);
191 : }
192 :
/*
 * ExecSetOp for non-hashed case
 *
 * This performs a merge-join-like pass over the two sorted inputs:
 * repeatedly load each side's next group of duplicate tuples (via
 * setop_load_group), compare the groups' representative tuples, and let
 * set_output_count decide how many copies of the left representative to
 * emit.  Returns the next output tuple, or NULL when no groups remain.
 */
static TupleTableSlot *
setop_retrieve_sorted(SetOpState *setopstate)
{
	PlanState  *outerPlan;
	PlanState  *innerPlan;
	TupleTableSlot *resultTupleSlot;

	/*
	 * get state info from node
	 */
	outerPlan = outerPlanState(setopstate);
	innerPlan = innerPlanState(setopstate);
	resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;

	/*
	 * If first time through, establish the invariant that setop_load_group
	 * expects: each side's nextTupleSlot is the next output from the child
	 * plan, or empty if there is no more output from it.
	 */
	if (setopstate->need_init)
	{
		setopstate->need_init = false;

		setopstate->leftInput.nextTupleSlot = ExecProcNode(outerPlan);

		/*
		 * If the outer relation is empty, then we will emit nothing, and we
		 * don't need to read the inner relation at all.
		 */
		if (TupIsNull(setopstate->leftInput.nextTupleSlot))
		{
			setopstate->setop_done = true;
			return NULL;
		}

		setopstate->rightInput.nextTupleSlot = ExecProcNode(innerPlan);

		/* Set flags that we've not completed either side's group */
		setopstate->leftInput.needGroup = true;
		setopstate->rightInput.needGroup = true;
	}

	/*
	 * We loop retrieving groups until we find one we should return
	 */
	while (!setopstate->setop_done)
	{
		int			cmpresult;
		SetOpStatePerGroupData pergroup;

		/*
		 * Fetch the rest of the current outer group, if we didn't already.
		 */
		if (setopstate->leftInput.needGroup)
			setop_load_group(&setopstate->leftInput, outerPlan, setopstate);

		/*
		 * If no more outer groups, we're done, and don't need to look at any
		 * more of the inner relation.
		 */
		if (setopstate->leftInput.numTuples == 0)
		{
			setopstate->setop_done = true;
			break;
		}

		/*
		 * Fetch the rest of the current inner group, if we didn't already.
		 */
		if (setopstate->rightInput.needGroup)
			setop_load_group(&setopstate->rightInput, innerPlan, setopstate);

		/*
		 * Determine whether we have matching groups on both sides (this is
		 * basically like the core logic of a merge join).
		 */
		if (setopstate->rightInput.numTuples == 0)
			cmpresult = -1;		/* as though left input is lesser */
		else
			cmpresult = setop_compare_slots(setopstate->leftInput.firstTupleSlot,
											setopstate->rightInput.firstTupleSlot,
											setopstate);

		if (cmpresult < 0)
		{
			/* Left group is first, and has no right matches */
			pergroup.numLeft = setopstate->leftInput.numTuples;
			pergroup.numRight = 0;
			/* We'll need another left group next time */
			setopstate->leftInput.needGroup = true;
		}
		else if (cmpresult == 0)
		{
			/* We have matching groups */
			pergroup.numLeft = setopstate->leftInput.numTuples;
			pergroup.numRight = setopstate->rightInput.numTuples;
			/* We'll need to read from both sides next time */
			setopstate->leftInput.needGroup = true;
			setopstate->rightInput.needGroup = true;
		}
		else
		{
			/* Right group has no left matches, so we can ignore it */
			setopstate->rightInput.needGroup = true;
			continue;
		}

		/*
		 * Done scanning these input tuple groups.  See if we should emit any
		 * copies of result tuple, and if so return the first copy.  (Note
		 * that the result tuple is the same as the left input's firstTuple
		 * slot.)
		 */
		set_output_count(setopstate, &pergroup);

		if (setopstate->numOutput > 0)
		{
			setopstate->numOutput--;
			return resultTupleSlot;
		}
	}

	/* No more groups */
	ExecClearTuple(resultTupleSlot);
	return NULL;
}
322 :
323 : /*
324 : * Load next group of tuples from one child plan or the other.
325 : *
326 : * On entry, we've already read the first tuple of the next group
327 : * (if there is one) into input->nextTupleSlot. This invariant
328 : * is maintained on exit.
329 : */
330 : static void
331 183138 : setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
332 : SetOpState *setopstate)
333 : {
334 183138 : input->needGroup = false;
335 :
336 : /* If we've exhausted this child plan, report an empty group */
337 183138 : if (TupIsNull(input->nextTupleSlot))
338 : {
339 1086 : ExecClearTuple(input->firstTupleSlot);
340 1086 : input->numTuples = 0;
341 1086 : return;
342 : }
343 :
344 : /* Make a local copy of the first tuple for comparisons */
345 182052 : ExecStoreMinimalTuple(ExecCopySlotMinimalTuple(input->nextTupleSlot),
346 : input->firstTupleSlot,
347 : true);
348 : /* and count it */
349 182052 : input->numTuples = 1;
350 :
351 : /* Scan till we find the end-of-group */
352 : for (;;)
353 30330 : {
354 : int cmpresult;
355 :
356 : /* Get next input tuple, if there is one */
357 212382 : input->nextTupleSlot = ExecProcNode(inputPlan);
358 212382 : if (TupIsNull(input->nextTupleSlot))
359 : break;
360 :
361 : /* There is; does it belong to same group as firstTuple? */
362 210720 : cmpresult = setop_compare_slots(input->firstTupleSlot,
363 : input->nextTupleSlot,
364 : setopstate);
365 : Assert(cmpresult <= 0); /* else input is mis-sorted */
366 210720 : if (cmpresult != 0)
367 180390 : break;
368 :
369 : /* Still in same group, so count this tuple */
370 30330 : input->numTuples++;
371 : }
372 : }
373 :
374 : /*
375 : * Compare the tuples in the two given slots.
376 : */
377 : static int
378 301806 : setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2,
379 : SetOpState *setopstate)
380 : {
381 : /* We'll often need to fetch all the columns, so just do it */
382 301806 : slot_getallattrs(s1);
383 301806 : slot_getallattrs(s2);
384 426462 : for (int nkey = 0; nkey < setopstate->numCols; nkey++)
385 : {
386 305826 : SortSupport sortKey = setopstate->sortKeys + nkey;
387 305826 : AttrNumber attno = sortKey->ssup_attno;
388 305826 : Datum datum1 = s1->tts_values[attno - 1],
389 305826 : datum2 = s2->tts_values[attno - 1];
390 305826 : bool isNull1 = s1->tts_isnull[attno - 1],
391 305826 : isNull2 = s2->tts_isnull[attno - 1];
392 : int compare;
393 :
394 305826 : compare = ApplySortComparator(datum1, isNull1,
395 : datum2, isNull2,
396 : sortKey);
397 305826 : if (compare != 0)
398 181170 : return compare;
399 : }
400 120636 : return 0;
401 : }
402 :
/*
 * ExecSetOp for hashed case: phase 1, read inputs and build hash table
 *
 * First pass counts outer-plan tuples per group (numLeft); second pass
 * counts inner-plan tuples that match an existing group (numRight).  On
 * exit the table is marked filled and the hash iterator is positioned at
 * the start for phase 2 (setop_retrieve_hash_table).
 */
static void
setop_fill_hash_table(SetOpState *setopstate)
{
	PlanState  *outerPlan;
	PlanState  *innerPlan;
	ExprContext *econtext = setopstate->ps.ps_ExprContext;
	bool		have_tuples = false;

	/*
	 * get state info from node
	 */
	outerPlan = outerPlanState(setopstate);
	innerPlan = innerPlanState(setopstate);

	/*
	 * Process each outer-plan tuple, and then fetch the next one, until we
	 * exhaust the outer plan.
	 */
	for (;;)
	{
		TupleTableSlot *outerslot;
		TupleHashEntryData *entry;
		bool		isnew;

		outerslot = ExecProcNode(outerPlan);
		if (TupIsNull(outerslot))
			break;
		have_tuples = true;

		/* Find or build hashtable entry for this tuple's group */
		entry = LookupTupleHashEntry(setopstate->hashtable,
									 outerslot,
									 &isnew, NULL);

		/* If new tuple group, initialize counts to zero */
		if (isnew)
		{
			/* allocated in tablecxt so it lives as long as the hash table */
			entry->additional = (SetOpStatePerGroup)
				MemoryContextAllocZero(setopstate->hashtable->tablecxt,
									   sizeof(SetOpStatePerGroupData));
		}

		/* Advance the counts */
		((SetOpStatePerGroup) entry->additional)->numLeft++;

		/* Must reset expression context after each hashtable lookup */
		ResetExprContext(econtext);
	}

	/*
	 * If the outer relation is empty, then we will emit nothing, and we don't
	 * need to read the inner relation at all.
	 */
	if (have_tuples)
	{
		/*
		 * Process each inner-plan tuple, and then fetch the next one, until
		 * we exhaust the inner plan.
		 */
		for (;;)
		{
			TupleTableSlot *innerslot;
			TupleHashEntryData *entry;

			innerslot = ExecProcNode(innerPlan);
			if (TupIsNull(innerslot))
				break;

			/* For tuples not seen previously, do not make hashtable entry */
			entry = LookupTupleHashEntry(setopstate->hashtable,
										 innerslot,
										 NULL, NULL);

			/* Advance the counts if entry is already present */
			if (entry)
				((SetOpStatePerGroup) entry->additional)->numRight++;

			/* Must reset expression context after each hashtable lookup */
			ResetExprContext(econtext);
		}
	}

	setopstate->table_filled = true;
	/* Initialize to walk the hash table */
	ResetTupleHashIterator(setopstate->hashtable, &setopstate->hashiter);
}
492 :
493 : /*
494 : * ExecSetOp for hashed case: phase 2, retrieving groups from hash table
495 : */
496 : static TupleTableSlot *
497 31880 : setop_retrieve_hash_table(SetOpState *setopstate)
498 : {
499 : TupleHashEntryData *entry;
500 : TupleTableSlot *resultTupleSlot;
501 :
502 : /*
503 : * get state info from node
504 : */
505 31880 : resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;
506 :
507 : /*
508 : * We loop retrieving groups until we find one we should return
509 : */
510 352614 : while (!setopstate->setop_done)
511 : {
512 352614 : CHECK_FOR_INTERRUPTS();
513 :
514 : /*
515 : * Find the next entry in the hash table
516 : */
517 352614 : entry = ScanTupleHashTable(setopstate->hashtable, &setopstate->hashiter);
518 352614 : if (entry == NULL)
519 : {
520 : /* No more entries in hashtable, so done */
521 932 : setopstate->setop_done = true;
522 932 : return NULL;
523 : }
524 :
525 : /*
526 : * See if we should emit any copies of this tuple, and if so return
527 : * the first copy.
528 : */
529 351682 : set_output_count(setopstate, (SetOpStatePerGroup) entry->additional);
530 :
531 351682 : if (setopstate->numOutput > 0)
532 : {
533 30948 : setopstate->numOutput--;
534 30948 : return ExecStoreMinimalTuple(entry->firstTuple,
535 : resultTupleSlot,
536 : false);
537 : }
538 : }
539 :
540 : /* No more groups */
541 0 : ExecClearTuple(resultTupleSlot);
542 0 : return NULL;
543 : }
544 :
/* ----------------------------------------------------------------
 *		ExecInitSetOp
 *
 *		This initializes the setop node state structures and
 *		the node's subplan.
 *
 * Builds the SetOpState, initializes both child plans, sets up the
 * tuple slots, and precomputes either hashing support (SETOP_HASHED)
 * or SortSupport comparators (sorted mode).
 * ----------------------------------------------------------------
 */
SetOpState *
ExecInitSetOp(SetOp *node, EState *estate, int eflags)
{
	SetOpState *setopstate;

	/* check for unsupported flags */
	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

	/*
	 * create state structure
	 */
	setopstate = makeNode(SetOpState);
	setopstate->ps.plan = (Plan *) node;
	setopstate->ps.state = estate;
	setopstate->ps.ExecProcNode = ExecSetOp;

	setopstate->setop_done = false;
	setopstate->numOutput = 0;
	setopstate->numCols = node->numCols;
	setopstate->need_init = true;

	/*
	 * create expression context
	 */
	ExecAssignExprContext(estate, &setopstate->ps);

	/*
	 * If hashing, we also need a longer-lived context to store the hash
	 * table.  The table can't just be kept in the per-query context because
	 * we want to be able to throw it away in ExecReScanSetOp.
	 */
	if (node->strategy == SETOP_HASHED)
		setopstate->tableContext =
			AllocSetContextCreate(CurrentMemoryContext,
								  "SetOp hash table",
								  ALLOCSET_DEFAULT_SIZES);

	/*
	 * initialize child nodes
	 *
	 * If we are hashing then the child plans do not need to handle REWIND
	 * efficiently; see ExecReScanSetOp.
	 */
	if (node->strategy == SETOP_HASHED)
		eflags &= ~EXEC_FLAG_REWIND;
	outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate, eflags);
	innerPlanState(setopstate) = ExecInitNode(innerPlan(node), estate, eflags);

	/*
	 * Initialize locally-allocated slots.  In hashed mode, we just need a
	 * result slot.  In sorted mode, we need one first-tuple-of-group slot for
	 * each input; we use the result slot for the left input's slot and create
	 * another for the right input.  (Note: the nextTupleSlot slots are not
	 * ours, but just point to the last slot returned by the input plan node.)
	 */
	ExecInitResultTupleSlotTL(&setopstate->ps, &TTSOpsMinimalTuple);
	if (node->strategy != SETOP_HASHED)
	{
		setopstate->leftInput.firstTupleSlot =
			setopstate->ps.ps_ResultTupleSlot;
		setopstate->rightInput.firstTupleSlot =
			ExecInitExtraTupleSlot(estate,
								   setopstate->ps.ps_ResultTupleDesc,
								   &TTSOpsMinimalTuple);
	}

	/* Setop nodes do no projections. */
	setopstate->ps.ps_ProjInfo = NULL;

	/*
	 * Precompute fmgr lookup data for inner loop.  We need equality and
	 * hashing functions to do it by hashing, while for sorting we need
	 * SortSupport data.
	 */
	if (node->strategy == SETOP_HASHED)
		execTuplesHashPrepare(node->numCols,
							  node->cmpOperators,
							  &setopstate->eqfuncoids,
							  &setopstate->hashfunctions);
	else
	{
		int			nkeys = node->numCols;

		setopstate->sortKeys = (SortSupport)
			palloc0(nkeys * sizeof(SortSupportData));
		for (int i = 0; i < nkeys; i++)
		{
			SortSupport sortKey = setopstate->sortKeys + i;

			sortKey->ssup_cxt = CurrentMemoryContext;
			sortKey->ssup_collation = node->cmpCollations[i];
			sortKey->ssup_nulls_first = node->cmpNullsFirst[i];
			sortKey->ssup_attno = node->cmpColIdx[i];
			/* abbreviated key conversion is not useful here */
			sortKey->abbreviate = false;

			PrepareSortSupportFromOrderingOp(node->cmpOperators[i], sortKey);
		}
	}

	/* Create a hash table if needed */
	if (node->strategy == SETOP_HASHED)
	{
		build_hash_table(setopstate);
		setopstate->table_filled = false;
	}

	return setopstate;
}
661 :
662 : /* ----------------------------------------------------------------
663 : * ExecEndSetOp
664 : *
665 : * This shuts down the subplans and frees resources allocated
666 : * to this node.
667 : * ----------------------------------------------------------------
668 : */
669 : void
670 680 : ExecEndSetOp(SetOpState *node)
671 : {
672 : /* free subsidiary stuff including hashtable */
673 680 : if (node->tableContext)
674 374 : MemoryContextDelete(node->tableContext);
675 :
676 680 : ExecEndNode(outerPlanState(node));
677 680 : ExecEndNode(innerPlanState(node));
678 680 : }
679 :
680 :
/* ----------------------------------------------------------------
 *		ExecReScanSetOp
 *
 *		Reset the node so the next ExecProcNode call restarts the
 *		set operation from scratch (possibly reusing the hash table
 *		if no subplan parameters changed).
 * ----------------------------------------------------------------
 */
void
ExecReScanSetOp(SetOpState *node)
{
	PlanState  *outerPlan = outerPlanState(node);
	PlanState  *innerPlan = innerPlanState(node);

	/* Discard any pending output and clear the done flag */
	ExecClearTuple(node->ps.ps_ResultTupleSlot);
	node->setop_done = false;
	node->numOutput = 0;

	if (((SetOp *) node->ps.plan)->strategy == SETOP_HASHED)
	{
		/*
		 * In the hashed case, if we haven't yet built the hash table then we
		 * can just return; nothing done yet, so nothing to undo. If subnode's
		 * chgParam is not NULL then it will be re-scanned by ExecProcNode,
		 * else no reason to re-scan it at all.
		 */
		if (!node->table_filled)
			return;

		/*
		 * If we do have the hash table and the subplans do not have any
		 * parameter changes, then we can just rescan the existing hash table;
		 * no need to build it again.
		 */
		if (outerPlan->chgParam == NULL && innerPlan->chgParam == NULL)
		{
			ResetTupleHashIterator(node->hashtable, &node->hashiter);
			return;
		}

		/* Release any hashtable storage */
		if (node->tableContext)
			MemoryContextReset(node->tableContext);

		/* And rebuild an empty hashtable */
		ResetTupleHashTable(node->hashtable);
		node->table_filled = false;
	}
	else
	{
		/* Need to re-read first input from each side */
		node->need_init = true;
	}

	/*
	 * if chgParam of subnode is not null then plan will be re-scanned by
	 * first ExecProcNode.
	 */
	if (outerPlan->chgParam == NULL)
		ExecReScan(outerPlan);
	if (innerPlan->chgParam == NULL)
		ExecReScan(innerPlan);
}
|