/*-------------------------------------------------------------------------
 *
 * nodeSetOp.c
 *	  Routines to handle INTERSECT and EXCEPT selection
 *
 * The input of a SetOp node consists of two relations (outer and inner)
 * with identical column sets.  In EXCEPT queries the outer relation is
 * always the left side, while in INTERSECT cases the planner tries to
 * make the outer relation be the smaller of the two inputs.
 *
 * In SETOP_SORTED mode, each input has been sorted according to all the
 * grouping columns.  The SetOp node essentially performs a merge join on
 * the grouping columns, except that it is only interested in counting how
 * many tuples from each input match.  Then it is a simple matter to emit
 * the output demanded by the SQL spec for INTERSECT, INTERSECT ALL, EXCEPT,
 * or EXCEPT ALL.
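 *
 * For example, if the single grouping column's sorted values are (1,1,2)
 * on the left and (1,3) on the right, the merge pass sees group 1 with
 * numLeft = 2 and numRight = 1, group 2 with numLeft = 1 and numRight = 0,
 * and discards the right-only group 3.  Plain INTERSECT then emits one
 * "1", while EXCEPT ALL emits one "1" and one "2".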
 *
 * In SETOP_HASHED mode, the inputs are delivered in no particular order.
 * We read the outer relation and build a hash table in memory with one entry
 * for each group of identical tuples, counting the number of tuples in the
 * group.  Then we read the inner relation and count the number of tuples
 * matching each outer group.  (We can disregard any tuples appearing only
 * in the inner relation, since they cannot result in any output.)  After
 * seeing all the input, we scan the hashtable and generate the correct
 * output using those counts.
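 *
 * In the example above, the outer pass would create hash entries for
 * groups 1 (numLeft = 2) and 2 (numLeft = 1); the inner pass would then
 * bump group 1's numRight to 1 and, finding no entry for 3, ignore that
 * tuple.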
 *
 * This node type is not used for UNION or UNION ALL, since those can be
 * implemented more cheaply (there's no need to count the number of
 * matching tuples).
 *
 * Note that SetOp does no qual checking nor projection.  The delivered
 * output tuples are just copies of the first-to-arrive tuple in each
 * input group.
 *
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/executor/nodeSetOp.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/htup_details.h"
#include "executor/executor.h"
#include "executor/nodeSetOp.h"
#include "miscadmin.h"
#include "utils/memutils.h"


/*
 * SetOpStatePerGroupData - per-group working state
 *
 * In SETOP_SORTED mode, we need only one of these structs, and it's just a
 * local in setop_retrieve_sorted.  In SETOP_HASHED mode, the hash table
 * contains one of these for each tuple group.
 */
typedef struct SetOpStatePerGroupData
{
	int64		numLeft;		/* number of left-input dups in group */
	int64		numRight;		/* number of right-input dups in group */
} SetOpStatePerGroupData;

typedef SetOpStatePerGroupData *SetOpStatePerGroup;


static TupleTableSlot *setop_retrieve_sorted(SetOpState *setopstate);
static void setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
							 SetOpState *setopstate);
static int	setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2,
								SetOpState *setopstate);
static void setop_fill_hash_table(SetOpState *setopstate);
static TupleTableSlot *setop_retrieve_hash_table(SetOpState *setopstate);


/*
 * Initialize the hash table to empty.
 */
static void
build_hash_table(SetOpState *setopstate)
{
	SetOp	   *node = (SetOp *) setopstate->ps.plan;
	ExprContext *econtext = setopstate->ps.ps_ExprContext;
	TupleDesc	desc = ExecGetResultType(outerPlanState(setopstate));

	Assert(node->strategy == SETOP_HASHED);

	/*
	 * If both child plans deliver the same fixed tuple slot type, we can tell
	 * BuildTupleHashTable to expect that slot type as input.  Otherwise,
	 * we'll pass NULL denoting that any slot type is possible.
	 */
	setopstate->hashtable = BuildTupleHashTable(&setopstate->ps,
												desc,
												ExecGetCommonChildSlotOps(&setopstate->ps),
												node->numCols,
												node->cmpColIdx,
												setopstate->eqfuncoids,
												setopstate->hashfunctions,
												node->cmpCollations,
												node->numGroups,
												sizeof(SetOpStatePerGroupData),
												setopstate->ps.state->es_query_cxt,
												setopstate->tuplesContext,
												econtext->ecxt_per_tuple_memory,
												false);
}

/* Planner support routine to estimate space needed for hash table */
Size
EstimateSetOpHashTableSpace(double nentries, Size tupleWidth)
{
	return EstimateTupleHashTableSpace(nentries,
									   tupleWidth,
									   sizeof(SetOpStatePerGroupData));
}

/*
 * We've completed processing a tuple group.  Decide how many copies (if any)
 * of its representative row to emit, and store the count into numOutput.
 * This logic is straight from the SQL92 specification.
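 *
 * For example, a group with numLeft = 3 and numRight = 2 yields
 * min(3, 2) = 2 rows under INTERSECT ALL and 3 - 2 = 1 row under
 * EXCEPT ALL, but one row under INTERSECT and none under EXCEPT.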
 */
static void
set_output_count(SetOpState *setopstate, SetOpStatePerGroup pergroup)
{
	SetOp	   *plannode = (SetOp *) setopstate->ps.plan;

	switch (plannode->cmd)
	{
		case SETOPCMD_INTERSECT:
			if (pergroup->numLeft > 0 && pergroup->numRight > 0)
				setopstate->numOutput = 1;
			else
				setopstate->numOutput = 0;
			break;
		case SETOPCMD_INTERSECT_ALL:
			setopstate->numOutput =
				(pergroup->numLeft < pergroup->numRight) ?
				pergroup->numLeft : pergroup->numRight;
			break;
		case SETOPCMD_EXCEPT:
			if (pergroup->numLeft > 0 && pergroup->numRight == 0)
				setopstate->numOutput = 1;
			else
				setopstate->numOutput = 0;
			break;
		case SETOPCMD_EXCEPT_ALL:
			setopstate->numOutput =
				(pergroup->numLeft < pergroup->numRight) ?
				0 : (pergroup->numLeft - pergroup->numRight);
			break;
		default:
			elog(ERROR, "unrecognized set op: %d", (int) plannode->cmd);
			break;
	}
}


/* ----------------------------------------------------------------
 *		ExecSetOp
 * ----------------------------------------------------------------
 */
static TupleTableSlot *			/* return: a tuple or NULL */
ExecSetOp(PlanState *pstate)
{
	SetOpState *node = castNode(SetOpState, pstate);
	SetOp	   *plannode = (SetOp *) node->ps.plan;
	TupleTableSlot *resultTupleSlot = node->ps.ps_ResultTupleSlot;

	CHECK_FOR_INTERRUPTS();

	/*
	 * If the previously-returned tuple needs to be returned more than once,
	 * keep returning it.
	 */
	if (node->numOutput > 0)
	{
		node->numOutput--;
		return resultTupleSlot;
	}

	/* Otherwise, we're done if we are out of groups */
	if (node->setop_done)
		return NULL;

	/* Fetch the next tuple group according to the correct strategy */
	if (plannode->strategy == SETOP_HASHED)
	{
		if (!node->table_filled)
			setop_fill_hash_table(node);
		return setop_retrieve_hash_table(node);
	}
	else
		return setop_retrieve_sorted(node);
}

/*
 * ExecSetOp for non-hashed case
 */
static TupleTableSlot *
setop_retrieve_sorted(SetOpState *setopstate)
{
	PlanState  *outerPlan;
	PlanState  *innerPlan;
	TupleTableSlot *resultTupleSlot;

	/*
	 * get state info from node
	 */
	outerPlan = outerPlanState(setopstate);
	innerPlan = innerPlanState(setopstate);
	resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;

	/*
	 * If first time through, establish the invariant that setop_load_group
	 * expects: each side's nextTupleSlot is the next output from the child
	 * plan, or empty if there is no more output from it.
	 */
	if (setopstate->need_init)
	{
		setopstate->need_init = false;

		setopstate->leftInput.nextTupleSlot = ExecProcNode(outerPlan);

		/*
		 * If the outer relation is empty, then we will emit nothing, and we
		 * don't need to read the inner relation at all.
		 */
		if (TupIsNull(setopstate->leftInput.nextTupleSlot))
		{
			setopstate->setop_done = true;
			return NULL;
		}

		setopstate->rightInput.nextTupleSlot = ExecProcNode(innerPlan);

		/* Set flags that we've not completed either side's group */
		setopstate->leftInput.needGroup = true;
		setopstate->rightInput.needGroup = true;
	}

	/*
	 * We loop retrieving groups until we find one we should return
	 */
	while (!setopstate->setop_done)
	{
		int			cmpresult;
		SetOpStatePerGroupData pergroup;

		/*
		 * Fetch the rest of the current outer group, if we didn't already.
		 */
		if (setopstate->leftInput.needGroup)
			setop_load_group(&setopstate->leftInput, outerPlan, setopstate);

		/*
		 * If no more outer groups, we're done, and don't need to look at any
		 * more of the inner relation.
		 */
		if (setopstate->leftInput.numTuples == 0)
		{
			setopstate->setop_done = true;
			break;
		}

		/*
		 * Fetch the rest of the current inner group, if we didn't already.
		 */
		if (setopstate->rightInput.needGroup)
			setop_load_group(&setopstate->rightInput, innerPlan, setopstate);

		/*
		 * Determine whether we have matching groups on both sides (this is
		 * basically like the core logic of a merge join).
		 */
		if (setopstate->rightInput.numTuples == 0)
			cmpresult = -1;		/* as though left input is lesser */
		else
			cmpresult = setop_compare_slots(setopstate->leftInput.firstTupleSlot,
											setopstate->rightInput.firstTupleSlot,
											setopstate);

		if (cmpresult < 0)
		{
			/* Left group is first, and has no right matches */
			pergroup.numLeft = setopstate->leftInput.numTuples;
			pergroup.numRight = 0;
			/* We'll need another left group next time */
			setopstate->leftInput.needGroup = true;
		}
		else if (cmpresult == 0)
		{
			/* We have matching groups */
			pergroup.numLeft = setopstate->leftInput.numTuples;
			pergroup.numRight = setopstate->rightInput.numTuples;
			/* We'll need to read from both sides next time */
			setopstate->leftInput.needGroup = true;
			setopstate->rightInput.needGroup = true;
		}
		else
		{
			/* Right group has no left matches, so we can ignore it */
			setopstate->rightInput.needGroup = true;
			continue;
		}

		/*
		 * Done scanning these input tuple groups.  See if we should emit any
		 * copies of result tuple, and if so return the first copy.  (Note
		 * that the result tuple is the same as the left input's firstTuple
		 * slot.)
		 */
		set_output_count(setopstate, &pergroup);

		if (setopstate->numOutput > 0)
		{
			setopstate->numOutput--;
			return resultTupleSlot;
		}
	}

	/* No more groups */
	ExecClearTuple(resultTupleSlot);
	return NULL;
}

/*
 * Load next group of tuples from one child plan or the other.
 *
 * On entry, we've already read the first tuple of the next group
 * (if there is one) into input->nextTupleSlot.  This invariant
 * is maintained on exit.
 */
static void
setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
				 SetOpState *setopstate)
{
	input->needGroup = false;

	/* If we've exhausted this child plan, report an empty group */
	if (TupIsNull(input->nextTupleSlot))
	{
		ExecClearTuple(input->firstTupleSlot);
		input->numTuples = 0;
		return;
	}

	/* Make a local copy of the first tuple for comparisons */
	ExecStoreMinimalTuple(ExecCopySlotMinimalTuple(input->nextTupleSlot),
						  input->firstTupleSlot,
						  true);
	/* and count it */
	input->numTuples = 1;

	/* Scan till we find the end-of-group */
	for (;;)
	{
		int			cmpresult;

		/* Get next input tuple, if there is one */
		input->nextTupleSlot = ExecProcNode(inputPlan);
		if (TupIsNull(input->nextTupleSlot))
			break;

		/* There is; does it belong to same group as firstTuple? */
		cmpresult = setop_compare_slots(input->firstTupleSlot,
										input->nextTupleSlot,
										setopstate);
		Assert(cmpresult <= 0);	/* else input is mis-sorted */
		if (cmpresult != 0)
			break;

		/* Still in same group, so count this tuple */
		input->numTuples++;
	}
}

/*
 * Compare the tuples in the two given slots.
 */
static int
setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2,
					SetOpState *setopstate)
{
	/* We'll often need to fetch all the columns, so just do it */
	slot_getallattrs(s1);
	slot_getallattrs(s2);
	for (int nkey = 0; nkey < setopstate->numCols; nkey++)
	{
		SortSupport sortKey = setopstate->sortKeys + nkey;
		AttrNumber	attno = sortKey->ssup_attno;
		Datum		datum1 = s1->tts_values[attno - 1],
					datum2 = s2->tts_values[attno - 1];
		bool		isNull1 = s1->tts_isnull[attno - 1],
					isNull2 = s2->tts_isnull[attno - 1];
		int			compare;

		compare = ApplySortComparator(datum1, isNull1,
									  datum2, isNull2,
									  sortKey);
		if (compare != 0)
			return compare;
	}
	return 0;
}

/*
 * ExecSetOp for hashed case: phase 1, read inputs and build hash table
 */
static void
setop_fill_hash_table(SetOpState *setopstate)
{
	PlanState  *outerPlan;
	PlanState  *innerPlan;
	ExprContext *econtext = setopstate->ps.ps_ExprContext;
	bool		have_tuples = false;

	/*
	 * get state info from node
	 */
	outerPlan = outerPlanState(setopstate);
	innerPlan = innerPlanState(setopstate);

	/*
	 * Process each outer-plan tuple, and then fetch the next one, until we
	 * exhaust the outer plan.
	 */
	for (;;)
	{
		TupleTableSlot *outerslot;
		TupleHashTable hashtable = setopstate->hashtable;
		TupleHashEntryData *entry;
		SetOpStatePerGroup pergroup;
		bool		isnew;

		outerslot = ExecProcNode(outerPlan);
		if (TupIsNull(outerslot))
			break;
		have_tuples = true;

		/* Find or build hashtable entry for this tuple's group */
		entry = LookupTupleHashEntry(hashtable,
									 outerslot,
									 &isnew, NULL);

		pergroup = TupleHashEntryGetAdditional(hashtable, entry);
		/* If new tuple group, initialize counts to zero */
		if (isnew)
		{
			pergroup->numLeft = 0;
			pergroup->numRight = 0;
		}

		/* Advance the counts */
		pergroup->numLeft++;

		/* Must reset expression context after each hashtable lookup */
		ResetExprContext(econtext);
	}

	/*
	 * If the outer relation is empty, then we will emit nothing, and we
	 * don't need to read the inner relation at all.
	 */
	if (have_tuples)
	{
		/*
		 * Process each inner-plan tuple, and then fetch the next one, until
		 * we exhaust the inner plan.
		 */
		for (;;)
		{
			TupleTableSlot *innerslot;
			TupleHashTable hashtable = setopstate->hashtable;
			TupleHashEntryData *entry;

			innerslot = ExecProcNode(innerPlan);
			if (TupIsNull(innerslot))
				break;

			/* For tuples not seen previously, do not make hashtable entry */
			entry = LookupTupleHashEntry(hashtable,
										 innerslot,
										 NULL, NULL);

			/* Advance the counts if entry is already present */
			if (entry)
			{
				SetOpStatePerGroup pergroup = TupleHashEntryGetAdditional(hashtable, entry);

				pergroup->numRight++;
			}

			/* Must reset expression context after each hashtable lookup */
			ResetExprContext(econtext);
		}
	}

	setopstate->table_filled = true;
	/* Initialize to walk the hash table */
	ResetTupleHashIterator(setopstate->hashtable, &setopstate->hashiter);
}

/*
 * ExecSetOp for hashed case: phase 2, retrieving groups from hash table
 */
static TupleTableSlot *
setop_retrieve_hash_table(SetOpState *setopstate)
{
	TupleHashEntry entry;
	TupleTableSlot *resultTupleSlot;

	/*
	 * get state info from node
	 */
	resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;

	/*
	 * We loop retrieving groups until we find one we should return
	 */
	while (!setopstate->setop_done)
	{
		TupleHashTable hashtable = setopstate->hashtable;
		SetOpStatePerGroup pergroup;

		CHECK_FOR_INTERRUPTS();

		/*
		 * Find the next entry in the hash table
		 */
		entry = ScanTupleHashTable(hashtable, &setopstate->hashiter);
		if (entry == NULL)
		{
			/* No more entries in hashtable, so done */
			setopstate->setop_done = true;
			return NULL;
		}

		/*
		 * See if we should emit any copies of this tuple, and if so return
		 * the first copy.
		 */
		pergroup = TupleHashEntryGetAdditional(hashtable, entry);
		set_output_count(setopstate, pergroup);

		if (setopstate->numOutput > 0)
		{
			setopstate->numOutput--;
			return ExecStoreMinimalTuple(TupleHashEntryGetTuple(entry),
										 resultTupleSlot,
										 false);
		}
	}

	/* No more groups */
	ExecClearTuple(resultTupleSlot);
	return NULL;
}

/* ----------------------------------------------------------------
 *		ExecInitSetOp
 *
 *		This initializes the setop node state structures and
 *		the node's subplan.
 * ----------------------------------------------------------------
 */
SetOpState *
ExecInitSetOp(SetOp *node, EState *estate, int eflags)
{
	SetOpState *setopstate;

	/* check for unsupported flags */
	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

	/*
	 * create state structure
	 */
	setopstate = makeNode(SetOpState);
	setopstate->ps.plan = (Plan *) node;
	setopstate->ps.state = estate;
	setopstate->ps.ExecProcNode = ExecSetOp;

	setopstate->setop_done = false;
	setopstate->numOutput = 0;
	setopstate->numCols = node->numCols;
	setopstate->need_init = true;

	/*
	 * create expression context
	 */
	ExecAssignExprContext(estate, &setopstate->ps);

	/*
	 * If hashing, we also need a longer-lived context to store the hash
	 * table.  The table can't just be kept in the per-query context because
	 * we want to be able to throw it away in ExecReScanSetOp.  We can use a
	 * BumpContext to save storage, because we will have no need to delete
	 * individual table entries.
	 */
	if (node->strategy == SETOP_HASHED)
		setopstate->tuplesContext =
			BumpContextCreate(CurrentMemoryContext,
							  "SetOp hashed tuples",
							  ALLOCSET_DEFAULT_SIZES);

	/*
	 * initialize child nodes
	 *
	 * If we are hashing then the child plans do not need to handle REWIND
	 * efficiently; see ExecReScanSetOp.
	 */
	if (node->strategy == SETOP_HASHED)
		eflags &= ~EXEC_FLAG_REWIND;
	outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate, eflags);
	innerPlanState(setopstate) = ExecInitNode(innerPlan(node), estate, eflags);

	/*
	 * Initialize locally-allocated slots.  In hashed mode, we just need a
	 * result slot.  In sorted mode, we need one first-tuple-of-group slot
	 * for each input; we use the result slot for the left input's slot and
	 * create another for the right input.  (Note: the nextTupleSlot slots
	 * are not ours, but just point to the last slot returned by the input
	 * plan node.)
	 */
	ExecInitResultTupleSlotTL(&setopstate->ps, &TTSOpsMinimalTuple);
	if (node->strategy != SETOP_HASHED)
	{
		setopstate->leftInput.firstTupleSlot =
			setopstate->ps.ps_ResultTupleSlot;
		setopstate->rightInput.firstTupleSlot =
			ExecInitExtraTupleSlot(estate,
								   setopstate->ps.ps_ResultTupleDesc,
								   &TTSOpsMinimalTuple);
	}

	/* Setop nodes do no projections. */
	setopstate->ps.ps_ProjInfo = NULL;

	/*
	 * Precompute fmgr lookup data for inner loop.  We need equality and
	 * hashing functions to do it by hashing, while for sorting we need
	 * SortSupport data.
	 */
	if (node->strategy == SETOP_HASHED)
		execTuplesHashPrepare(node->numCols,
							  node->cmpOperators,
							  &setopstate->eqfuncoids,
							  &setopstate->hashfunctions);
	else
	{
		int			nkeys = node->numCols;

		setopstate->sortKeys = (SortSupport)
			palloc0(nkeys * sizeof(SortSupportData));
		for (int i = 0; i < nkeys; i++)
		{
			SortSupport sortKey = setopstate->sortKeys + i;

			sortKey->ssup_cxt = CurrentMemoryContext;
			sortKey->ssup_collation = node->cmpCollations[i];
			sortKey->ssup_nulls_first = node->cmpNullsFirst[i];
			sortKey->ssup_attno = node->cmpColIdx[i];
			/* abbreviated key conversion is not useful here */
			sortKey->abbreviate = false;

			PrepareSortSupportFromOrderingOp(node->cmpOperators[i], sortKey);
		}
	}

	/* Create a hash table if needed */
	if (node->strategy == SETOP_HASHED)
	{
		build_hash_table(setopstate);
		setopstate->table_filled = false;
	}

	return setopstate;
}

/* ----------------------------------------------------------------
 *		ExecEndSetOp
 *
 *		This shuts down the subplans and frees resources allocated
 *		to this node.
 * ----------------------------------------------------------------
 */
void
ExecEndSetOp(SetOpState *node)
{
	/* free subsidiary stuff including hashtable data */
	if (node->tuplesContext)
		MemoryContextDelete(node->tuplesContext);

	ExecEndNode(outerPlanState(node));
	ExecEndNode(innerPlanState(node));
}

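/* ----------------------------------------------------------------
 *		ExecReScanSetOp
 *
 *		Resets the node so that the next fetch re-reads both inputs,
 *		rescanning or rebuilding the hash table as needed in the
 *		hashed case.
 * ----------------------------------------------------------------
 */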
void
ExecReScanSetOp(SetOpState *node)
{
	PlanState  *outerPlan = outerPlanState(node);
	PlanState  *innerPlan = innerPlanState(node);

	ExecClearTuple(node->ps.ps_ResultTupleSlot);
	node->setop_done = false;
	node->numOutput = 0;

	if (((SetOp *) node->ps.plan)->strategy == SETOP_HASHED)
	{
		/*
		 * In the hashed case, if we haven't yet built the hash table then we
		 * can just return; nothing done yet, so nothing to undo.  If
		 * subnode's chgParam is not NULL then it will be re-scanned by
		 * ExecProcNode, else no reason to re-scan it at all.
		 */
		if (!node->table_filled)
			return;

		/*
		 * If we do have the hash table and the subplans do not have any
		 * parameter changes, then we can just rescan the existing hash
		 * table; no need to build it again.
		 */
		if (outerPlan->chgParam == NULL && innerPlan->chgParam == NULL)
		{
			ResetTupleHashIterator(node->hashtable, &node->hashiter);
			return;
		}

		/* Else, we must rebuild the hashtable */
		ResetTupleHashTable(node->hashtable);
		node->table_filled = false;
	}
	else
	{
		/* Need to re-read first input from each side */
		node->need_init = true;
	}

	/*
	 * if chgParam of subnode is not null then plan will be re-scanned by
	 * first ExecProcNode.
	 */
	if (outerPlan->chgParam == NULL)
		ExecReScan(outerPlan);
	if (innerPlan->chgParam == NULL)
		ExecReScan(innerPlan);
}