/*-------------------------------------------------------------------------
 *
 * nodeSetOp.c
 *    Routines to handle INTERSECT and EXCEPT selection
 *
 * The input of a SetOp node consists of two relations (outer and inner)
 * with identical column sets.  In EXCEPT queries the outer relation is
 * always the left side, while in INTERSECT cases the planner tries to
 * make the outer relation be the smaller of the two inputs.
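 *
 * For example, a query such as
 *
 *    SELECT a, b FROM t1 EXCEPT SELECT a, b FROM t2
 *
 * is executed by a SetOp node whose outer subplan reads the t1 side and
 * whose inner subplan reads the t2 side.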
 *
 * In SETOP_SORTED mode, each input has been sorted according to all the
 * grouping columns.  The SetOp node essentially performs a merge join on
 * the grouping columns, except that it is only interested in counting how
 * many tuples from each input match.  Then it is a simple matter to emit
 * the output demanded by the SQL spec for INTERSECT, INTERSECT ALL, EXCEPT,
 * or EXCEPT ALL.
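 *
 * For instance, if the grouping-column values arriving from the outer
 * input are 1,1,2 and those from the inner input are 1,3, the merge pairs
 * the "1" groups (numLeft = 2, numRight = 1), then finds no inner match
 * for outer group "2" (numLeft = 1, numRight = 0); the inner-only group
 * "3" is skipped, since it cannot produce output.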
 *
 * In SETOP_HASHED mode, the inputs are delivered in no particular order.
 * We read the outer relation and build a hash table in memory with one entry
 * for each group of identical tuples, counting the number of tuples in the
 * group.  Then we read the inner relation and count the number of tuples
 * matching each outer group.  (We can disregard any tuples appearing only
 * in the inner relation, since they cannot result in any output.)  After
 * seeing all the input, we scan the hashtable and generate the correct
 * output using those counts.
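 *
 * Continuing the example above, scanning outer values 1,1,2 leaves the
 * hash table with entries {1: numLeft = 2} and {2: numLeft = 1}; scanning
 * inner values 1,3 then bumps entry 1's numRight to 1 and ignores 3,
 * for which no entry was made during the outer scan.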
 *
 * This node type is not used for UNION or UNION ALL, since those can be
 * implemented more cheaply (there's no need to count the number of
 * matching tuples).
 *
 * Note that SetOp does no qual checking and no projection.  The delivered
 * output tuples are just copies of the first-to-arrive tuple in each
 * input group.
 *
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/executor/nodeSetOp.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/htup_details.h"
#include "executor/executor.h"
#include "executor/nodeSetOp.h"
#include "miscadmin.h"
#include "utils/memutils.h"


/*
 * SetOpStatePerGroupData - per-group working state
 *
 * In SETOP_SORTED mode, we need only one of these structs, and it's just a
 * local in setop_retrieve_sorted.  In SETOP_HASHED mode, the hash table
 * contains one of these for each tuple group.
 */
typedef struct SetOpStatePerGroupData
{
    int64       numLeft;        /* number of left-input dups in group */
    int64       numRight;       /* number of right-input dups in group */
} SetOpStatePerGroupData;

typedef SetOpStatePerGroupData *SetOpStatePerGroup;


static TupleTableSlot *setop_retrieve_sorted(SetOpState *setopstate);
static void setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
                             SetOpState *setopstate);
static int  setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2,
                                SetOpState *setopstate);
static void setop_fill_hash_table(SetOpState *setopstate);
static TupleTableSlot *setop_retrieve_hash_table(SetOpState *setopstate);


/*
 * Initialize the hash table to empty.
 */
static void
build_hash_table(SetOpState *setopstate)
{
    SetOp      *node = (SetOp *) setopstate->ps.plan;
    ExprContext *econtext = setopstate->ps.ps_ExprContext;
    TupleDesc   desc = ExecGetResultType(outerPlanState(setopstate));

    Assert(node->strategy == SETOP_HASHED);
    Assert(node->numGroups > 0);

    /*
     * If both child plans deliver the same fixed tuple slot type, we can
     * tell BuildTupleHashTable to expect that slot type as input.
     * Otherwise, we'll pass NULL denoting that any slot type is possible.
     */
    setopstate->hashtable = BuildTupleHashTable(&setopstate->ps,
                                                desc,
                                                ExecGetCommonChildSlotOps(&setopstate->ps),
                                                node->numCols,
                                                node->cmpColIdx,
                                                setopstate->eqfuncoids,
                                                setopstate->hashfunctions,
                                                node->cmpCollations,
                                                node->numGroups,
                                                sizeof(SetOpStatePerGroupData),
                                                setopstate->ps.state->es_query_cxt,
                                                setopstate->tableContext,
                                                econtext->ecxt_per_tuple_memory,
                                                false);
}

/*
 * We've completed processing a tuple group.  Decide how many copies (if any)
 * of its representative row to emit, and store the count into numOutput.
 * This logic is straight from the SQL92 specification.
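 *
 * For example, a group with numLeft = 3 and numRight = 1 yields one copy
 * for INTERSECT, min(3,1) = 1 copy for INTERSECT ALL, no copies for
 * EXCEPT (numRight is nonzero), and 3 - 1 = 2 copies for EXCEPT ALL.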
 */
static void
set_output_count(SetOpState *setopstate, SetOpStatePerGroup pergroup)
{
    SetOp      *plannode = (SetOp *) setopstate->ps.plan;

    switch (plannode->cmd)
    {
        case SETOPCMD_INTERSECT:
            if (pergroup->numLeft > 0 && pergroup->numRight > 0)
                setopstate->numOutput = 1;
            else
                setopstate->numOutput = 0;
            break;
        case SETOPCMD_INTERSECT_ALL:
            setopstate->numOutput =
                (pergroup->numLeft < pergroup->numRight) ?
                pergroup->numLeft : pergroup->numRight;
            break;
        case SETOPCMD_EXCEPT:
            if (pergroup->numLeft > 0 && pergroup->numRight == 0)
                setopstate->numOutput = 1;
            else
                setopstate->numOutput = 0;
            break;
        case SETOPCMD_EXCEPT_ALL:
            setopstate->numOutput =
                (pergroup->numLeft < pergroup->numRight) ?
                0 : (pergroup->numLeft - pergroup->numRight);
            break;
        default:
            elog(ERROR, "unrecognized set op: %d", (int) plannode->cmd);
            break;
    }
}


/* ----------------------------------------------------------------
 *    ExecSetOp
 * ----------------------------------------------------------------
 */
static TupleTableSlot *         /* return: a tuple or NULL */
ExecSetOp(PlanState *pstate)
{
    SetOpState *node = castNode(SetOpState, pstate);
    SetOp      *plannode = (SetOp *) node->ps.plan;
    TupleTableSlot *resultTupleSlot = node->ps.ps_ResultTupleSlot;

    CHECK_FOR_INTERRUPTS();

    /*
     * If the previously-returned tuple needs to be returned more than once,
     * keep returning it.
     */
    if (node->numOutput > 0)
    {
        node->numOutput--;
        return resultTupleSlot;
    }

    /* Otherwise, we're done if we are out of groups */
    if (node->setop_done)
        return NULL;

    /* Fetch the next tuple group according to the correct strategy */
    if (plannode->strategy == SETOP_HASHED)
    {
        if (!node->table_filled)
            setop_fill_hash_table(node);
        return setop_retrieve_hash_table(node);
    }
    else
        return setop_retrieve_sorted(node);
}

/*
 * ExecSetOp for non-hashed case
 */
static TupleTableSlot *
setop_retrieve_sorted(SetOpState *setopstate)
{
    PlanState  *outerPlan;
    PlanState  *innerPlan;
    TupleTableSlot *resultTupleSlot;

    /*
     * get state info from node
     */
    outerPlan = outerPlanState(setopstate);
    innerPlan = innerPlanState(setopstate);
    resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;

    /*
     * If first time through, establish the invariant that setop_load_group
     * expects: each side's nextTupleSlot is the next output from the child
     * plan, or empty if there is no more output from it.
     */
    if (setopstate->need_init)
    {
        setopstate->need_init = false;

        setopstate->leftInput.nextTupleSlot = ExecProcNode(outerPlan);

        /*
         * If the outer relation is empty, then we will emit nothing, and we
         * don't need to read the inner relation at all.
         */
        if (TupIsNull(setopstate->leftInput.nextTupleSlot))
        {
            setopstate->setop_done = true;
            return NULL;
        }

        setopstate->rightInput.nextTupleSlot = ExecProcNode(innerPlan);

        /* Set flags that we've not completed either side's group */
        setopstate->leftInput.needGroup = true;
        setopstate->rightInput.needGroup = true;
    }

    /*
     * We loop retrieving groups until we find one we should return
     */
    while (!setopstate->setop_done)
    {
        int         cmpresult;
        SetOpStatePerGroupData pergroup;

        /*
         * Fetch the rest of the current outer group, if we didn't already.
         */
        if (setopstate->leftInput.needGroup)
            setop_load_group(&setopstate->leftInput, outerPlan, setopstate);

        /*
         * If no more outer groups, we're done, and don't need to look at any
         * more of the inner relation.
         */
        if (setopstate->leftInput.numTuples == 0)
        {
            setopstate->setop_done = true;
            break;
        }

        /*
         * Fetch the rest of the current inner group, if we didn't already.
         */
        if (setopstate->rightInput.needGroup)
            setop_load_group(&setopstate->rightInput, innerPlan, setopstate);

        /*
         * Determine whether we have matching groups on both sides (this is
         * basically like the core logic of a merge join).
         */
        if (setopstate->rightInput.numTuples == 0)
            cmpresult = -1;     /* as though left input is lesser */
        else
            cmpresult = setop_compare_slots(setopstate->leftInput.firstTupleSlot,
                                            setopstate->rightInput.firstTupleSlot,
                                            setopstate);

        if (cmpresult < 0)
        {
            /* Left group is first, and has no right matches */
            pergroup.numLeft = setopstate->leftInput.numTuples;
            pergroup.numRight = 0;
            /* We'll need another left group next time */
            setopstate->leftInput.needGroup = true;
        }
        else if (cmpresult == 0)
        {
            /* We have matching groups */
            pergroup.numLeft = setopstate->leftInput.numTuples;
            pergroup.numRight = setopstate->rightInput.numTuples;
            /* We'll need to read from both sides next time */
            setopstate->leftInput.needGroup = true;
            setopstate->rightInput.needGroup = true;
        }
        else
        {
            /* Right group has no left matches, so we can ignore it */
            setopstate->rightInput.needGroup = true;
            continue;
        }

        /*
         * Done scanning these input tuple groups.  See if we should emit any
         * copies of result tuple, and if so return the first copy.  (Note
         * that the result tuple is the same as the left input's firstTuple
         * slot.)
         */
        set_output_count(setopstate, &pergroup);

        if (setopstate->numOutput > 0)
        {
            setopstate->numOutput--;
            return resultTupleSlot;
        }
    }

    /* No more groups */
    ExecClearTuple(resultTupleSlot);
    return NULL;
}

/*
 * Load next group of tuples from one child plan or the other.
 *
 * On entry, we've already read the first tuple of the next group
 * (if there is one) into input->nextTupleSlot.  This invariant
 * is maintained on exit.
 */
static void
setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
                 SetOpState *setopstate)
{
    input->needGroup = false;

    /* If we've exhausted this child plan, report an empty group */
    if (TupIsNull(input->nextTupleSlot))
    {
        ExecClearTuple(input->firstTupleSlot);
        input->numTuples = 0;
        return;
    }

    /* Make a local copy of the first tuple for comparisons */
    ExecStoreMinimalTuple(ExecCopySlotMinimalTuple(input->nextTupleSlot),
                          input->firstTupleSlot,
                          true);
    /* and count it */
    input->numTuples = 1;

    /* Scan till we find the end-of-group */
    for (;;)
    {
        int         cmpresult;

        /* Get next input tuple, if there is one */
        input->nextTupleSlot = ExecProcNode(inputPlan);
        if (TupIsNull(input->nextTupleSlot))
            break;

        /* There is; does it belong to same group as firstTuple? */
        cmpresult = setop_compare_slots(input->firstTupleSlot,
                                        input->nextTupleSlot,
                                        setopstate);
        Assert(cmpresult <= 0); /* else input is mis-sorted */
        if (cmpresult != 0)
            break;

        /* Still in same group, so count this tuple */
        input->numTuples++;
    }
}

/*
 * Compare the tuples in the two given slots.
 */
static int
setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2,
                    SetOpState *setopstate)
{
    /* We'll often need to fetch all the columns, so just do it */
    slot_getallattrs(s1);
    slot_getallattrs(s2);
    for (int nkey = 0; nkey < setopstate->numCols; nkey++)
    {
        SortSupport sortKey = setopstate->sortKeys + nkey;
        AttrNumber  attno = sortKey->ssup_attno;
        Datum       datum1 = s1->tts_values[attno - 1],
                    datum2 = s2->tts_values[attno - 1];
        bool        isNull1 = s1->tts_isnull[attno - 1],
                    isNull2 = s2->tts_isnull[attno - 1];
        int         compare;

        compare = ApplySortComparator(datum1, isNull1,
                                      datum2, isNull2,
                                      sortKey);
        if (compare != 0)
            return compare;
    }
    return 0;
}

/*
 * ExecSetOp for hashed case: phase 1, read inputs and build hash table
 */
static void
setop_fill_hash_table(SetOpState *setopstate)
{
    PlanState  *outerPlan;
    PlanState  *innerPlan;
    ExprContext *econtext = setopstate->ps.ps_ExprContext;
    bool        have_tuples = false;

    /*
     * get state info from node
     */
    outerPlan = outerPlanState(setopstate);
    innerPlan = innerPlanState(setopstate);

    /*
     * Process each outer-plan tuple, and then fetch the next one, until we
     * exhaust the outer plan.
     */
    for (;;)
    {
        TupleTableSlot *outerslot;
        TupleHashTable hashtable = setopstate->hashtable;
        TupleHashEntryData *entry;
        SetOpStatePerGroup pergroup;
        bool        isnew;

        outerslot = ExecProcNode(outerPlan);
        if (TupIsNull(outerslot))
            break;
        have_tuples = true;

        /* Find or build hashtable entry for this tuple's group */
        entry = LookupTupleHashEntry(hashtable,
                                     outerslot,
                                     &isnew, NULL);

        pergroup = TupleHashEntryGetAdditional(hashtable, entry);
        /* If new tuple group, initialize counts to zero */
        if (isnew)
        {
            pergroup->numLeft = 0;
            pergroup->numRight = 0;
        }

        /* Advance the counts */
        pergroup->numLeft++;

        /* Must reset expression context after each hashtable lookup */
        ResetExprContext(econtext);
    }

    /*
     * If the outer relation is empty, then we will emit nothing, and we
     * don't need to read the inner relation at all.
     */
    if (have_tuples)
    {
        /*
         * Process each inner-plan tuple, and then fetch the next one, until
         * we exhaust the inner plan.
         */
        for (;;)
        {
            TupleTableSlot *innerslot;
            TupleHashTable hashtable = setopstate->hashtable;
            TupleHashEntryData *entry;

            innerslot = ExecProcNode(innerPlan);
            if (TupIsNull(innerslot))
                break;

            /* For tuples not seen previously, do not make hashtable entry */
            entry = LookupTupleHashEntry(hashtable,
                                         innerslot,
                                         NULL, NULL);

            /* Advance the counts if entry is already present */
            if (entry)
            {
                SetOpStatePerGroup pergroup = TupleHashEntryGetAdditional(hashtable, entry);

                pergroup->numRight++;
            }

            /* Must reset expression context after each hashtable lookup */
            ResetExprContext(econtext);
        }
    }

    setopstate->table_filled = true;
    /* Initialize to walk the hash table */
    ResetTupleHashIterator(setopstate->hashtable, &setopstate->hashiter);
}

/*
 * ExecSetOp for hashed case: phase 2, retrieving groups from hash table
 */
static TupleTableSlot *
setop_retrieve_hash_table(SetOpState *setopstate)
{
    TupleHashEntry entry;
    TupleTableSlot *resultTupleSlot;

    /*
     * get state info from node
     */
    resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;

    /*
     * We loop retrieving groups until we find one we should return
     */
    while (!setopstate->setop_done)
    {
        TupleHashTable hashtable = setopstate->hashtable;
        SetOpStatePerGroup pergroup;

        CHECK_FOR_INTERRUPTS();

        /*
         * Find the next entry in the hash table
         */
        entry = ScanTupleHashTable(hashtable, &setopstate->hashiter);
        if (entry == NULL)
        {
            /* No more entries in hashtable, so done */
            setopstate->setop_done = true;
            return NULL;
        }

        /*
         * See if we should emit any copies of this tuple, and if so return
         * the first copy.
         */
        pergroup = TupleHashEntryGetAdditional(hashtable, entry);
        set_output_count(setopstate, pergroup);

        if (setopstate->numOutput > 0)
        {
            setopstate->numOutput--;
            return ExecStoreMinimalTuple(TupleHashEntryGetTuple(entry),
                                         resultTupleSlot,
                                         false);
        }
    }

    /* No more groups */
    ExecClearTuple(resultTupleSlot);
    return NULL;
}

/* ----------------------------------------------------------------
 *    ExecInitSetOp
 *
 *    This initializes the setop node state structures and
 *    the node's subplan.
 * ----------------------------------------------------------------
 */
SetOpState *
ExecInitSetOp(SetOp *node, EState *estate, int eflags)
{
    SetOpState *setopstate;

    /* check for unsupported flags */
    Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

    /*
     * create state structure
     */
    setopstate = makeNode(SetOpState);
    setopstate->ps.plan = (Plan *) node;
    setopstate->ps.state = estate;
    setopstate->ps.ExecProcNode = ExecSetOp;

    setopstate->setop_done = false;
    setopstate->numOutput = 0;
    setopstate->numCols = node->numCols;
    setopstate->need_init = true;

    /*
     * create expression context
     */
    ExecAssignExprContext(estate, &setopstate->ps);

    /*
     * If hashing, we also need a longer-lived context to store the hash
     * table.  The table can't just be kept in the per-query context because
     * we want to be able to throw it away in ExecReScanSetOp.
     */
    if (node->strategy == SETOP_HASHED)
        setopstate->tableContext =
            AllocSetContextCreate(CurrentMemoryContext,
                                  "SetOp hash table",
                                  ALLOCSET_DEFAULT_SIZES);

    /*
     * initialize child nodes
     *
     * If we are hashing then the child plans do not need to handle REWIND
     * efficiently; see ExecReScanSetOp.
     */
    if (node->strategy == SETOP_HASHED)
        eflags &= ~EXEC_FLAG_REWIND;
    outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate, eflags);
    innerPlanState(setopstate) = ExecInitNode(innerPlan(node), estate, eflags);

    /*
     * Initialize locally-allocated slots.  In hashed mode, we just need a
     * result slot.  In sorted mode, we need one first-tuple-of-group slot
     * for each input; we use the result slot for the left input's slot and
     * create another for the right input.  (Note: the nextTupleSlot slots
     * are not ours, but just point to the last slot returned by the input
     * plan node.)
     */
    ExecInitResultTupleSlotTL(&setopstate->ps, &TTSOpsMinimalTuple);
    if (node->strategy != SETOP_HASHED)
    {
        setopstate->leftInput.firstTupleSlot =
            setopstate->ps.ps_ResultTupleSlot;
        setopstate->rightInput.firstTupleSlot =
            ExecInitExtraTupleSlot(estate,
                                   setopstate->ps.ps_ResultTupleDesc,
                                   &TTSOpsMinimalTuple);
    }

    /* Setop nodes do no projections. */
    setopstate->ps.ps_ProjInfo = NULL;

    /*
     * Precompute fmgr lookup data for inner loop.  We need equality and
     * hashing functions to do it by hashing, while for sorting we need
     * SortSupport data.
     */
    if (node->strategy == SETOP_HASHED)
        execTuplesHashPrepare(node->numCols,
                              node->cmpOperators,
                              &setopstate->eqfuncoids,
                              &setopstate->hashfunctions);
    else
    {
        int         nkeys = node->numCols;

        setopstate->sortKeys = (SortSupport)
            palloc0(nkeys * sizeof(SortSupportData));
        for (int i = 0; i < nkeys; i++)
        {
            SortSupport sortKey = setopstate->sortKeys + i;

            sortKey->ssup_cxt = CurrentMemoryContext;
            sortKey->ssup_collation = node->cmpCollations[i];
            sortKey->ssup_nulls_first = node->cmpNullsFirst[i];
            sortKey->ssup_attno = node->cmpColIdx[i];
            /* abbreviated key conversion is not useful here */
            sortKey->abbreviate = false;

            PrepareSortSupportFromOrderingOp(node->cmpOperators[i], sortKey);
        }
    }

    /* Create a hash table if needed */
    if (node->strategy == SETOP_HASHED)
    {
        build_hash_table(setopstate);
        setopstate->table_filled = false;
    }

    return setopstate;
}

/* ----------------------------------------------------------------
 *    ExecEndSetOp
 *
 *    This shuts down the subplans and frees resources allocated
 *    to this node.
 * ----------------------------------------------------------------
 */
void
ExecEndSetOp(SetOpState *node)
{
    /* free subsidiary stuff including hashtable */
    if (node->tableContext)
        MemoryContextDelete(node->tableContext);

    ExecEndNode(outerPlanState(node));
    ExecEndNode(innerPlanState(node));
}


void
ExecReScanSetOp(SetOpState *node)
{
    PlanState  *outerPlan = outerPlanState(node);
    PlanState  *innerPlan = innerPlanState(node);

    ExecClearTuple(node->ps.ps_ResultTupleSlot);
    node->setop_done = false;
    node->numOutput = 0;

    if (((SetOp *) node->ps.plan)->strategy == SETOP_HASHED)
    {
        /*
         * In the hashed case, if we haven't yet built the hash table then
         * we can just return; nothing done yet, so nothing to undo.  If
         * subnode's chgParam is not NULL then it will be re-scanned by
         * ExecProcNode, else no reason to re-scan it at all.
         */
        if (!node->table_filled)
            return;

        /*
         * If we do have the hash table and the subplans do not have any
         * parameter changes, then we can just rescan the existing hash
         * table; no need to build it again.
         */
        if (outerPlan->chgParam == NULL && innerPlan->chgParam == NULL)
        {
            ResetTupleHashIterator(node->hashtable, &node->hashiter);
            return;
        }

        /* Release any hashtable storage */
        if (node->tableContext)
            MemoryContextReset(node->tableContext);

        /* And rebuild an empty hashtable */
        ResetTupleHashTable(node->hashtable);
        node->table_filled = false;
    }
    else
    {
        /* Need to re-read first input from each side */
        node->need_init = true;
    }

739 : * if chgParam of subnode is not null then plan will be re-scanned by
740 : * first ExecProcNode.
741 : */
742 1194 : if (outerPlan->chgParam == NULL)
743 0 : ExecReScan(outerPlan);
744 1194 : if (innerPlan->chgParam == NULL)
745 0 : ExecReScan(innerPlan);
746 : }
|