Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeSetOp.c
4 : * Routines to handle INTERSECT and EXCEPT selection
5 : *
6 : * The input of a SetOp node consists of two relations (outer and inner)
7 : * with identical column sets. In EXCEPT queries the outer relation is
8 : * always the left side, while in INTERSECT cases the planner tries to
9 : * make the outer relation be the smaller of the two inputs.
10 : *
11 : * In SETOP_SORTED mode, each input has been sorted according to all the
12 : * grouping columns. The SetOp node essentially performs a merge join on
13 : * the grouping columns, except that it is only interested in counting how
14 : * many tuples from each input match. Then it is a simple matter to emit
15 : * the output demanded by the SQL spec for INTERSECT, INTERSECT ALL, EXCEPT,
16 : * or EXCEPT ALL.
17 : *
18 : * In SETOP_HASHED mode, the inputs are delivered in no particular order.
19 : * We read the outer relation and build a hash table in memory with one entry
20 : * for each group of identical tuples, counting the number of tuples in the
21 : * group. Then we read the inner relation and count the number of tuples
22 : * matching each outer group. (We can disregard any tuples appearing only
23 : * in the inner relation, since they cannot result in any output.) After
24 : * seeing all the input, we scan the hashtable and generate the correct
25 : * output using those counts.
26 : *
27 : * This node type is not used for UNION or UNION ALL, since those can be
28 : * implemented more cheaply (there's no need to count the number of
29 : * matching tuples).
30 : *
31 : * Note that SetOp does no qual checking nor projection. The delivered
32 : * output tuples are just copies of the first-to-arrive tuple in each
33 : * input group.
34 : *
35 : *
36 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
37 : * Portions Copyright (c) 1994, Regents of the University of California
38 : *
39 : *
40 : * IDENTIFICATION
41 : * src/backend/executor/nodeSetOp.c
42 : *
43 : *-------------------------------------------------------------------------
44 : */
45 :
46 : #include "postgres.h"
47 :
48 : #include "access/htup_details.h"
49 : #include "executor/executor.h"
50 : #include "executor/nodeSetOp.h"
51 : #include "miscadmin.h"
52 : #include "utils/memutils.h"
53 : #include "utils/sortsupport.h"
54 :
55 :
56 : /*
57 : * SetOpStatePerGroupData - per-group working state
58 : *
59 : * In SETOP_SORTED mode, we need only one of these structs, and it's just a
60 : * local in setop_retrieve_sorted. In SETOP_HASHED mode, the hash table
61 : * contains one of these for each tuple group, as each entry's "additional" data.
62 : */
63 : typedef struct SetOpStatePerGroupData
64 : {
65 : int64 numLeft; /* number of left-input dups in group */
66 : int64 numRight; /* number of right-input dups in group */
67 : } SetOpStatePerGroupData;
68 :
69 : typedef SetOpStatePerGroupData *SetOpStatePerGroup;
70 :
71 :
72 : static TupleTableSlot *setop_retrieve_sorted(SetOpState *setopstate);
73 : static void setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
74 : SetOpState *setopstate);
75 : static int setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2,
76 : SetOpState *setopstate);
77 : static void setop_fill_hash_table(SetOpState *setopstate);
78 : static TupleTableSlot *setop_retrieve_hash_table(SetOpState *setopstate);
79 :
80 :
81 : /*
82 : * Initialize the hash table to empty (entries carry SetOpStatePerGroupData as per-group extra state).
83 : */
84 : static void
85 308 : build_hash_table(SetOpState *setopstate)
86 : {
87 308 : SetOp *node = (SetOp *) setopstate->ps.plan;
88 308 : ExprContext *econtext = setopstate->ps.ps_ExprContext;
89 308 : TupleDesc desc = ExecGetResultType(outerPlanState(setopstate));
90 :
91 : Assert(node->strategy == SETOP_HASHED);
92 :
93 : /*
94 : * If both child plans deliver the same fixed tuple slot type, we can tell
95 : * BuildTupleHashTable to expect that slot type as input. Otherwise,
96 : * we'll pass NULL denoting that any slot type is possible.
97 : */
98 308 : setopstate->hashtable = BuildTupleHashTable(&setopstate->ps,
99 : desc,
100 : ExecGetCommonChildSlotOps(&setopstate->ps),
101 : node->numCols,
102 : node->cmpColIdx,
103 308 : setopstate->eqfuncoids,
104 : setopstate->hashfunctions,
105 : node->cmpCollations,
106 : node->numGroups,
107 : sizeof(SetOpStatePerGroupData), /* per-entry "additional" space */
108 308 : setopstate->ps.state->es_query_cxt,
109 : setopstate->tuplesContext,
110 : econtext->ecxt_per_tuple_memory,
111 : false);
112 308 : }
113 :
114 : /* Planner support routine to estimate the space needed for a SetOp hash table of nentries groups with the given tuple width */
115 : Size
116 549 : EstimateSetOpHashTableSpace(double nentries, Size tupleWidth)
117 : {
118 549 : return EstimateTupleHashTableSpace(nentries,
119 : tupleWidth,
120 : sizeof(SetOpStatePerGroupData));
121 : }
122 :
123 : /*
124 : * We've completed processing a tuple group. Decide how many copies (if any)
125 : * of its representative row to emit, and store the count into numOutput.
126 : * This logic is straight from the SQL92 specification.
127 : */
128 : static void
129 295452 : set_output_count(SetOpState *setopstate, SetOpStatePerGroup pergroup)
130 : {
131 295452 : SetOp *plannode = (SetOp *) setopstate->ps.plan;
132 :
133 295452 : switch (plannode->cmd)
134 : {
135 40240 : case SETOPCMD_INTERSECT:
136 40240 : if (pergroup->numLeft > 0 && pergroup->numRight > 0)
137 40152 : setopstate->numOutput = 1;
138 : else
139 88 : setopstate->numOutput = 0;
140 40240 : break;
141 24 : case SETOPCMD_INTERSECT_ALL:
142 24 : setopstate->numOutput =
143 24 : (pergroup->numLeft < pergroup->numRight) ?
144 24 : pergroup->numLeft : pergroup->numRight; /* min(numLeft, numRight) */
145 24 : break;
146 247132 : case SETOPCMD_EXCEPT:
147 247132 : if (pergroup->numLeft > 0 && pergroup->numRight == 0)
148 956 : setopstate->numOutput = 1;
149 : else
150 246176 : setopstate->numOutput = 0;
151 247132 : break;
152 8056 : case SETOPCMD_EXCEPT_ALL:
153 8056 : setopstate->numOutput =
154 8056 : (pergroup->numLeft < pergroup->numRight) ?
155 8056 : 0 : (pergroup->numLeft - pergroup->numRight); /* max(numLeft - numRight, 0) */
156 8056 : break;
157 0 : default:
158 0 : elog(ERROR, "unrecognized set op: %d", (int) plannode->cmd);
159 : break;
160 : }
161 295452 : }
162 :
163 :
164 : /* ----------------------------------------------------------------
165 : * ExecSetOp -- fetch the next result tuple, or NULL when no more remain
166 : * ----------------------------------------------------------------
167 : */
168 : static TupleTableSlot * /* return: a tuple or NULL */
169 42396 : ExecSetOp(PlanState *pstate)
170 : {
171 42396 : SetOpState *node = castNode(SetOpState, pstate);
172 42396 : SetOp *plannode = (SetOp *) node->ps.plan;
173 42396 : TupleTableSlot *resultTupleSlot = node->ps.ps_ResultTupleSlot;
174 :
175 42396 : CHECK_FOR_INTERRUPTS();
176 :
177 : /*
178 : * If the previously-returned tuple needs to be returned more than once,
179 : * keep returning it.
180 : */
181 42396 : if (node->numOutput > 0)
182 : {
183 32 : node->numOutput--;
184 32 : return resultTupleSlot;
185 : }
186 :
187 : /* Otherwise, we're done if we are out of groups */
188 42364 : if (node->setop_done)
189 0 : return NULL;
190 :
191 : /* Fetch the next tuple group according to the correct strategy */
192 42364 : if (plannode->strategy == SETOP_HASHED)
193 : {
194 21312 : if (!node->table_filled)
195 672 : setop_fill_hash_table(node);
196 21312 : return setop_retrieve_hash_table(node);
197 : }
198 : else
199 21052 : return setop_retrieve_sorted(node);
200 : }
201 :
202 : /*
203 : * ExecSetOp for non-hashed case: merge-join-like scan of the two sorted inputs
204 : */
205 : static TupleTableSlot *
206 21052 : setop_retrieve_sorted(SetOpState *setopstate)
207 : {
208 : PlanState *outerPlan;
209 : PlanState *innerPlan;
210 : TupleTableSlot *resultTupleSlot;
211 :
212 : /*
213 : * get state info from node
214 : */
215 21052 : outerPlan = outerPlanState(setopstate);
216 21052 : innerPlan = innerPlanState(setopstate);
217 21052 : resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;
218 :
219 : /*
220 : * If first time through, establish the invariant that setop_load_group
221 : * expects: each side's nextTupleSlot is the next output from the child
222 : * plan, or empty if there is no more output from it.
223 : */
224 21052 : if (setopstate->need_init)
225 : {
226 524 : setopstate->need_init = false;
227 :
228 524 : setopstate->leftInput.nextTupleSlot = ExecProcNode(outerPlan);
229 :
230 : /*
231 : * If the outer relation is empty, then we will emit nothing, and we
232 : * don't need to read the inner relation at all.
233 : */
234 524 : if (TupIsNull(setopstate->leftInput.nextTupleSlot))
235 : {
236 0 : setopstate->setop_done = true;
237 0 : return NULL;
238 : }
239 :
240 524 : setopstate->rightInput.nextTupleSlot = ExecProcNode(innerPlan);
241 :
242 : /* Set flags that we've not completed either side's group */
243 524 : setopstate->leftInput.needGroup = true;
244 524 : setopstate->rightInput.needGroup = true;
245 : }
246 :
247 : /*
248 : * We loop retrieving groups until we find one we should return
249 : */
250 61324 : while (!setopstate->setop_done)
251 : {
252 : int cmpresult;
253 : SetOpStatePerGroupData pergroup;
254 :
255 : /*
256 : * Fetch the rest of the current outer group, if we didn't already.
257 : */
258 61324 : if (setopstate->leftInput.needGroup)
259 61120 : setop_load_group(&setopstate->leftInput, outerPlan, setopstate);
260 :
261 : /*
262 : * If no more outer groups, we're done, and don't need to look at any
263 : * more of the inner relation.
264 : */
265 61324 : if (setopstate->leftInput.numTuples == 0)
266 : {
267 524 : setopstate->setop_done = true;
268 524 : break;
269 : }
270 :
271 : /*
272 : * Fetch the rest of the current inner group, if we didn't already.
273 : */
274 60800 : if (setopstate->rightInput.needGroup)
275 60748 : setop_load_group(&setopstate->rightInput, innerPlan, setopstate);
276 :
277 : /*
278 : * Determine whether we have matching groups on both sides (this is
279 : * basically like the core logic of a merge join).
280 : */
281 60800 : if (setopstate->rightInput.numTuples == 0)
282 172 : cmpresult = -1; /* as though left input is lesser */
283 : else
284 60628 : cmpresult = setop_compare_slots(setopstate->leftInput.firstTupleSlot,
285 : setopstate->rightInput.firstTupleSlot,
286 : setopstate);
287 :
288 60800 : if (cmpresult < 0)
289 : {
290 : /* Left group is first, and has no right matches */
291 488 : pergroup.numLeft = setopstate->leftInput.numTuples;
292 488 : pergroup.numRight = 0;
293 : /* We'll need another left group next time */
294 488 : setopstate->leftInput.needGroup = true;
295 : }
296 60312 : else if (cmpresult == 0)
297 : {
298 : /* We have matching groups */
299 60108 : pergroup.numLeft = setopstate->leftInput.numTuples;
300 60108 : pergroup.numRight = setopstate->rightInput.numTuples;
301 : /* We'll need to read from both sides next time */
302 60108 : setopstate->leftInput.needGroup = true;
303 60108 : setopstate->rightInput.needGroup = true;
304 : }
305 : else
306 : {
307 : /* Right group has no left matches, so we can ignore it */
308 204 : setopstate->rightInput.needGroup = true;
309 204 : continue;
310 : }
311 :
312 : /*
313 : * Done scanning these input tuple groups. See if we should emit any
314 : * copies of result tuple, and if so return the first copy. (Note
315 : * that the result tuple is the same as the left input's firstTuple
316 : * slot.)
317 : */
318 60596 : set_output_count(setopstate, &pergroup);
319 :
320 60596 : if (setopstate->numOutput > 0)
321 : {
322 20528 : setopstate->numOutput--;
323 20528 : return resultTupleSlot;
324 : }
325 : }
326 :
327 : /* No more groups */
328 524 : ExecClearTuple(resultTupleSlot);
329 524 : return NULL;
330 : }
331 :
332 : /*
333 : * Load next group of tuples from one child plan or the other; the group's size is counted into input->numTuples and its first tuple is copied to input->firstTupleSlot.
334 : *
335 : * On entry, we've already read the first tuple of the next group
336 : * (if there is one) into input->nextTupleSlot. This invariant
337 : * is maintained on exit.
338 : */
339 : static void
340 121868 : setop_load_group(SetOpStatePerInput *input, PlanState *inputPlan,
341 : SetOpState *setopstate)
342 : {
343 121868 : input->needGroup = false;
344 :
345 : /* If we've exhausted this child plan, report an empty group */
346 121868 : if (TupIsNull(input->nextTupleSlot))
347 : {
348 692 : ExecClearTuple(input->firstTupleSlot);
349 692 : input->numTuples = 0;
350 692 : return;
351 : }
352 :
353 : /* Make a local copy of the first tuple for comparisons */
354 121176 : ExecStoreMinimalTuple(ExecCopySlotMinimalTuple(input->nextTupleSlot),
355 : input->firstTupleSlot,
356 : true);
357 : /* and count it */
358 121176 : input->numTuples = 1;
359 :
360 : /* Scan till we find the end-of-group */
361 : for (;;)
362 20220 : {
363 : int cmpresult;
364 :
365 : /* Get next input tuple, if there is one */
366 141396 : input->nextTupleSlot = ExecProcNode(inputPlan);
367 141396 : if (TupIsNull(input->nextTupleSlot))
368 : break;
369 :
370 : /* There is; does it belong to same group as firstTuple? */
371 140352 : cmpresult = setop_compare_slots(input->firstTupleSlot,
372 : input->nextTupleSlot,
373 : setopstate);
374 : Assert(cmpresult <= 0); /* else input is mis-sorted */
375 140352 : if (cmpresult != 0)
376 120132 : break;
377 :
378 : /* Still in same group, so count this tuple */
379 20220 : input->numTuples++;
380 : }
381 : }
382 :
383 : /*
384 : * Compare the tuples in the two given slots; returns <0, 0, or >0 per the grouping-column sort order.
385 : */
386 : static int
387 200980 : setop_compare_slots(TupleTableSlot *s1, TupleTableSlot *s2,
388 : SetOpState *setopstate)
389 : {
390 : /* We'll often need to fetch all the columns, so just do it */
391 200980 : slot_getallattrs(s1);
392 200980 : slot_getallattrs(s2);
393 281108 : for (int nkey = 0; nkey < setopstate->numCols; nkey++)
394 : {
395 200780 : SortSupport sortKey = setopstate->sortKeys + nkey;
396 200780 : AttrNumber attno = sortKey->ssup_attno;
397 200780 : Datum datum1 = s1->tts_values[attno - 1],
398 200780 : datum2 = s2->tts_values[attno - 1];
399 200780 : bool isNull1 = s1->tts_isnull[attno - 1],
400 200780 : isNull2 = s2->tts_isnull[attno - 1];
401 : int compare;
402 :
403 200780 : compare = ApplySortComparator(datum1, isNull1,
404 : datum2, isNull2,
405 : sortKey);
406 200780 : if (compare != 0)
407 120652 : return compare;
408 : }
409 80328 : return 0;
410 : }
411 :
412 : /*
413 : * ExecSetOp for hashed case: phase 1, read inputs and build hash table (the outer input creates entries; the inner input only updates existing ones)
414 : */
415 : static void
416 672 : setop_fill_hash_table(SetOpState *setopstate)
417 : {
418 : PlanState *outerPlan;
419 : PlanState *innerPlan;
420 672 : ExprContext *econtext = setopstate->ps.ps_ExprContext;
421 672 : bool have_tuples = false;
422 :
423 : /*
424 : * get state info from node
425 : */
426 672 : outerPlan = outerPlanState(setopstate);
427 672 : innerPlan = innerPlanState(setopstate);
428 :
429 : /*
430 : * Process each outer-plan tuple, and then fetch the next one, until we
431 : * exhaust the outer plan.
432 : */
433 : for (;;)
434 255001 : {
435 : TupleTableSlot *outerslot;
436 255673 : TupleHashTable hashtable = setopstate->hashtable;
437 : TupleHashEntryData *entry;
438 : SetOpStatePerGroup pergroup;
439 : bool isnew;
440 :
441 255673 : outerslot = ExecProcNode(outerPlan);
442 255673 : if (TupIsNull(outerslot))
443 : break;
444 255001 : have_tuples = true;
445 :
446 : /* Find or build hashtable entry for this tuple's group */
447 255001 : entry = LookupTupleHashEntry(hashtable,
448 : outerslot,
449 : &isnew, NULL);
450 :
451 255001 : pergroup = TupleHashEntryGetAdditional(hashtable, entry);
452 : /* If new tuple group, initialize counts to zero */
453 255001 : if (isnew)
454 : {
455 234856 : pergroup->numLeft = 0;
456 234856 : pergroup->numRight = 0;
457 : }
458 :
459 : /* Advance the counts */
460 255001 : pergroup->numLeft++;
461 :
462 : /* Must reset expression context after each hashtable lookup */
463 255001 : ResetExprContext(econtext);
464 : }
465 :
466 : /*
467 : * If the outer relation is empty, then we will emit nothing, and we don't
468 : * need to read the inner relation at all.
469 : */
470 672 : if (have_tuples)
471 : {
472 : /*
473 : * Process each inner-plan tuple, and then fetch the next one, until
474 : * we exhaust the inner plan.
475 : */
476 : for (;;)
477 254945 : {
478 : TupleTableSlot *innerslot;
479 255617 : TupleHashTable hashtable = setopstate->hashtable;
480 : TupleHashEntryData *entry;
481 :
482 255617 : innerslot = ExecProcNode(innerPlan);
483 255617 : if (TupIsNull(innerslot))
484 : break;
485 :
486 : /* For tuples not seen previously, do not make hashtable entry */
487 254945 : entry = LookupTupleHashEntry(hashtable,
488 : innerslot,
489 : NULL, NULL);
490 :
491 : /* Advance the counts if entry is already present */
492 254945 : if (entry)
493 : {
494 234409 : SetOpStatePerGroup pergroup = TupleHashEntryGetAdditional(hashtable, entry);
495 :
496 234409 : pergroup->numRight++;
497 : }
498 :
499 : /* Must reset expression context after each hashtable lookup */
500 254945 : ResetExprContext(econtext);
501 : }
502 : }
503 :
504 672 : setopstate->table_filled = true;
505 : /* Initialize to walk the hash table */
506 672 : ResetTupleHashIterator(setopstate->hashtable, &setopstate->hashiter);
507 672 : }
508 :
509 : /*
510 : * ExecSetOp for hashed case: phase 2, retrieving groups from hash table and emitting those whose computed output count is nonzero
511 : */
512 : static TupleTableSlot *
513 21312 : setop_retrieve_hash_table(SetOpState *setopstate)
514 : {
515 : TupleHashEntry entry;
516 : TupleTableSlot *resultTupleSlot;
517 :
518 : /*
519 : * get state info from node
520 : */
521 21312 : resultTupleSlot = setopstate->ps.ps_ResultTupleSlot;
522 :
523 : /*
524 : * We loop retrieving groups until we find one we should return
525 : */
526 235528 : while (!setopstate->setop_done)
527 : {
528 235528 : TupleHashTable hashtable = setopstate->hashtable;
529 : SetOpStatePerGroup pergroup;
530 :
531 235528 : CHECK_FOR_INTERRUPTS();
532 :
533 : /*
534 : * Find the next entry in the hash table
535 : */
536 235528 : entry = ScanTupleHashTable(hashtable, &setopstate->hashiter);
537 235528 : if (entry == NULL)
538 : {
539 : /* No more entries in hashtable, so done */
540 672 : setopstate->setop_done = true;
541 672 : return NULL;
542 : }
543 :
544 : /*
545 : * See if we should emit any copies of this tuple, and if so return
546 : * the first copy.
547 : */
548 234856 : pergroup = TupleHashEntryGetAdditional(hashtable, entry);
549 234856 : set_output_count(setopstate, pergroup);
550 :
551 234856 : if (setopstate->numOutput > 0)
552 : {
553 20640 : setopstate->numOutput--;
554 20640 : return ExecStoreMinimalTuple(TupleHashEntryGetTuple(entry),
555 : resultTupleSlot,
556 : false);
557 : }
558 : }
559 :
560 : /* No more groups */
561 0 : ExecClearTuple(resultTupleSlot);
562 0 : return NULL;
563 : }
564 :
565 : /* ----------------------------------------------------------------
566 : * ExecInitSetOp
567 : *
568 : * This initializes the setop node state structures and
569 : * the node's subplan.
570 : * ----------------------------------------------------------------
571 : */
572 : SetOpState *
573 480 : ExecInitSetOp(SetOp *node, EState *estate, int eflags)
574 : {
575 : SetOpState *setopstate;
576 :
577 : /* check for unsupported flags */
578 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
579 :
580 : /*
581 : * create state structure
582 : */
583 480 : setopstate = makeNode(SetOpState);
584 480 : setopstate->ps.plan = (Plan *) node;
585 480 : setopstate->ps.state = estate;
586 480 : setopstate->ps.ExecProcNode = ExecSetOp;
587 :
588 480 : setopstate->setop_done = false;
589 480 : setopstate->numOutput = 0;
590 480 : setopstate->numCols = node->numCols;
591 480 : setopstate->need_init = true;
592 :
593 : /*
594 : * create expression context
595 : */
596 480 : ExecAssignExprContext(estate, &setopstate->ps);
597 :
598 : /*
599 : * If hashing, we also need a longer-lived context to store the hash
600 : * table. The table can't just be kept in the per-query context because
601 : * we want to be able to throw it away in ExecReScanSetOp. We can use a
602 : * BumpContext to save storage, because we will have no need to delete
603 : * individual table entries.
604 : */
605 480 : if (node->strategy == SETOP_HASHED)
606 308 : setopstate->tuplesContext =
607 308 : BumpContextCreate(CurrentMemoryContext,
608 : "SetOp hashed tuples",
609 : ALLOCSET_DEFAULT_SIZES);
610 :
611 : /*
612 : * initialize child nodes
613 : *
614 : * If we are hashing then the child plans do not need to handle REWIND
615 : * efficiently; see ExecReScanSetOp.
616 : */
617 480 : if (node->strategy == SETOP_HASHED)
618 308 : eflags &= ~EXEC_FLAG_REWIND;
619 480 : outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate, eflags);
620 480 : innerPlanState(setopstate) = ExecInitNode(innerPlan(node), estate, eflags);
621 :
622 : /*
623 : * Initialize locally-allocated slots. In hashed mode, we just need a
624 : * result slot. In sorted mode, we need one first-tuple-of-group slot for
625 : * each input; we use the result slot for the left input's slot and create
626 : * another for the right input. (Note: the nextTupleSlot slots are not
627 : * ours, but just point to the last slot returned by the input plan node.)
628 : */
629 480 : ExecInitResultTupleSlotTL(&setopstate->ps, &TTSOpsMinimalTuple); /* result slot always holds MinimalTuples */
630 480 : if (node->strategy != SETOP_HASHED)
631 : {
632 172 : setopstate->leftInput.firstTupleSlot =
633 172 : setopstate->ps.ps_ResultTupleSlot;
634 172 : setopstate->rightInput.firstTupleSlot =
635 172 : ExecInitExtraTupleSlot(estate,
636 : setopstate->ps.ps_ResultTupleDesc,
637 : &TTSOpsMinimalTuple);
638 : }
639 :
640 : /* Setop nodes do no projections. */
641 480 : setopstate->ps.ps_ProjInfo = NULL;
642 :
643 : /*
644 : * Precompute fmgr lookup data for inner loop. We need equality and
645 : * hashing functions to do it by hashing, while for sorting we need
646 : * SortSupport data.
647 : */
648 480 : if (node->strategy == SETOP_HASHED)
649 308 : execTuplesHashPrepare(node->numCols,
650 308 : node->cmpOperators,
651 : &setopstate->eqfuncoids,
652 : &setopstate->hashfunctions);
653 : else
654 : {
655 172 : int nkeys = node->numCols;
656 :
657 172 : setopstate->sortKeys = (SortSupport)
658 172 : palloc0(nkeys * sizeof(SortSupportData));
659 448 : for (int i = 0; i < nkeys; i++)
660 : {
661 276 : SortSupport sortKey = setopstate->sortKeys + i;
662 :
663 276 : sortKey->ssup_cxt = CurrentMemoryContext;
664 276 : sortKey->ssup_collation = node->cmpCollations[i];
665 276 : sortKey->ssup_nulls_first = node->cmpNullsFirst[i];
666 276 : sortKey->ssup_attno = node->cmpColIdx[i];
667 : /* abbreviated key conversion is not useful here */
668 276 : sortKey->abbreviate = false;
669 :
670 276 : PrepareSortSupportFromOrderingOp(node->cmpOperators[i], sortKey);
671 : }
672 : }
673 :
674 : /* Create a hash table if needed */
675 480 : if (node->strategy == SETOP_HASHED)
676 : {
677 308 : build_hash_table(setopstate);
678 308 : setopstate->table_filled = false;
679 : }
680 :
681 480 : return setopstate;
682 : }
683 :
684 : /* ----------------------------------------------------------------
685 : * ExecEndSetOp
686 : *
687 : * This shuts down the subplans and frees resources allocated
688 : * to this node.
689 : * ----------------------------------------------------------------
690 : */
691 : void
692 480 : ExecEndSetOp(SetOpState *node)
693 : {
694 : /* free subsidiary stuff including hashtable data (tuplesContext is only created in hashed mode) */
695 480 : if (node->tuplesContext)
696 308 : MemoryContextDelete(node->tuplesContext);
697 :
698 480 : ExecEndNode(outerPlanState(node));
699 480 : ExecEndNode(innerPlanState(node));
700 : }
701 :
702 :
703 : void
704 800 : ExecReScanSetOp(SetOpState *node) /* reset node state so the set operation is recomputed on the next fetch */
705 : {
706 800 : PlanState *outerPlan = outerPlanState(node);
707 800 : PlanState *innerPlan = innerPlanState(node);
708 :
709 800 : ExecClearTuple(node->ps.ps_ResultTupleSlot);
710 800 : node->setop_done = false;
711 800 : node->numOutput = 0;
712 :
713 800 : if (((SetOp *) node->ps.plan)->strategy == SETOP_HASHED)
714 : {
715 : /*
716 : * In the hashed case, if we haven't yet built the hash table then we
717 : * can just return; nothing done yet, so nothing to undo. If subnode's
718 : * chgParam is not NULL then it will be re-scanned by ExecProcNode,
719 : * else no reason to re-scan it at all.
720 : */
721 400 : if (!node->table_filled)
722 4 : return;
723 :
724 : /*
725 : * If we do have the hash table and the subplans do not have any
726 : * parameter changes, then we can just rescan the existing hash table;
727 : * no need to build it again.
728 : */
729 396 : if (outerPlan->chgParam == NULL && innerPlan->chgParam == NULL)
730 : {
731 0 : ResetTupleHashIterator(node->hashtable, &node->hashiter);
732 0 : return;
733 : }
734 :
735 : /* Else, we must rebuild the hashtable */
736 396 : ResetTupleHashTable(node->hashtable);
737 396 : node->table_filled = false;
738 : }
739 : else
740 : {
741 : /* Need to re-read first input from each side */
742 400 : node->need_init = true;
743 : }
744 :
745 : /*
746 : * if chgParam of subnode is not null then plan will be re-scanned by
747 : * first ExecProcNode.
748 : */
749 796 : if (outerPlan->chgParam == NULL)
750 0 : ExecReScan(outerPlan);
751 796 : if (innerPlan->chgParam == NULL)
752 0 : ExecReScan(innerPlan);
753 : }
|