Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * pathnode.c
4 : * Routines to manipulate pathlists and create path nodes
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/optimizer/util/pathnode.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include <math.h>
18 :
19 : #include "foreign/fdwapi.h"
20 : #include "miscadmin.h"
21 : #include "nodes/extensible.h"
22 : #include "optimizer/appendinfo.h"
23 : #include "optimizer/clauses.h"
24 : #include "optimizer/cost.h"
25 : #include "optimizer/optimizer.h"
26 : #include "optimizer/pathnode.h"
27 : #include "optimizer/paths.h"
28 : #include "optimizer/planmain.h"
29 : #include "optimizer/tlist.h"
30 : #include "parser/parsetree.h"
31 : #include "utils/memutils.h"
32 : #include "utils/selfuncs.h"
33 :
34 : typedef enum
35 : {
36 : COSTS_EQUAL, /* path costs are fuzzily equal */
37 : COSTS_BETTER1, /* first path is cheaper than second */
38 : COSTS_BETTER2, /* second path is cheaper than first */
39 : COSTS_DIFFERENT, /* neither path dominates the other on cost */
40 : } PathCostComparison;
41 :
42 : /*
43 : * STD_FUZZ_FACTOR is the normal fuzz factor for compare_path_costs_fuzzily.
44 : * XXX is it worth making this user-controllable? It provides a tradeoff
45 : * between planner runtime and the accuracy of path cost comparisons.
46 : */
47 : #define STD_FUZZ_FACTOR 1.01
48 :
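/*
 * A standalone illustrative sketch (not part of pathnode.c): it shows what a
 * 1% fuzz factor means in practice -- one cost counts as worse than another
 * only if it exceeds it by more than the fuzz margin.  The helper name
 * fuzzy_cost_cmp and the demo values are hypothetical.
 */
#include <stdio.h>

/* Return +1 if c1 is fuzzily more expensive than c2, -1 if fuzzily
 * cheaper, and 0 if the two differ by no more than the fuzz margin. */
static int
fuzzy_cost_cmp(double c1, double c2, double fuzz_factor)
{
    if (c1 > c2 * fuzz_factor)
        return +1;
    if (c2 > c1 * fuzz_factor)
        return -1;
    return 0;
}

int
main(void)
{
    /* 100.5 is within 1% of 100.0, so the costs are "fuzzily equal" */
    printf("%d\n", fuzzy_cost_cmp(100.5, 100.0, 1.01));    /* prints 0 */
    /* 102.0 exceeds 100.0 * 1.01, so it is fuzzily worse */
    printf("%d\n", fuzzy_cost_cmp(102.0, 100.0, 1.01));    /* prints 1 */
    return 0;
}
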
49 : static List *translate_sub_tlist(List *tlist, int relid);
50 : static int append_total_cost_compare(const ListCell *a, const ListCell *b);
51 : static int append_startup_cost_compare(const ListCell *a, const ListCell *b);
52 : static List *reparameterize_pathlist_by_child(PlannerInfo *root,
53 : List *pathlist,
54 : RelOptInfo *child_rel);
55 : static bool pathlist_is_reparameterizable_by_child(List *pathlist,
56 : RelOptInfo *child_rel);
57 :
58 :
59 : /*****************************************************************************
60 : * MISC. PATH UTILITIES
61 : *****************************************************************************/
62 :
63 : /*
64 : * compare_path_costs
65 : * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
66 : * or more expensive than path2 for the specified criterion.
67 : */
68 : int
69 983712 : compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
70 : {
71 : /* Number of disabled nodes, if different, trumps all else. */
72 983712 : if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
73 : {
74 2604 : if (path1->disabled_nodes < path2->disabled_nodes)
75 2604 : return -1;
76 : else
77 0 : return +1;
78 : }
79 :
80 981108 : if (criterion == STARTUP_COST)
81 : {
82 498346 : if (path1->startup_cost < path2->startup_cost)
83 294676 : return -1;
84 203670 : if (path1->startup_cost > path2->startup_cost)
85 100266 : return +1;
86 :
87 : /*
88 : * If paths have the same startup cost (not at all unlikely), order
89 : * them by total cost.
90 : */
91 103404 : if (path1->total_cost < path2->total_cost)
92 55686 : return -1;
93 47718 : if (path1->total_cost > path2->total_cost)
94 4490 : return +1;
95 : }
96 : else
97 : {
98 482762 : if (path1->total_cost < path2->total_cost)
99 456266 : return -1;
100 26496 : if (path1->total_cost > path2->total_cost)
101 3876 : return +1;
102 :
103 : /*
104 : * If paths have the same total cost, order them by startup cost.
105 : */
106 22620 : if (path1->startup_cost < path2->startup_cost)
107 2586 : return -1;
108 20034 : if (path1->startup_cost > path2->startup_cost)
109 8 : return +1;
110 : }
111 63254 : return 0;
112 : }
113 :
114 : /*
115 : * compare_fractional_path_costs
116 : * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
117 : * or more expensive than path2 for fetching the specified fraction
118 : * of the total tuples.
119 : *
120 : * If fraction is <= 0 or >= 1, we interpret it as 1, ie, we select the
121 : * path with the cheaper total_cost.
122 : */
123 : int
124 6100 : compare_fractional_path_costs(Path *path1, Path *path2,
125 : double fraction)
126 : {
127 : Cost cost1,
128 : cost2;
129 :
130 : /* Number of disabled nodes, if different, trumps all else. */
131 6100 : if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
132 : {
133 36 : if (path1->disabled_nodes < path2->disabled_nodes)
134 36 : return -1;
135 : else
136 0 : return +1;
137 : }
138 :
139 6064 : if (fraction <= 0.0 || fraction >= 1.0)
140 1738 : return compare_path_costs(path1, path2, TOTAL_COST);
141 4326 : cost1 = path1->startup_cost +
142 4326 : fraction * (path1->total_cost - path1->startup_cost);
143 4326 : cost2 = path2->startup_cost +
144 4326 : fraction * (path2->total_cost - path2->startup_cost);
145 4326 : if (cost1 < cost2)
146 3576 : return -1;
147 750 : if (cost1 > cost2)
148 750 : return +1;
149 0 : return 0;
150 : }
151 :
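/*
 * A standalone illustrative sketch (not part of pathnode.c): it evaluates the
 * linear interpolation used by compare_fractional_path_costs for two
 * hypothetical paths, showing how fetching only a fraction of the rows can
 * flip the winner.  The helper name and the numbers are made up.
 */
#include <stdio.h>

static double
sketch_fractional_cost(double startup, double total, double fraction)
{
    return startup + fraction * (total - startup);
}

int
main(void)
{
    /* path1: fast start, expensive overall; path2: slow start, cheap overall */
    double  f1 = sketch_fractional_cost(1.0, 1000.0, 0.1);     /* 100.9 */
    double  f2 = sketch_fractional_cost(200.0, 400.0, 0.1);    /* 220.0 */

    printf("10%% of rows: %.1f vs %.1f -> path1 wins\n", f1, f2);

    f1 = sketch_fractional_cost(1.0, 1000.0, 1.0);             /* 1000.0 */
    f2 = sketch_fractional_cost(200.0, 400.0, 1.0);            /* 400.0 */
    printf("all rows:     %.1f vs %.1f -> path2 wins\n", f1, f2);
    return 0;
}
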
152 : /*
153 : * compare_path_costs_fuzzily
154 : * Compare the costs of two paths to see if either can be said to
155 : * dominate the other.
156 : *
157 : * We use fuzzy comparisons so that add_path() can avoid keeping both of
158 : * a pair of paths that really have insignificantly different cost.
159 : *
160 : * The fuzz_factor argument must be 1.0 plus delta, where delta is the
161 : * fraction of the smaller cost that is considered to be a significant
162 : * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
163 : * be 1% of the smaller cost.
164 : *
165 : * The two paths are said to have "equal" costs if both startup and total
166 : * costs are fuzzily the same. Path1 is said to be better than path2 if
167 : * it has fuzzily better startup cost and fuzzily no worse total cost,
168 : * or if it has fuzzily better total cost and fuzzily no worse startup cost.
169 : * Path2 is better than path1 if the reverse holds. Finally, if one path
170 : * is fuzzily better than the other on startup cost and fuzzily worse on
171 : * total cost, we just say that their costs are "different", since neither
172 : * dominates the other across the whole performance spectrum.
173 : *
174 : * This function also enforces a policy rule that paths for which the relevant
175 : * one of parent->consider_startup and parent->consider_param_startup is false
176 : * cannot survive comparisons solely on the grounds of good startup cost, so
177 : * we never return COSTS_DIFFERENT when that is true for the total-cost loser.
178 : * (But if total costs are fuzzily equal, we compare startup costs anyway,
179 : * in hopes of eliminating one path or the other.)
180 : */
181 : static PathCostComparison
182 4076628 : compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
183 : {
184 : #define CONSIDER_PATH_STARTUP_COST(p) \
185 : ((p)->param_info == NULL ? (p)->parent->consider_startup : (p)->parent->consider_param_startup)
186 :
187 : /* Number of disabled nodes, if different, trumps all else. */
188 4076628 : if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
189 : {
190 30582 : if (path1->disabled_nodes < path2->disabled_nodes)
191 16718 : return COSTS_BETTER1;
192 : else
193 13864 : return COSTS_BETTER2;
194 : }
195 :
196 : /*
197 : * Check total cost first since it's more likely to be different; many
198 : * paths have zero startup cost.
199 : */
200 4046046 : if (path1->total_cost > path2->total_cost * fuzz_factor)
201 : {
202 : /* path1 fuzzily worse on total cost */
203 2113268 : if (CONSIDER_PATH_STARTUP_COST(path1) &&
204 125538 : path2->startup_cost > path1->startup_cost * fuzz_factor)
205 : {
206 : /* ... but path2 fuzzily worse on startup, so DIFFERENT */
207 84582 : return COSTS_DIFFERENT;
208 : }
209 : /* else path2 dominates */
210 2028686 : return COSTS_BETTER2;
211 : }
212 1932778 : if (path2->total_cost > path1->total_cost * fuzz_factor)
213 : {
214 : /* path2 fuzzily worse on total cost */
215 1005022 : if (CONSIDER_PATH_STARTUP_COST(path2) &&
216 54834 : path1->startup_cost > path2->startup_cost * fuzz_factor)
217 : {
218 : /* ... but path1 fuzzily worse on startup, so DIFFERENT */
219 35796 : return COSTS_DIFFERENT;
220 : }
221 : /* else path1 dominates */
222 969226 : return COSTS_BETTER1;
223 : }
224 : /* fuzzily the same on total cost ... */
225 927756 : if (path1->startup_cost > path2->startup_cost * fuzz_factor)
226 : {
227 : /* ... but path1 fuzzily worse on startup, so path2 wins */
228 369826 : return COSTS_BETTER2;
229 : }
230 557930 : if (path2->startup_cost > path1->startup_cost * fuzz_factor)
231 : {
232 : /* ... but path2 fuzzily worse on startup, so path1 wins */
233 62544 : return COSTS_BETTER1;
234 : }
235 : /* fuzzily the same on both costs */
236 495386 : return COSTS_EQUAL;
237 :
238 : #undef CONSIDER_PATH_STARTUP_COST
239 : }
240 :
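/*
 * A standalone illustrative sketch (not part of pathnode.c): a simplified
 * version of the fuzzy dominance test above, using bare startup/total cost
 * pairs.  It ignores disabled_nodes and the consider_startup policy, so it
 * only conveys the four-way outcome, not the exact behavior of
 * compare_path_costs_fuzzily.  All names here are hypothetical.
 */
#include <stdio.h>

typedef enum
{
    SK_COSTS_EQUAL,
    SK_COSTS_BETTER1,
    SK_COSTS_BETTER2,
    SK_COSTS_DIFFERENT,
} SketchCostComparison;

static SketchCostComparison
sketch_compare_fuzzily(double startup1, double total1,
                       double startup2, double total2,
                       double fuzz)
{
    if (total1 > total2 * fuzz)
    {
        /* path1 fuzzily worse on total; if path2 is also fuzzily worse on
         * startup, neither dominates */
        if (startup2 > startup1 * fuzz)
            return SK_COSTS_DIFFERENT;
        return SK_COSTS_BETTER2;
    }
    if (total2 > total1 * fuzz)
    {
        if (startup1 > startup2 * fuzz)
            return SK_COSTS_DIFFERENT;
        return SK_COSTS_BETTER1;
    }
    /* fuzzily the same on total cost: fall back to startup cost */
    if (startup1 > startup2 * fuzz)
        return SK_COSTS_BETTER2;
    if (startup2 > startup1 * fuzz)
        return SK_COSTS_BETTER1;
    return SK_COSTS_EQUAL;
}

int
main(void)
{
    /* cheap startup but expensive total vs. the reverse: DIFFERENT (3) */
    printf("%d\n", sketch_compare_fuzzily(1.0, 500.0, 100.0, 200.0, 1.01));
    /* totals within 1%, clearly better startup: BETTER1 (1) */
    printf("%d\n", sketch_compare_fuzzily(1.0, 200.0, 100.0, 200.0, 1.01));
    return 0;
}
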
241 : /*
242 : * set_cheapest
243 : * Find the minimum-cost paths from among a relation's paths,
244 : * and save them in the rel's cheapest-path fields.
245 : *
246 : * cheapest_total_path is normally the cheapest-total-cost unparameterized
247 : * path; but if there are no unparameterized paths, we assign it to be the
248 : * best (cheapest least-parameterized) parameterized path. However, only
249 : * unparameterized paths are considered candidates for cheapest_startup_path,
250 : * so that will be NULL if there are no unparameterized paths.
251 : *
252 : * The cheapest_parameterized_paths list collects all parameterized paths
253 : * that have survived the add_path() tournament for this relation. (Since
254 : * add_path ignores pathkeys for a parameterized path, these will be paths
255 : * that have best cost or best row count for their parameterization. We
256 : * may also have both a parallel-safe and a non-parallel-safe path for the
257 : * same parameterization in some cases, but this should be
258 : * relatively rare since, most typically, all paths for the same relation
259 : * will be parallel-safe or none of them will.)
260 : *
261 : * cheapest_parameterized_paths always includes the cheapest-total
262 : * unparameterized path, too, if there is one; the users of that list find
263 : * it more convenient if that's included.
264 : *
265 : * This is normally called only after we've finished constructing the path
266 : * list for the rel node.
267 : */
268 : void
269 2005314 : set_cheapest(RelOptInfo *parent_rel)
270 : {
271 : Path *cheapest_startup_path;
272 : Path *cheapest_total_path;
273 : Path *best_param_path;
274 : List *parameterized_paths;
275 : ListCell *p;
276 :
277 : Assert(IsA(parent_rel, RelOptInfo));
278 :
279 2005314 : if (parent_rel->pathlist == NIL)
280 0 : elog(ERROR, "could not devise a query plan for the given query");
281 :
282 2005314 : cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
283 2005314 : parameterized_paths = NIL;
284 :
285 4524154 : foreach(p, parent_rel->pathlist)
286 : {
287 2518840 : Path *path = (Path *) lfirst(p);
288 : int cmp;
289 :
290 2518840 : if (path->param_info)
291 : {
292 : /* Parameterized path, so add it to parameterized_paths */
293 131110 : parameterized_paths = lappend(parameterized_paths, path);
294 :
295 : /*
296 : * If we have an unparameterized cheapest-total, we no longer care
297 : * about finding the best parameterized path, so move on.
298 : */
299 131110 : if (cheapest_total_path)
300 25880 : continue;
301 :
302 : /*
303 : * Otherwise, track the best parameterized path, which is the one
304 : * with least total cost among those of the minimum
305 : * parameterization.
306 : */
307 105230 : if (best_param_path == NULL)
308 96874 : best_param_path = path;
309 : else
310 : {
311 8356 : switch (bms_subset_compare(PATH_REQ_OUTER(path),
312 8356 : PATH_REQ_OUTER(best_param_path)))
313 : {
314 54 : case BMS_EQUAL:
315 : /* keep the cheaper one */
316 54 : if (compare_path_costs(path, best_param_path,
317 : TOTAL_COST) < 0)
318 0 : best_param_path = path;
319 54 : break;
320 368 : case BMS_SUBSET1:
321 : /* new path is less-parameterized */
322 368 : best_param_path = path;
323 368 : break;
324 0 : case BMS_SUBSET2:
325 : /* old path is less-parameterized, keep it */
326 0 : break;
327 7934 : case BMS_DIFFERENT:
328 :
329 : /*
330 : * This means that neither path has the least possible
331 : * parameterization for the rel. We'll sit on the old
332 : * path until something better comes along.
333 : */
334 7934 : break;
335 : }
336 : }
337 : }
338 : else
339 : {
340 : /* Unparameterized path, so consider it for cheapest slots */
341 2387730 : if (cheapest_total_path == NULL)
342 : {
343 1993748 : cheapest_startup_path = cheapest_total_path = path;
344 1993748 : continue;
345 : }
346 :
347 : /*
348 : * If we find two paths of identical costs, try to keep the
349 : * better-sorted one. The paths might have unrelated sort
350 : * orderings, in which case we can only guess which might be
351 : * better to keep, but if one is superior then we definitely
352 : * should keep that one.
353 : */
354 393982 : cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
355 393982 : if (cmp > 0 ||
356 374 : (cmp == 0 &&
357 374 : compare_pathkeys(cheapest_startup_path->pathkeys,
358 : path->pathkeys) == PATHKEYS_BETTER2))
359 78302 : cheapest_startup_path = path;
360 :
361 393982 : cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
362 393982 : if (cmp > 0 ||
363 48 : (cmp == 0 &&
364 48 : compare_pathkeys(cheapest_total_path->pathkeys,
365 : path->pathkeys) == PATHKEYS_BETTER2))
366 0 : cheapest_total_path = path;
367 : }
368 : }
369 :
370 : /* Add cheapest unparameterized path, if any, to parameterized_paths */
371 2005314 : if (cheapest_total_path)
372 1993748 : parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
373 :
374 : /*
375 : * If there is no unparameterized path, use the best parameterized path as
376 : * cheapest_total_path (but not as cheapest_startup_path).
377 : */
378 2005314 : if (cheapest_total_path == NULL)
379 11566 : cheapest_total_path = best_param_path;
380 : Assert(cheapest_total_path != NULL);
381 :
382 2005314 : parent_rel->cheapest_startup_path = cheapest_startup_path;
383 2005314 : parent_rel->cheapest_total_path = cheapest_total_path;
384 2005314 : parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
385 2005314 : parent_rel->cheapest_parameterized_paths = parameterized_paths;
386 2005314 : }
387 :
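/*
 * A standalone illustrative sketch (not part of pathnode.c): a simplified
 * picture of the bookkeeping done by set_cheapest(), restricted to
 * unparameterized paths and ignoring disabled_nodes, pathkey tie-breaking
 * and the cheapest_parameterized_paths list.  The struct and values are
 * hypothetical.
 */
#include <stdio.h>

typedef struct SketchPath
{
    double      startup_cost;
    double      total_cost;
} SketchPath;

int
main(void)
{
    SketchPath  paths[] = {
        {10.0, 120.0},          /* e.g. an index scan */
        {0.0, 100.0},           /* e.g. a seqscan */
        {50.0, 90.0},           /* e.g. a presorted plan */
    };
    int         npaths = sizeof(paths) / sizeof(paths[0]);
    SketchPath *cheapest_startup = &paths[0];
    SketchPath *cheapest_total = &paths[0];

    for (int i = 1; i < npaths; i++)
    {
        if (paths[i].startup_cost < cheapest_startup->startup_cost)
            cheapest_startup = &paths[i];
        if (paths[i].total_cost < cheapest_total->total_cost)
            cheapest_total = &paths[i];
    }

    /* prints the startup winner {0,100} and the total winner {50,90} */
    printf("cheapest startup: %.1f/%.1f\n",
           cheapest_startup->startup_cost, cheapest_startup->total_cost);
    printf("cheapest total:   %.1f/%.1f\n",
           cheapest_total->startup_cost, cheapest_total->total_cost);
    return 0;
}
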
388 : /*
389 : * add_path
390 : * Consider a potential implementation path for the specified parent rel,
391 : * and add it to the rel's pathlist if it is worthy of consideration.
392 : *
393 : * A path is worthy if it has a better sort order (better pathkeys) or
394 : * cheaper cost (as defined below), or generates fewer rows, than any
395 : * existing path that has the same or superset parameterization rels. We
396 : * also consider parallel-safe paths more worthy than others.
397 : *
398 : * Cheaper cost can mean either a cheaper total cost or a cheaper startup
399 : * cost; if one path is cheaper in one of these aspects and another is
400 : * cheaper in the other, we keep both. However, when some path type is
401 : * disabled (e.g. due to enable_seqscan=false), the number of times that
402 : * a disabled path type is used is considered to be a higher-order
403 : * component of the cost. Hence, if path A uses no disabled path type,
404 : * and path B uses 1 or more disabled path types, A is cheaper, no matter
405 : * what we estimate for the startup and total costs. The startup and total
406 : * cost essentially act as a tiebreak when comparing paths that use equal
407 : * numbers of disabled path nodes; but in practice this tiebreak is almost
408 : * always used, since normally no path types are disabled.
409 : *
410 : * In addition to possibly adding new_path, we also remove from the rel's
411 : * pathlist any old paths that are dominated by new_path --- that is,
412 : * new_path is cheaper, at least as well ordered, generates no more rows,
413 : * requires no outer rels not required by the old path, and is no less
414 : * parallel-safe.
415 : *
416 : * In most cases, a path with a superset parameterization will generate
417 : * fewer rows (since it has more join clauses to apply), so that those two
418 : * figures of merit move in opposite directions; this means that a path of
419 : * one parameterization can seldom dominate a path of another. But such
420 : * cases do arise, so we make the full set of checks anyway.
421 : *
422 : * There are two policy decisions embedded in this function, along with
423 : * its sibling add_path_precheck. First, we treat all parameterized paths
424 : * as having NIL pathkeys, so that they cannot win comparisons on the
425 : * basis of sort order. This is to reduce the number of parameterized
426 : * paths that are kept; see discussion in src/backend/optimizer/README.
427 : *
428 : * Second, we only consider cheap startup cost to be interesting if
429 : * parent_rel->consider_startup is true for an unparameterized path, or
430 : * parent_rel->consider_param_startup is true for a parameterized one.
431 : * Again, this allows discarding useless paths sooner.
432 : *
433 : * The pathlist is kept sorted by disabled_nodes and then by total_cost,
434 : * with cheaper paths at the front. Within this routine, that's simply a
435 : * speed hack: doing it that way makes it more likely that we will reject
436 : * an inferior path after a few comparisons, rather than many comparisons.
437 : * However, add_path_precheck relies on this ordering to exit early
438 : * when possible.
439 : *
440 : * NOTE: discarded Path objects are immediately pfree'd to reduce planner
441 : * memory consumption. We dare not try to free the substructure of a Path,
442 : * since much of it may be shared with other Paths or the query tree itself;
443 : * but just recycling discarded Path nodes is a very useful savings in
444 : * a large join tree. We can recycle the List nodes of pathlist, too.
445 : *
446 : * As noted in optimizer/README, deleting a previously-accepted Path is
447 : * safe because we know that Paths of this rel cannot yet be referenced
448 : * from any other rel, such as a higher-level join. However, in some cases
449 : * it is possible that a Path is referenced by another Path for its own
450 : * rel; we must not delete such a Path, even if it is dominated by the new
451 : * Path. Currently this occurs only for IndexPath objects, which may be
452 : * referenced as children of BitmapHeapPaths as well as being paths in
453 : * their own right. Hence, we don't pfree IndexPaths when rejecting them.
454 : *
455 : * 'parent_rel' is the relation entry to which the path corresponds.
456 : * 'new_path' is a potential path for parent_rel.
457 : *
458 : * Returns nothing, but modifies parent_rel->pathlist.
459 : */
460 : void
461 4160980 : add_path(RelOptInfo *parent_rel, Path *new_path)
462 : {
463 4160980 : bool accept_new = true; /* unless we find a superior old path */
464 4160980 : int insert_at = 0; /* where to insert new item */
465 : List *new_path_pathkeys;
466 : ListCell *p1;
467 :
468 : /*
469 : * This is a convenient place to check for query cancel --- no part of the
470 : * planner goes very long without calling add_path().
471 : */
472 4160980 : CHECK_FOR_INTERRUPTS();
473 :
474 : /* Pretend parameterized paths have no pathkeys, per comment above */
475 4160980 : new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
476 :
477 : /*
478 : * Loop to check proposed new path against old paths. Note it is possible
479 : * for more than one old path to be tossed out because new_path dominates
480 : * it.
481 : */
482 6393860 : foreach(p1, parent_rel->pathlist)
483 : {
484 3755066 : Path *old_path = (Path *) lfirst(p1);
485 3755066 : bool remove_old = false; /* unless new proves superior */
486 : PathCostComparison costcmp;
487 : PathKeysComparison keyscmp;
488 : BMS_Comparison outercmp;
489 :
490 : /*
491 : * Do a fuzzy cost comparison with standard fuzziness limit.
492 : */
493 3755066 : costcmp = compare_path_costs_fuzzily(new_path, old_path,
494 : STD_FUZZ_FACTOR);
495 :
496 : /*
497 : * If the two paths compare differently for startup and total cost,
498 : * then we want to keep both, and we can skip comparing pathkeys and
499 : * required_outer rels. If they compare the same, proceed with the
500 : * other comparisons. Row count is checked last. (We make the tests
501 : * in this order because the cost comparison is most likely to turn
502 : * out "different", and the pathkeys comparison next most likely. As
503 : * explained above, row count very seldom makes a difference, so even
504 : * though it's cheap to compare there's not much point in checking it
505 : * earlier.)
506 : */
507 3755066 : if (costcmp != COSTS_DIFFERENT)
508 : {
509 : /* Similarly check to see if either dominates on pathkeys */
510 : List *old_path_pathkeys;
511 :
512 3634718 : old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
513 3634718 : keyscmp = compare_pathkeys(new_path_pathkeys,
514 : old_path_pathkeys);
515 3634718 : if (keyscmp != PATHKEYS_DIFFERENT)
516 : {
517 3457070 : switch (costcmp)
518 : {
519 345900 : case COSTS_EQUAL:
520 345900 : outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
521 345900 : PATH_REQ_OUTER(old_path));
522 345900 : if (keyscmp == PATHKEYS_BETTER1)
523 : {
524 3402 : if ((outercmp == BMS_EQUAL ||
525 3402 : outercmp == BMS_SUBSET1) &&
526 3402 : new_path->rows <= old_path->rows &&
527 3394 : new_path->parallel_safe >= old_path->parallel_safe)
528 3394 : remove_old = true; /* new dominates old */
529 : }
530 342498 : else if (keyscmp == PATHKEYS_BETTER2)
531 : {
532 15312 : if ((outercmp == BMS_EQUAL ||
533 15312 : outercmp == BMS_SUBSET2) &&
534 15312 : new_path->rows >= old_path->rows &&
535 15312 : new_path->parallel_safe <= old_path->parallel_safe)
536 15312 : accept_new = false; /* old dominates new */
537 : }
538 : else /* keyscmp == PATHKEYS_EQUAL */
539 : {
540 327186 : if (outercmp == BMS_EQUAL)
541 : {
542 : /*
543 : * Same pathkeys and outer rels, and fuzzily
544 : * the same cost, so keep just one; to decide
545 : * which, first check parallel-safety, then
546 : * rows, then do a fuzzy cost comparison with
547 : * very small fuzz limit. (We used to do an
548 : * exact cost comparison, but that results in
549 : * annoying platform-specific plan variations
550 : * due to roundoff in the cost estimates.) If
551 : * things are still tied, arbitrarily keep
552 : * only the old path. Notice that we will
553 : * keep only the old path even if the
554 : * less-fuzzy comparison decides the startup
555 : * and total costs compare differently.
556 : */
557 321836 : if (new_path->parallel_safe >
558 321836 : old_path->parallel_safe)
559 42 : remove_old = true; /* new dominates old */
560 321794 : else if (new_path->parallel_safe <
561 321794 : old_path->parallel_safe)
562 54 : accept_new = false; /* old dominates new */
563 321740 : else if (new_path->rows < old_path->rows)
564 0 : remove_old = true; /* new dominates old */
565 321740 : else if (new_path->rows > old_path->rows)
566 178 : accept_new = false; /* old dominates new */
567 321562 : else if (compare_path_costs_fuzzily(new_path,
568 : old_path,
569 : 1.0000000001) == COSTS_BETTER1)
570 15270 : remove_old = true; /* new dominates old */
571 : else
572 306292 : accept_new = false; /* old equals or
573 : * dominates new */
574 : }
575 5350 : else if (outercmp == BMS_SUBSET1 &&
576 1232 : new_path->rows <= old_path->rows &&
577 1216 : new_path->parallel_safe >= old_path->parallel_safe)
578 1216 : remove_old = true; /* new dominates old */
579 4134 : else if (outercmp == BMS_SUBSET2 &&
580 3558 : new_path->rows >= old_path->rows &&
581 3524 : new_path->parallel_safe <= old_path->parallel_safe)
582 3524 : accept_new = false; /* old dominates new */
583 : /* else different parameterizations, keep both */
584 : }
585 345900 : break;
586 1007996 : case COSTS_BETTER1:
587 1007996 : if (keyscmp != PATHKEYS_BETTER2)
588 : {
589 704098 : outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
590 704098 : PATH_REQ_OUTER(old_path));
591 704098 : if ((outercmp == BMS_EQUAL ||
592 601704 : outercmp == BMS_SUBSET1) &&
593 601704 : new_path->rows <= old_path->rows &&
594 596712 : new_path->parallel_safe >= old_path->parallel_safe)
595 594206 : remove_old = true; /* new dominates old */
596 : }
597 1007996 : break;
598 2103174 : case COSTS_BETTER2:
599 2103174 : if (keyscmp != PATHKEYS_BETTER1)
600 : {
601 1338622 : outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
602 1338622 : PATH_REQ_OUTER(old_path));
603 1338622 : if ((outercmp == BMS_EQUAL ||
604 1256150 : outercmp == BMS_SUBSET2) &&
605 1256150 : new_path->rows >= old_path->rows &&
606 1198558 : new_path->parallel_safe <= old_path->parallel_safe)
607 1196826 : accept_new = false; /* old dominates new */
608 : }
609 2103174 : break;
610 0 : case COSTS_DIFFERENT:
611 :
612 : /*
613 : * can't get here, but keep this case to keep compiler
614 : * quiet
615 : */
616 0 : break;
617 : }
618 : }
619 : }
620 :
621 : /*
622 : * Remove current element from pathlist if dominated by new.
623 : */
624 3755066 : if (remove_old)
625 : {
626 614128 : parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
627 : p1);
628 :
629 : /*
630 : * Delete the data pointed-to by the deleted cell, if possible
631 : */
632 614128 : if (!IsA(old_path, IndexPath))
633 596502 : pfree(old_path);
634 : }
635 : else
636 : {
637 : /*
638 : * new belongs after this old path if it has more disabled nodes
639 : * or if it has the same number of disabled nodes but a greater total cost
640 : */
641 3140938 : if (new_path->disabled_nodes > old_path->disabled_nodes ||
642 3127074 : (new_path->disabled_nodes == old_path->disabled_nodes &&
643 3126214 : new_path->total_cost >= old_path->total_cost))
644 2615696 : insert_at = foreach_current_index(p1) + 1;
645 : }
646 :
647 : /*
648 : * If we found an old path that dominates new_path, we can quit
649 : * scanning the pathlist; we will not add new_path, and we assume
650 : * new_path cannot dominate any other elements of the pathlist.
651 : */
652 3755066 : if (!accept_new)
653 1522186 : break;
654 : }
655 :
656 4160980 : if (accept_new)
657 : {
658 : /* Accept the new path: insert it at proper place in pathlist */
659 2638794 : parent_rel->pathlist =
660 2638794 : list_insert_nth(parent_rel->pathlist, insert_at, new_path);
661 : }
662 : else
663 : {
664 : /* Reject and recycle the new path */
665 1522186 : if (!IsA(new_path, IndexPath))
666 1428346 : pfree(new_path);
667 : }
668 4160980 : }
669 :
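/*
 * A standalone illustrative sketch (not part of pathnode.c): a stripped-down
 * version of the dominance test add_path() applies before discarding an old
 * path.  Pathkey and parameterization comparisons are reduced to booleans
 * supplied by the caller, and the fuzzy cost comparison is reduced to a
 * plain <=, so this only conveys the shape of the rule.  All names are
 * hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct SketchPath
{
    int         disabled_nodes;
    double      startup_cost;
    double      total_cost;
    double      rows;
    bool        parallel_safe;
} SketchPath;

/*
 * "new dominates old" roughly means: no more disabled nodes, no worse on
 * either cost, at least as useful a sort order, a required-outer set that is
 * a subset of old's, no more rows, and no less parallel-safe.
 */
static bool
sketch_dominates(const SketchPath *new_path, const SketchPath *old_path,
                 bool new_keys_at_least_as_good, bool new_outer_is_subset)
{
    return new_path->disabled_nodes <= old_path->disabled_nodes &&
        new_path->startup_cost <= old_path->startup_cost &&
        new_path->total_cost <= old_path->total_cost &&
        new_keys_at_least_as_good &&
        new_outer_is_subset &&
        new_path->rows <= old_path->rows &&
        new_path->parallel_safe >= old_path->parallel_safe;
}

int
main(void)
{
    SketchPath  old_path = {0, 10.0, 200.0, 1000.0, true};
    SketchPath  new_path = {0, 5.0, 150.0, 1000.0, true};

    /* cheaper on both costs, same rows, same safety: old can be dropped */
    printf("%d\n", sketch_dominates(&new_path, &old_path, true, true));
    return 0;
}
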
670 : /*
671 : * add_path_precheck
672 : * Check whether a proposed new path could possibly get accepted.
673 : * We assume we know the path's pathkeys and parameterization accurately,
674 : * and have lower bounds for its costs.
675 : *
676 : * Note that we do not know the path's rowcount, since getting an estimate for
677 : * that is too expensive to do before prechecking. We assume here that paths
678 : * of a superset parameterization will generate fewer rows; if that holds,
679 : * then paths with different parameterizations cannot dominate each other
680 : * and so we can simply ignore existing paths of another parameterization.
681 : * (In the infrequent cases where that rule of thumb fails, add_path will
682 : * get rid of the inferior path.)
683 : *
684 : * At the time this is called, we haven't actually built a Path structure,
685 : * so the required information has to be passed piecemeal.
686 : */
687 : bool
688 4424816 : add_path_precheck(RelOptInfo *parent_rel, int disabled_nodes,
689 : Cost startup_cost, Cost total_cost,
690 : List *pathkeys, Relids required_outer)
691 : {
692 : List *new_path_pathkeys;
693 : bool consider_startup;
694 : ListCell *p1;
695 :
696 : /* Pretend parameterized paths have no pathkeys, per add_path policy */
697 4424816 : new_path_pathkeys = required_outer ? NIL : pathkeys;
698 :
699 : /* Decide whether new path's startup cost is interesting */
700 4424816 : consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
701 :
702 5757972 : foreach(p1, parent_rel->pathlist)
703 : {
704 5464632 : Path *old_path = (Path *) lfirst(p1);
705 : PathKeysComparison keyscmp;
706 :
707 : /*
708 : * Since the pathlist is sorted by disabled_nodes and then by
709 : * total_cost, we can stop looking once we reach a path with more
710 : * disabled nodes, or the same number of disabled nodes plus a
711 : * total_cost larger than the new path's.
712 : */
713 5464632 : if (unlikely(old_path->disabled_nodes != disabled_nodes))
714 : {
715 11904 : if (disabled_nodes < old_path->disabled_nodes)
716 318 : break;
717 : }
718 5452728 : else if (total_cost <= old_path->total_cost * STD_FUZZ_FACTOR)
719 1585588 : break;
720 :
721 : /*
722 : * We are looking for an old_path with the same parameterization (and
723 : * by assumption the same rowcount) that dominates the new path on
724 : * pathkeys as well as both cost metrics. If we find one, we can
725 : * reject the new path.
726 : *
727 : * Cost comparisons here should match compare_path_costs_fuzzily.
728 : */
729 : /* new path can win on startup cost only if consider_startup */
730 3878726 : if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
731 1847428 : !consider_startup)
732 : {
733 : /* new path loses on cost, so check pathkeys... */
734 : List *old_path_pathkeys;
735 :
736 3785574 : old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
737 3785574 : keyscmp = compare_pathkeys(new_path_pathkeys,
738 : old_path_pathkeys);
739 3785574 : if (keyscmp == PATHKEYS_EQUAL ||
740 : keyscmp == PATHKEYS_BETTER2)
741 : {
742 : /* new path does not win on pathkeys... */
743 2605018 : if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
744 : {
745 : /* Found an old path that dominates the new one */
746 2545570 : return false;
747 : }
748 : }
749 : }
750 : }
751 :
752 1879246 : return true;
753 : }
754 :
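/*
 * A standalone illustrative sketch (not part of pathnode.c): why keeping the
 * pathlist sorted by total_cost lets a precheck stop scanning early.  Once we
 * reach an existing entry whose fuzzed total cost is not below the new
 * path's, no later entry can beat the new path on total cost either, so the
 * new path survives the precheck.  disabled_nodes, startup cost, pathkeys and
 * parameterization are all ignored here, so the "return false" branch is an
 * oversimplification of the real logic.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
sketch_precheck(const double *sorted_totals, int n, double new_total,
                double fuzz)
{
    for (int i = 0; i < n; i++)
    {
        /* list is sorted ascending, so we can stop at the first entry the
         * new path is fuzzily no worse than */
        if (new_total <= sorted_totals[i] * fuzz)
            return true;
        /* otherwise this cheaper entry might dominate; the real precheck
         * would now compare pathkeys and parameterization */
    }
    return false;               /* every existing path is fuzzily cheaper */
}

int
main(void)
{
    double      totals[] = {50.0, 120.0, 300.0};

    printf("%d\n", sketch_precheck(totals, 3, 100.0, 1.01));    /* prints 1 */
    printf("%d\n", sketch_precheck(totals, 3, 400.0, 1.01));    /* prints 0 */
    return 0;
}
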
755 : /*
756 : * add_partial_path
757 : * Like add_path, our goal here is to consider whether a path is worthy
758 : * of being kept around, but the considerations here are a bit different.
759 : * A partial path is one which can be executed in any number of workers in
760 : * parallel such that each worker will generate a subset of the path's
761 : * overall result.
762 : *
763 : * As in add_path, the partial_pathlist is kept sorted with the cheapest
764 : * total path in front. This is depended on by multiple places, which
765 : * just take the front entry as the cheapest path without searching.
766 : *
767 : * We don't generate parameterized partial paths for several reasons. Most
768 : * importantly, they're not safe to execute, because there's nothing to
769 : * make sure that a parallel scan within the parameterized portion of the
770 : * plan is running with the same value in every worker at the same time.
771 : * Fortunately, it seems unlikely to be worthwhile anyway, because having
772 : * each worker scan the entire outer relation and a subset of the inner
773 : * relation will generally be a terrible plan. The inner (parameterized)
774 : * side of the plan will be small anyway. There could be rare cases where
775 : * this wins big - e.g. if join order constraints put a 1-row relation on
776 : * the outer side of the topmost join with a parameterized plan on the inner
777 : * side - but we'll have to be content not to handle such cases until
778 : * somebody builds an executor infrastructure that can cope with them.
779 : *
780 : * Because we don't consider parameterized paths here, we also don't
781 : * need to consider the row counts as a measure of quality: every path will
782 : * produce the same number of rows. Neither do we need to consider startup
783 : * costs: parallelism is only used for plans that will be run to completion.
784 : * Therefore, this routine is much simpler than add_path: it needs to
785 : * consider only disabled nodes, pathkeys and total cost.
786 : *
787 : * As with add_path, we pfree paths that are found to be dominated by
788 : * another partial path; this requires that there be no other references to
789 : * such paths yet. Hence, GatherPaths must not be created for a rel until
790 : * we're done creating all partial paths for it. Unlike add_path, we don't
791 : * take an exception for IndexPaths as partial index paths won't be
792 : * referenced by partial BitmapHeapPaths.
793 : */
794 : void
795 102682 : add_partial_path(RelOptInfo *parent_rel, Path *new_path)
796 : {
797 102682 : bool accept_new = true; /* unless we find a superior old path */
798 102682 : int insert_at = 0; /* where to insert new item */
799 : ListCell *p1;
800 :
801 : /* Check for query cancel. */
802 102682 : CHECK_FOR_INTERRUPTS();
803 :
804 : /* Path to be added must be parallel safe. */
805 : Assert(new_path->parallel_safe);
806 :
807 : /* Relation should be OK for parallelism, too. */
808 : Assert(parent_rel->consider_parallel);
809 :
810 : /*
811 : * As in add_path, throw out any paths which are dominated by the new
812 : * path, but throw out the new path if some existing path dominates it.
813 : */
814 137044 : foreach(p1, parent_rel->partial_pathlist)
815 : {
816 53460 : Path *old_path = (Path *) lfirst(p1);
817 53460 : bool remove_old = false; /* unless new proves superior */
818 : PathKeysComparison keyscmp;
819 :
820 : /* Compare pathkeys. */
821 53460 : keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
822 :
823 : /* Unless pathkeys are incompatible, keep just one of the two paths. */
824 53460 : if (keyscmp != PATHKEYS_DIFFERENT)
825 : {
826 53250 : if (unlikely(new_path->disabled_nodes != old_path->disabled_nodes))
827 : {
828 1484 : if (new_path->disabled_nodes > old_path->disabled_nodes)
829 956 : accept_new = false;
830 : else
831 528 : remove_old = true;
832 : }
833 51766 : else if (new_path->total_cost > old_path->total_cost
834 51766 : * STD_FUZZ_FACTOR)
835 : {
836 : /* New path costs more; keep it only if pathkeys are better. */
837 18164 : if (keyscmp != PATHKEYS_BETTER1)
838 9478 : accept_new = false;
839 : }
840 33602 : else if (old_path->total_cost > new_path->total_cost
841 33602 : * STD_FUZZ_FACTOR)
842 : {
843 : /* Old path costs more; keep it only if pathkeys are better. */
844 24474 : if (keyscmp != PATHKEYS_BETTER2)
845 12606 : remove_old = true;
846 : }
847 9128 : else if (keyscmp == PATHKEYS_BETTER1)
848 : {
849 : /* Costs are about the same, new path has better pathkeys. */
850 0 : remove_old = true;
851 : }
852 9128 : else if (keyscmp == PATHKEYS_BETTER2)
853 : {
854 : /* Costs are about the same, old path has better pathkeys. */
855 1740 : accept_new = false;
856 : }
857 7388 : else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
858 : {
859 : /* Pathkeys are the same, and the old path costs more. */
860 464 : remove_old = true;
861 : }
862 : else
863 : {
864 : /*
865 : * Pathkeys are the same, and new path isn't materially
866 : * cheaper.
867 : */
868 6924 : accept_new = false;
869 : }
870 : }
871 :
872 : /*
873 : * Remove current element from partial_pathlist if dominated by new.
874 : */
875 53460 : if (remove_old)
876 : {
877 13598 : parent_rel->partial_pathlist =
878 13598 : foreach_delete_current(parent_rel->partial_pathlist, p1);
879 13598 : pfree(old_path);
880 : }
881 : else
882 : {
883 : /* new belongs after this old path if it has cost >= old's */
884 39862 : if (new_path->total_cost >= old_path->total_cost)
885 27008 : insert_at = foreach_current_index(p1) + 1;
886 : }
887 :
888 : /*
889 : * If we found an old path that dominates new_path, we can quit
890 : * scanning the partial_pathlist; we will not add new_path, and we
891 : * assume new_path cannot dominate any later path.
892 : */
893 53460 : if (!accept_new)
894 19098 : break;
895 : }
896 :
897 102682 : if (accept_new)
898 : {
899 : /* Accept the new path: insert it at proper place */
900 83584 : parent_rel->partial_pathlist =
901 83584 : list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
902 : }
903 : else
904 : {
905 : /* Reject and recycle the new path */
906 19098 : pfree(new_path);
907 : }
908 102682 : }
909 :
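/*
 * A standalone illustrative sketch (not part of pathnode.c): the reduced
 * decision table used for partial paths, where only pathkeys and total cost
 * matter.  Pathkey comparison is represented by a simple enum,
 * disabled_nodes and the sorted-insert step are omitted, and the final
 * tie-break with the much smaller fuzz factor is collapsed into "keep the
 * old path".  All names are hypothetical.
 */
#include <stdio.h>

typedef enum
{
    SK_KEYS_EQUAL,              /* same ordering */
    SK_KEYS_BETTER_NEW,         /* new path is better sorted */
    SK_KEYS_BETTER_OLD,         /* old path is better sorted */
    SK_KEYS_DIFFERENT,          /* orderings are incomparable */
} SketchKeysCmp;

typedef enum
{
    SK_KEEP_BOTH,
    SK_DROP_OLD,
    SK_DROP_NEW,
} SketchDecision;

static SketchDecision
sketch_partial_decision(double new_total, double old_total,
                        SketchKeysCmp keyscmp, double fuzz)
{
    if (keyscmp == SK_KEYS_DIFFERENT)
        return SK_KEEP_BOTH;    /* incomparable orderings: keep both */
    if (new_total > old_total * fuzz)
        return (keyscmp == SK_KEYS_BETTER_NEW) ? SK_KEEP_BOTH : SK_DROP_NEW;
    if (old_total > new_total * fuzz)
        return (keyscmp == SK_KEYS_BETTER_OLD) ? SK_KEEP_BOTH : SK_DROP_OLD;
    if (keyscmp == SK_KEYS_BETTER_NEW)
        return SK_DROP_OLD;
    if (keyscmp == SK_KEYS_BETTER_OLD)
        return SK_DROP_NEW;
    /* costs and pathkeys are both ties: arbitrarily keep the old path */
    return SK_DROP_NEW;
}

int
main(void)
{
    /* costlier but better-sorted new path: keep both (prints 0) */
    printf("%d\n", sketch_partial_decision(120.0, 100.0,
                                           SK_KEYS_BETTER_NEW, 1.01));
    /* same cost and ordering: the old path wins the tie (prints 2) */
    printf("%d\n", sketch_partial_decision(100.0, 100.0,
                                           SK_KEYS_EQUAL, 1.01));
    return 0;
}
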
910 : /*
911 : * add_partial_path_precheck
912 : * Check whether a proposed new partial path could possibly get accepted.
913 : *
914 : * Unlike add_path_precheck, we can ignore startup cost and parameterization,
915 : * since they don't matter for partial paths (see add_partial_path). But
916 : * we do want to make sure we don't add a partial path if there's already
917 : * a complete path that dominates it, since in that case the proposed path
918 : * is surely a loser.
919 : */
920 : bool
921 82660 : add_partial_path_precheck(RelOptInfo *parent_rel, int disabled_nodes,
922 : Cost total_cost, List *pathkeys)
923 : {
924 : ListCell *p1;
925 :
926 : /*
927 : * Our goal here is twofold. First, we want to find out whether this path
928 : * is clearly inferior to some existing partial path. If so, we want to
929 : * reject it immediately. Second, we want to find out whether this path
930 : * is clearly superior to some existing partial path -- at least, modulo
931 : * final cost computations. If so, we definitely want to consider it.
932 : *
933 : * Unlike add_path(), we always compare pathkeys here. This is because we
934 : * expect partial_pathlist to be very short, and getting a definitive
935 : * answer at this stage avoids the need to call add_path_precheck.
936 : */
937 112896 : foreach(p1, parent_rel->partial_pathlist)
938 : {
939 91902 : Path *old_path = (Path *) lfirst(p1);
940 : PathKeysComparison keyscmp;
941 :
942 91902 : keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
943 91902 : if (keyscmp != PATHKEYS_DIFFERENT)
944 : {
945 91710 : if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
946 : keyscmp != PATHKEYS_BETTER1)
947 61666 : return false;
948 44558 : if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
949 : keyscmp != PATHKEYS_BETTER2)
950 14514 : return true;
951 : }
952 : }
953 :
954 : /*
955 : * This path is neither clearly inferior to an existing partial path nor
956 : * clearly good enough that it might replace one. Compare it to
957 : * non-parallel plans. If it loses even before accounting for the cost of
958 : * the Gather node, we should definitely reject it.
959 : *
960 : * Note that we pass the total_cost to add_path_precheck twice. This is
961 : * because it's never advantageous to consider the startup cost of a
962 : * partial path; the resulting plans, if run in parallel, will be run to
963 : * completion.
964 : */
965 20994 : if (!add_path_precheck(parent_rel, disabled_nodes, total_cost, total_cost,
966 : pathkeys, NULL))
967 2080 : return false;
968 :
969 18914 : return true;
970 : }
971 :
972 :
973 : /*****************************************************************************
974 : * PATH NODE CREATION ROUTINES
975 : *****************************************************************************/
976 :
977 : /*
978 : * create_seqscan_path
979 : * Creates a path corresponding to a sequential scan, returning the
980 : * pathnode.
981 : */
982 : Path *
983 415312 : create_seqscan_path(PlannerInfo *root, RelOptInfo *rel,
984 : Relids required_outer, int parallel_workers)
985 : {
986 415312 : Path *pathnode = makeNode(Path);
987 :
988 415312 : pathnode->pathtype = T_SeqScan;
989 415312 : pathnode->parent = rel;
990 415312 : pathnode->pathtarget = rel->reltarget;
991 415312 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
992 : required_outer);
993 415312 : pathnode->parallel_aware = (parallel_workers > 0);
994 415312 : pathnode->parallel_safe = rel->consider_parallel;
995 415312 : pathnode->parallel_workers = parallel_workers;
996 415312 : pathnode->pathkeys = NIL; /* seqscan has unordered result */
997 :
998 415312 : cost_seqscan(pathnode, root, rel, pathnode->param_info);
999 :
1000 415312 : return pathnode;
1001 : }
1002 :
1003 : /*
1004 : * create_samplescan_path
1005 : * Creates a path node for a sampled table scan.
1006 : */
1007 : Path *
1008 306 : create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
1009 : {
1010 306 : Path *pathnode = makeNode(Path);
1011 :
1012 306 : pathnode->pathtype = T_SampleScan;
1013 306 : pathnode->parent = rel;
1014 306 : pathnode->pathtarget = rel->reltarget;
1015 306 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
1016 : required_outer);
1017 306 : pathnode->parallel_aware = false;
1018 306 : pathnode->parallel_safe = rel->consider_parallel;
1019 306 : pathnode->parallel_workers = 0;
1020 306 : pathnode->pathkeys = NIL; /* samplescan has unordered result */
1021 :
1022 306 : cost_samplescan(pathnode, root, rel, pathnode->param_info);
1023 :
1024 306 : return pathnode;
1025 : }
1026 :
1027 : /*
1028 : * create_index_path
1029 : * Creates a path node for an index scan.
1030 : *
1031 : * 'index' is a usable index.
1032 : * 'indexclauses' is a list of IndexClause nodes representing clauses
1033 : * to be enforced as qual conditions in the scan.
1034 : * 'indexorderbys' is a list of bare expressions (no RestrictInfos)
1035 : * to be used as index ordering operators in the scan.
1036 : * 'indexorderbycols' is an integer list of index column numbers (zero based)
1037 : * the ordering operators can be used with.
1038 : * 'pathkeys' describes the ordering of the path.
1039 : * 'indexscandir' is either ForwardScanDirection or BackwardScanDirection.
1040 : * 'indexonly' is true if an index-only scan is wanted.
1041 : * 'required_outer' is the set of outer relids for a parameterized path.
1042 : * 'loop_count' is the number of repetitions of the indexscan to factor into
1043 : * estimates of caching behavior.
1044 : * 'partial_path' is true if constructing a parallel index scan path.
1045 : *
1046 : * Returns the new path node.
1047 : */
1048 : IndexPath *
1049 755792 : create_index_path(PlannerInfo *root,
1050 : IndexOptInfo *index,
1051 : List *indexclauses,
1052 : List *indexorderbys,
1053 : List *indexorderbycols,
1054 : List *pathkeys,
1055 : ScanDirection indexscandir,
1056 : bool indexonly,
1057 : Relids required_outer,
1058 : double loop_count,
1059 : bool partial_path)
1060 : {
1061 755792 : IndexPath *pathnode = makeNode(IndexPath);
1062 755792 : RelOptInfo *rel = index->rel;
1063 :
1064 755792 : pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
1065 755792 : pathnode->path.parent = rel;
1066 755792 : pathnode->path.pathtarget = rel->reltarget;
1067 755792 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1068 : required_outer);
1069 755792 : pathnode->path.parallel_aware = false;
1070 755792 : pathnode->path.parallel_safe = rel->consider_parallel;
1071 755792 : pathnode->path.parallel_workers = 0;
1072 755792 : pathnode->path.pathkeys = pathkeys;
1073 :
1074 755792 : pathnode->indexinfo = index;
1075 755792 : pathnode->indexclauses = indexclauses;
1076 755792 : pathnode->indexorderbys = indexorderbys;
1077 755792 : pathnode->indexorderbycols = indexorderbycols;
1078 755792 : pathnode->indexscandir = indexscandir;
1079 :
1080 755792 : cost_index(pathnode, root, loop_count, partial_path);
1081 :
1082 755792 : return pathnode;
1083 : }
1084 :
1085 : /*
1086 : * create_bitmap_heap_path
1087 : * Creates a path node for a bitmap scan.
1088 : *
1089 : * 'bitmapqual' is a tree of IndexPath, BitmapAndPath, and BitmapOrPath nodes.
1090 : * 'required_outer' is the set of outer relids for a parameterized path.
1091 : * 'loop_count' is the number of repetitions of the indexscan to factor into
1092 : * estimates of caching behavior.
1093 : *
1094 : * loop_count should match the value used when creating the component
1095 : * IndexPaths.
1096 : */
1097 : BitmapHeapPath *
1098 330700 : create_bitmap_heap_path(PlannerInfo *root,
1099 : RelOptInfo *rel,
1100 : Path *bitmapqual,
1101 : Relids required_outer,
1102 : double loop_count,
1103 : int parallel_degree)
1104 : {
1105 330700 : BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
1106 :
1107 330700 : pathnode->path.pathtype = T_BitmapHeapScan;
1108 330700 : pathnode->path.parent = rel;
1109 330700 : pathnode->path.pathtarget = rel->reltarget;
1110 330700 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1111 : required_outer);
1112 330700 : pathnode->path.parallel_aware = (parallel_degree > 0);
1113 330700 : pathnode->path.parallel_safe = rel->consider_parallel;
1114 330700 : pathnode->path.parallel_workers = parallel_degree;
1115 330700 : pathnode->path.pathkeys = NIL; /* always unordered */
1116 :
1117 330700 : pathnode->bitmapqual = bitmapqual;
1118 :
1119 330700 : cost_bitmap_heap_scan(&pathnode->path, root, rel,
1120 : pathnode->path.param_info,
1121 : bitmapqual, loop_count);
1122 :
1123 330700 : return pathnode;
1124 : }
1125 :
1126 : /*
1127 : * create_bitmap_and_path
1128 : * Creates a path node representing a BitmapAnd.
1129 : */
1130 : BitmapAndPath *
1131 49718 : create_bitmap_and_path(PlannerInfo *root,
1132 : RelOptInfo *rel,
1133 : List *bitmapquals)
1134 : {
1135 49718 : BitmapAndPath *pathnode = makeNode(BitmapAndPath);
1136 49718 : Relids required_outer = NULL;
1137 : ListCell *lc;
1138 :
1139 49718 : pathnode->path.pathtype = T_BitmapAnd;
1140 49718 : pathnode->path.parent = rel;
1141 49718 : pathnode->path.pathtarget = rel->reltarget;
1142 :
1143 : /*
1144 : * Identify the required outer rels as the union of what the child paths
1145 : * depend on. (Alternatively, we could insist that the caller pass this
1146 : * in, but it's more convenient and reliable to compute it here.)
1147 : */
1148 149154 : foreach(lc, bitmapquals)
1149 : {
1150 99436 : Path *bitmapqual = (Path *) lfirst(lc);
1151 :
1152 99436 : required_outer = bms_add_members(required_outer,
1153 99436 : PATH_REQ_OUTER(bitmapqual));
1154 : }
1155 49718 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1156 : required_outer);
1157 :
1158 : /*
1159 : * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1160 : * parallel-safe if and only if rel->consider_parallel is set. So, we can
1161 : * set the flag for this path based only on the relation-level flag,
1162 : * without actually iterating over the list of children.
1163 : */
1164 49718 : pathnode->path.parallel_aware = false;
1165 49718 : pathnode->path.parallel_safe = rel->consider_parallel;
1166 49718 : pathnode->path.parallel_workers = 0;
1167 :
1168 49718 : pathnode->path.pathkeys = NIL; /* always unordered */
1169 :
1170 49718 : pathnode->bitmapquals = bitmapquals;
1171 :
1172 : /* this sets bitmapselectivity as well as the regular cost fields: */
1173 49718 : cost_bitmap_and_node(pathnode, root);
1174 :
1175 49718 : return pathnode;
1176 : }
1177 :
1178 : /*
1179 : * create_bitmap_or_path
1180 : * Creates a path node representing a BitmapOr.
1181 : */
1182 : BitmapOrPath *
1183 1016 : create_bitmap_or_path(PlannerInfo *root,
1184 : RelOptInfo *rel,
1185 : List *bitmapquals)
1186 : {
1187 1016 : BitmapOrPath *pathnode = makeNode(BitmapOrPath);
1188 1016 : Relids required_outer = NULL;
1189 : ListCell *lc;
1190 :
1191 1016 : pathnode->path.pathtype = T_BitmapOr;
1192 1016 : pathnode->path.parent = rel;
1193 1016 : pathnode->path.pathtarget = rel->reltarget;
1194 :
1195 : /*
1196 : * Identify the required outer rels as the union of what the child paths
1197 : * depend on. (Alternatively, we could insist that the caller pass this
1198 : * in, but it's more convenient and reliable to compute it here.)
1199 : */
1200 2850 : foreach(lc, bitmapquals)
1201 : {
1202 1834 : Path *bitmapqual = (Path *) lfirst(lc);
1203 :
1204 1834 : required_outer = bms_add_members(required_outer,
1205 1834 : PATH_REQ_OUTER(bitmapqual));
1206 : }
1207 1016 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1208 : required_outer);
1209 :
1210 : /*
1211 : * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1212 : * parallel-safe if and only if rel->consider_parallel is set. So, we can
1213 : * set the flag for this path based only on the relation-level flag,
1214 : * without actually iterating over the list of children.
1215 : */
1216 1016 : pathnode->path.parallel_aware = false;
1217 1016 : pathnode->path.parallel_safe = rel->consider_parallel;
1218 1016 : pathnode->path.parallel_workers = 0;
1219 :
1220 1016 : pathnode->path.pathkeys = NIL; /* always unordered */
1221 :
1222 1016 : pathnode->bitmapquals = bitmapquals;
1223 :
1224 : /* this sets bitmapselectivity as well as the regular cost fields: */
1225 1016 : cost_bitmap_or_node(pathnode, root);
1226 :
1227 1016 : return pathnode;
1228 : }
1229 :
1230 : /*
1231 : * create_tidscan_path
1232 : * Creates a path corresponding to a scan by TID, returning the pathnode.
1233 : */
1234 : TidPath *
1235 860 : create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals,
1236 : Relids required_outer)
1237 : {
1238 860 : TidPath *pathnode = makeNode(TidPath);
1239 :
1240 860 : pathnode->path.pathtype = T_TidScan;
1241 860 : pathnode->path.parent = rel;
1242 860 : pathnode->path.pathtarget = rel->reltarget;
1243 860 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1244 : required_outer);
1245 860 : pathnode->path.parallel_aware = false;
1246 860 : pathnode->path.parallel_safe = rel->consider_parallel;
1247 860 : pathnode->path.parallel_workers = 0;
1248 860 : pathnode->path.pathkeys = NIL; /* always unordered */
1249 :
1250 860 : pathnode->tidquals = tidquals;
1251 :
1252 860 : cost_tidscan(&pathnode->path, root, rel, tidquals,
1253 : pathnode->path.param_info);
1254 :
1255 860 : return pathnode;
1256 : }
1257 :
1258 : /*
1259 : * create_tidrangescan_path
1260 : * Creates a path corresponding to a scan by a range of TIDs, returning
1261 : * the pathnode.
1262 : */
1263 : TidRangePath *
1264 1940 : create_tidrangescan_path(PlannerInfo *root, RelOptInfo *rel,
1265 : List *tidrangequals, Relids required_outer)
1266 : {
1267 1940 : TidRangePath *pathnode = makeNode(TidRangePath);
1268 :
1269 1940 : pathnode->path.pathtype = T_TidRangeScan;
1270 1940 : pathnode->path.parent = rel;
1271 1940 : pathnode->path.pathtarget = rel->reltarget;
1272 1940 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1273 : required_outer);
1274 1940 : pathnode->path.parallel_aware = false;
1275 1940 : pathnode->path.parallel_safe = rel->consider_parallel;
1276 1940 : pathnode->path.parallel_workers = 0;
1277 1940 : pathnode->path.pathkeys = NIL; /* always unordered */
1278 :
1279 1940 : pathnode->tidrangequals = tidrangequals;
1280 :
1281 1940 : cost_tidrangescan(&pathnode->path, root, rel, tidrangequals,
1282 : pathnode->path.param_info);
1283 :
1284 1940 : return pathnode;
1285 : }
1286 :
1287 : /*
1288 : * create_append_path
1289 : * Creates a path corresponding to an Append plan, returning the
1290 : * pathnode.
1291 : *
1292 : * Note that we must handle subpaths = NIL, representing a dummy access path.
1293 : * Also, there are callers that pass root = NULL.
1294 : *
1295 : * 'rows', when passed as a non-negative number, will be used to overwrite the
1296 : * returned path's row estimate. Otherwise, the row estimate is calculated
1297 : * by totalling the row estimates from the 'subpaths' list.
1298 : */
1299 : AppendPath *
1300 76360 : create_append_path(PlannerInfo *root,
1301 : RelOptInfo *rel,
1302 : List *subpaths, List *partial_subpaths,
1303 : List *pathkeys, Relids required_outer,
1304 : int parallel_workers, bool parallel_aware,
1305 : double rows)
1306 : {
1307 76360 : AppendPath *pathnode = makeNode(AppendPath);
1308 : ListCell *l;
1309 :
1310 : Assert(!parallel_aware || parallel_workers > 0);
1311 :
1312 76360 : pathnode->path.pathtype = T_Append;
1313 76360 : pathnode->path.parent = rel;
1314 76360 : pathnode->path.pathtarget = rel->reltarget;
1315 :
1316 : /*
1317 : * If this is for a baserel (not a join or non-leaf partition), we prefer
1318 : * to apply get_baserel_parampathinfo to construct a full ParamPathInfo
1319 : * for the path. This supports building a Memoize path atop this path,
1320 : * and if this is a partitioned table the info may be useful for run-time
1321 : * pruning (cf make_partition_pruneinfo()).
1322 : *
1323 : * However, if we don't have "root" then that won't work and we fall back
1324 : * on the simpler get_appendrel_parampathinfo. There's no point in doing
1325 : * the more expensive thing for a dummy path, either.
1326 : */
1327 76360 : if (rel->reloptkind == RELOPT_BASEREL && root && subpaths != NIL)
1328 38058 : pathnode->path.param_info = get_baserel_parampathinfo(root,
1329 : rel,
1330 : required_outer);
1331 : else
1332 38302 : pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1333 : required_outer);
1334 :
1335 76360 : pathnode->path.parallel_aware = parallel_aware;
1336 76360 : pathnode->path.parallel_safe = rel->consider_parallel;
1337 76360 : pathnode->path.parallel_workers = parallel_workers;
1338 76360 : pathnode->path.pathkeys = pathkeys;
1339 :
1340 : /*
1341 : * For parallel append, non-partial paths are sorted by descending total
1342 : * costs. That way, the total time to finish all non-partial paths is
1343 : * minimized. Also, the partial paths are sorted by descending startup
1344 : * costs. There may be some paths that require to do startup work by a
1345 : * single worker. In such case, it's better for workers to choose the
1346 : * expensive ones first, whereas the leader should choose the cheapest
1347 : * startup plan.
1348 : */
1349 76360 : if (pathnode->path.parallel_aware)
1350 : {
1351 : /*
1352 : * We mustn't fiddle with the order of subpaths when the Append has
1353 : * pathkeys. The order they're listed in is critical to keeping the
1354 : * pathkeys valid.
1355 : */
1356 : Assert(pathkeys == NIL);
1357 :
1358 25680 : list_sort(subpaths, append_total_cost_compare);
1359 25680 : list_sort(partial_subpaths, append_startup_cost_compare);
1360 : }
1361 76360 : pathnode->first_partial_path = list_length(subpaths);
1362 76360 : pathnode->subpaths = list_concat(subpaths, partial_subpaths);
1363 :
1364 : /*
1365 : * Apply query-wide LIMIT if known and path is for sole base relation.
1366 : * (Handling this at this low level is a bit klugy.)
1367 : */
1368 76360 : if (root != NULL && bms_equal(rel->relids, root->all_query_rels))
1369 39186 : pathnode->limit_tuples = root->limit_tuples;
1370 : else
1371 37174 : pathnode->limit_tuples = -1.0;
1372 :
1373 250378 : foreach(l, pathnode->subpaths)
1374 : {
1375 174018 : Path *subpath = (Path *) lfirst(l);
1376 :
1377 308600 : pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1378 134582 : subpath->parallel_safe;
1379 :
1380 : /* All child paths must have same parameterization */
1381 : Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1382 : }
1383 :
1384 : Assert(!parallel_aware || pathnode->path.parallel_safe);
1385 :
1386 : /*
1387 : * If there's exactly one child path then the output of the Append is
1388 : * necessarily ordered the same as the child's, so we can inherit the
1389 : * child's pathkeys if any, overriding whatever the caller might've said.
1390 : * Furthermore, if the child's parallel awareness matches the Append's,
1391 : * then the Append is a no-op and will be discarded later (in setrefs.c).
1392 : * Then we can inherit the child's size and cost too, effectively charging
1393 : * zero for the Append. Otherwise, we must do the normal costsize
1394 : * calculation.
1395 : */
1396 76360 : if (list_length(pathnode->subpaths) == 1)
1397 : {
1398 22176 : Path *child = (Path *) linitial(pathnode->subpaths);
1399 :
1400 22176 : if (child->parallel_aware == parallel_aware)
1401 : {
1402 21738 : pathnode->path.rows = child->rows;
1403 21738 : pathnode->path.startup_cost = child->startup_cost;
1404 21738 : pathnode->path.total_cost = child->total_cost;
1405 : }
1406 : else
1407 438 : cost_append(pathnode, root);
1408 : /* Must do this last, else cost_append complains */
1409 22176 : pathnode->path.pathkeys = child->pathkeys;
1410 : }
1411 : else
1412 54184 : cost_append(pathnode, root);
1413 :
1414 : /* If the caller provided a row estimate, override the computed value. */
1415 76360 : if (rows >= 0)
1416 576 : pathnode->path.rows = rows;
1417 :
1418 76360 : return pathnode;
1419 : }
1420 :
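/*
 * A standalone illustrative sketch (not part of pathnode.c): a toy
 * simulation of why Parallel Append lists non-partial subpaths in descending
 * cost order.  Each worker repeatedly grabs the next unclaimed subpath, so
 * handing out the expensive ones first (longest-task-first) tends to
 * minimize the time until all non-partial subpaths are finished.  This
 * greedy model is an illustration only, not the executor's actual
 * scheduling code.
 */
#include <stdio.h>

/* Simulate nworkers (at most 8) pulling tasks in the given order; each task
 * goes to the worker that becomes free first.  Returns the makespan. */
static double
sketch_makespan(const double *costs, int ntasks, int nworkers)
{
    double      busy_until[8] = {0};
    double      makespan = 0;

    for (int i = 0; i < ntasks; i++)
    {
        int         best = 0;

        for (int w = 1; w < nworkers; w++)
            if (busy_until[w] < busy_until[best])
                best = w;
        busy_until[best] += costs[i];
    }

    for (int w = 0; w < nworkers; w++)
        if (busy_until[w] > makespan)
            makespan = busy_until[w];
    return makespan;
}

int
main(void)
{
    double      descending[] = {10.0, 1.0, 1.0};
    double      ascending[] = {1.0, 1.0, 10.0};

    /* 10.0 vs 11.0: the descending order finishes sooner */
    printf("%.1f\n", sketch_makespan(descending, 3, 2));
    printf("%.1f\n", sketch_makespan(ascending, 3, 2));
    return 0;
}
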
1421 : /*
1422 : * append_total_cost_compare
1423 : * list_sort comparator for sorting append child paths
1424 : * by total_cost descending
1425 : *
1426 : * For equal total costs, we fall back to comparing startup costs; if those
1427 : * are equal too, break ties using bms_compare on the paths' relids.
1428 : * (This is to avoid getting unpredictable results from list_sort.)
1429 : */
1430 : static int
1431 4568 : append_total_cost_compare(const ListCell *a, const ListCell *b)
1432 : {
1433 4568 : Path *path1 = (Path *) lfirst(a);
1434 4568 : Path *path2 = (Path *) lfirst(b);
1435 : int cmp;
1436 :
1437 4568 : cmp = compare_path_costs(path1, path2, TOTAL_COST);
1438 4568 : if (cmp != 0)
1439 4292 : return -cmp;
1440 276 : return bms_compare(path1->parent->relids, path2->parent->relids);
1441 : }
1442 :
1443 : /*
1444 : * append_startup_cost_compare
1445 : * list_sort comparator for sorting append child paths
1446 : * by startup_cost descending
1447 : *
1448 : * For equal startup costs, we fall back to comparing total costs; if those
1449 : * are equal too, break ties using bms_compare on the paths' relids.
1450 : * (This is to avoid getting unpredictable results from list_sort.)
1451 : */
1452 : static int
1453 34156 : append_startup_cost_compare(const ListCell *a, const ListCell *b)
1454 : {
1455 34156 : Path *path1 = (Path *) lfirst(a);
1456 34156 : Path *path2 = (Path *) lfirst(b);
1457 : int cmp;
1458 :
1459 34156 : cmp = compare_path_costs(path1, path2, STARTUP_COST);
1460 34156 : if (cmp != 0)
1461 13446 : return -cmp;
1462 20710 : return bms_compare(path1->parent->relids, path2->parent->relids);
1463 : }
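
Both comparators above follow the same pattern: compare_path_costs() reports ascending order, its result is negated so that list_sort() produces a descending order, and a deterministic tie-break is applied un-negated. A minimal standalone sketch of that pattern using qsort() and a hypothetical ChildCost struct (illustrative only, not PostgreSQL code):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a child path: one cost plus an id for tie-breaks. */
typedef struct ChildCost
{
	double		total_cost;
	int			relid;			/* plays the role of bms_compare on relids */
} ChildCost;

static int
total_cost_compare_desc(const void *a, const void *b)
{
	const ChildCost *c1 = (const ChildCost *) a;
	const ChildCost *c2 = (const ChildCost *) b;
	int			cmp;

	/* ascending comparison, like compare_path_costs() ... */
	if (c1->total_cost < c2->total_cost)
		cmp = -1;
	else if (c1->total_cost > c2->total_cost)
		cmp = +1;
	else
		cmp = 0;

	/* ... negated, so the overall sort comes out descending */
	if (cmp != 0)
		return -cmp;

	/* equal costs: deterministic (un-negated) tie-break */
	return (c1->relid > c2->relid) - (c1->relid < c2->relid);
}

int
main(void)
{
	ChildCost	kids[] = {{10.0, 1}, {25.0, 3}, {25.0, 2}, {5.0, 4}};

	qsort(kids, 4, sizeof(ChildCost), total_cost_compare_desc);
	for (int i = 0; i < 4; i++)
		printf("relid %d: total_cost %.1f\n", kids[i].relid, kids[i].total_cost);
	return 0;
}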
1464 :
1465 : /*
1466 : * create_merge_append_path
1467 : * Creates a path corresponding to a MergeAppend plan, returning the
1468 : * pathnode.
1469 : */
1470 : MergeAppendPath *
1471 4328 : create_merge_append_path(PlannerInfo *root,
1472 : RelOptInfo *rel,
1473 : List *subpaths,
1474 : List *pathkeys,
1475 : Relids required_outer)
1476 : {
1477 4328 : MergeAppendPath *pathnode = makeNode(MergeAppendPath);
1478 : int input_disabled_nodes;
1479 : Cost input_startup_cost;
1480 : Cost input_total_cost;
1481 : ListCell *l;
1482 :
1483 : /*
1484 : * We don't currently support parameterized MergeAppend paths, as
1485 : * explained in the comments for generate_orderedappend_paths.
1486 : */
1487 : Assert(bms_is_empty(rel->lateral_relids) && bms_is_empty(required_outer));
1488 :
1489 4328 : pathnode->path.pathtype = T_MergeAppend;
1490 4328 : pathnode->path.parent = rel;
1491 4328 : pathnode->path.pathtarget = rel->reltarget;
1492 4328 : pathnode->path.param_info = NULL;
1493 4328 : pathnode->path.parallel_aware = false;
1494 4328 : pathnode->path.parallel_safe = rel->consider_parallel;
1495 4328 : pathnode->path.parallel_workers = 0;
1496 4328 : pathnode->path.pathkeys = pathkeys;
1497 4328 : pathnode->subpaths = subpaths;
1498 :
1499 : /*
1500 : * Apply query-wide LIMIT if known and path is for sole base relation.
1501 : * (Handling this at this low level is a bit kludgy.)
1502 : */
1503 4328 : if (bms_equal(rel->relids, root->all_query_rels))
1504 2190 : pathnode->limit_tuples = root->limit_tuples;
1505 : else
1506 2138 : pathnode->limit_tuples = -1.0;
1507 :
1508 : /*
1509 : * Add up the sizes and costs of the input paths.
1510 : */
1511 4328 : pathnode->path.rows = 0;
1512 4328 : input_disabled_nodes = 0;
1513 4328 : input_startup_cost = 0;
1514 4328 : input_total_cost = 0;
1515 16140 : foreach(l, subpaths)
1516 : {
1517 11812 : Path *subpath = (Path *) lfirst(l);
1518 : int presorted_keys;
1519 : Path sort_path; /* dummy for result of
1520 : * cost_sort/cost_incremental_sort */
1521 :
1522 : /* All child paths should be unparameterized */
1523 : Assert(bms_is_empty(PATH_REQ_OUTER(subpath)));
1524 :
1525 11812 : pathnode->path.rows += subpath->rows;
1526 20834 : pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1527 9022 : subpath->parallel_safe;
1528 :
1529 11812 : if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
1530 : &presorted_keys))
1531 : {
1532 : /*
1533 : * We'll need to insert a Sort node, so include costs for that. We
1534 : * choose to use incremental sort if it is enabled and there are
1535 : * presorted keys; otherwise we use full sort.
1536 : *
1537 : * We can use the parent's LIMIT if any, since we certainly won't
1538 : * pull more than that many tuples from any child.
1539 : */
1540 346 : if (enable_incremental_sort && presorted_keys > 0)
1541 : {
1542 18 : cost_incremental_sort(&sort_path,
1543 : root,
1544 : pathkeys,
1545 : presorted_keys,
1546 : subpath->disabled_nodes,
1547 : subpath->startup_cost,
1548 : subpath->total_cost,
1549 : subpath->rows,
1550 18 : subpath->pathtarget->width,
1551 : 0.0,
1552 : work_mem,
1553 : pathnode->limit_tuples);
1554 : }
1555 : else
1556 : {
1557 328 : cost_sort(&sort_path,
1558 : root,
1559 : pathkeys,
1560 : subpath->disabled_nodes,
1561 : subpath->total_cost,
1562 : subpath->rows,
1563 328 : subpath->pathtarget->width,
1564 : 0.0,
1565 : work_mem,
1566 : pathnode->limit_tuples);
1567 : }
1568 :
1569 346 : subpath = &sort_path;
1570 : }
1571 :
1572 11812 : input_disabled_nodes += subpath->disabled_nodes;
1573 11812 : input_startup_cost += subpath->startup_cost;
1574 11812 : input_total_cost += subpath->total_cost;
1575 : }
1576 :
1577 : /*
1578 : * Now we can compute total costs of the MergeAppend. If there's exactly
1579 : * one child path and its parallel awareness matches that of the
1580 : * MergeAppend, then the MergeAppend is a no-op and will be discarded
1581 : * later (in setrefs.c); otherwise we do the normal cost calculation.
1582 : */
1583 4328 : if (list_length(subpaths) == 1 &&
1584 110 : ((Path *) linitial(subpaths))->parallel_aware ==
1585 110 : pathnode->path.parallel_aware)
1586 : {
1587 110 : pathnode->path.disabled_nodes = input_disabled_nodes;
1588 110 : pathnode->path.startup_cost = input_startup_cost;
1589 110 : pathnode->path.total_cost = input_total_cost;
1590 : }
1591 : else
1592 4218 : cost_merge_append(&pathnode->path, root,
1593 : pathkeys, list_length(subpaths),
1594 : input_disabled_nodes,
1595 : input_startup_cost, input_total_cost,
1596 : pathnode->path.rows);
1597 :
1598 4328 : return pathnode;
1599 : }
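
To summarize the loop above: each child contributes its own costs, except that a child that is not sufficiently sorted contributes the costs of a Sort (or Incremental Sort) stacked on top of it. A minimal standalone sketch of that accumulation with made-up numbers (not PostgreSQL code; the real figures come from cost_sort()/cost_incremental_sort()):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-child cost summary (illustrative only). */
typedef struct ChildIn
{
	double		startup_cost;
	double		total_cost;
	bool		sorted_enough;	/* are the required pathkeys already satisfied? */
} ChildIn;

int
main(void)
{
	ChildIn		children[] = {
		{1.0, 40.0, true},
		{0.5, 35.0, false},		/* needs an added Sort on top */
		{2.0, 60.0, true},
	};
	double		input_startup = 0.0;
	double		input_total = 0.0;

	for (int i = 0; i < 3; i++)
	{
		double		startup = children[i].startup_cost;
		double		total = children[i].total_cost;

		if (!children[i].sorted_enough)
		{
			/*
			 * Stand-in for cost_sort(): a full Sort must read its entire
			 * input before emitting anything, so its startup cost includes
			 * the child's total cost plus the sort work itself (a made-up
			 * figure here).
			 */
			double		sort_work = 10.0;

			startup = total + sort_work;
			total = startup;
		}
		input_startup += startup;
		input_total += total;
	}
	printf("input startup=%.1f total=%.1f\n", input_startup, input_total);
	return 0;
}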
1600 :
1601 : /*
1602 : * create_group_result_path
1603 : * Creates a path representing a Result-and-nothing-else plan.
1604 : *
1605 : * This is only used for degenerate grouping cases, in which we know we
1606 : * need to produce one result row, possibly filtered by a HAVING qual.
1607 : */
1608 : GroupResultPath *
1609 189584 : create_group_result_path(PlannerInfo *root, RelOptInfo *rel,
1610 : PathTarget *target, List *havingqual)
1611 : {
1612 189584 : GroupResultPath *pathnode = makeNode(GroupResultPath);
1613 :
1614 189584 : pathnode->path.pathtype = T_Result;
1615 189584 : pathnode->path.parent = rel;
1616 189584 : pathnode->path.pathtarget = target;
1617 189584 : pathnode->path.param_info = NULL; /* there are no other rels... */
1618 189584 : pathnode->path.parallel_aware = false;
1619 189584 : pathnode->path.parallel_safe = rel->consider_parallel;
1620 189584 : pathnode->path.parallel_workers = 0;
1621 189584 : pathnode->path.pathkeys = NIL;
1622 189584 : pathnode->quals = havingqual;
1623 :
1624 : /*
1625 : * We can't quite use cost_resultscan() because the quals we want to
1626 : * account for are not baserestrict quals of the rel. Might as well just
1627 : * hack it here.
1628 : */
1629 189584 : pathnode->path.rows = 1;
1630 189584 : pathnode->path.startup_cost = target->cost.startup;
1631 189584 : pathnode->path.total_cost = target->cost.startup +
1632 189584 : cpu_tuple_cost + target->cost.per_tuple;
1633 :
1634 : /*
1635 : * Add cost of qual, if any --- but we ignore its selectivity, since our
1636 : * rowcount estimate should be 1 no matter what the qual is.
1637 : */
1638 189584 : if (havingqual)
1639 : {
1640 : QualCost qual_cost;
1641 :
1642 616 : cost_qual_eval(&qual_cost, havingqual, root);
1643 : /* havingqual is evaluated once at startup */
1644 616 : pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
1645 616 : pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
1646 : }
1647 :
1648 189584 : return pathnode;
1649 : }
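
A short worked example of the arithmetic above, with assumed figures (cpu_tuple_cost shown at its usual default of 0.01; the target and HAVING costs are made up):

#include <stdio.h>

int
main(void)
{
	/* Assumed inputs, for illustration only. */
	double		cpu_tuple_cost = 0.01;	/* usual default GUC setting */
	double		target_startup = 0.0;
	double		target_per_tuple = 0.0050;
	double		qual_startup = 0.0;
	double		qual_per_tuple = 0.0025;	/* evaluation cost of the HAVING qual */

	double		startup = target_startup;
	double		total = target_startup + cpu_tuple_cost + target_per_tuple;

	/* The HAVING qual is evaluated once, at startup, so both costs get it. */
	startup += qual_startup + qual_per_tuple;
	total += qual_startup + qual_per_tuple;

	printf("rows=1 startup=%.4f total=%.4f\n", startup, total);
	return 0;
}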
1650 :
1651 : /*
1652 : * create_material_path
1653 : * Creates a path corresponding to a Material plan, returning the
1654 : * pathnode.
1655 : */
1656 : MaterialPath *
1657 499312 : create_material_path(RelOptInfo *rel, Path *subpath)
1658 : {
1659 499312 : MaterialPath *pathnode = makeNode(MaterialPath);
1660 :
1661 : Assert(subpath->parent == rel);
1662 :
1663 499312 : pathnode->path.pathtype = T_Material;
1664 499312 : pathnode->path.parent = rel;
1665 499312 : pathnode->path.pathtarget = rel->reltarget;
1666 499312 : pathnode->path.param_info = subpath->param_info;
1667 499312 : pathnode->path.parallel_aware = false;
1668 945038 : pathnode->path.parallel_safe = rel->consider_parallel &&
1669 445726 : subpath->parallel_safe;
1670 499312 : pathnode->path.parallel_workers = subpath->parallel_workers;
1671 499312 : pathnode->path.pathkeys = subpath->pathkeys;
1672 :
1673 499312 : pathnode->subpath = subpath;
1674 :
1675 499312 : cost_material(&pathnode->path,
1676 : subpath->disabled_nodes,
1677 : subpath->startup_cost,
1678 : subpath->total_cost,
1679 : subpath->rows,
1680 499312 : subpath->pathtarget->width);
1681 :
1682 499312 : return pathnode;
1683 : }
1684 :
1685 : /*
1686 : * create_memoize_path
1687 : * Creates a path corresponding to a Memoize plan, returning the pathnode.
1688 : */
1689 : MemoizePath *
1690 291186 : create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1691 : List *param_exprs, List *hash_operators,
1692 : bool singlerow, bool binary_mode, Cardinality est_calls)
1693 : {
1694 291186 : MemoizePath *pathnode = makeNode(MemoizePath);
1695 :
1696 : Assert(subpath->parent == rel);
1697 :
1698 291186 : pathnode->path.pathtype = T_Memoize;
1699 291186 : pathnode->path.parent = rel;
1700 291186 : pathnode->path.pathtarget = rel->reltarget;
1701 291186 : pathnode->path.param_info = subpath->param_info;
1702 291186 : pathnode->path.parallel_aware = false;
1703 568518 : pathnode->path.parallel_safe = rel->consider_parallel &&
1704 277332 : subpath->parallel_safe;
1705 291186 : pathnode->path.parallel_workers = subpath->parallel_workers;
1706 291186 : pathnode->path.pathkeys = subpath->pathkeys;
1707 :
1708 291186 : pathnode->subpath = subpath;
1709 291186 : pathnode->hash_operators = hash_operators;
1710 291186 : pathnode->param_exprs = param_exprs;
1711 291186 : pathnode->singlerow = singlerow;
1712 291186 : pathnode->binary_mode = binary_mode;
1713 :
1714 : /*
1715 : * For now we set est_entries to 0. cost_memoize_rescan() does all the
1716 : * hard work to determine how many cache entries there are likely to be,
1717 : * so it seems best to leave it up to that function to fill this field in.
1718 : * If left at 0, the executor will make a guess at a good value.
1719 : */
1720 291186 : pathnode->est_entries = 0;
1721 :
1722 291186 : pathnode->est_calls = clamp_row_est(est_calls);
1723 :
1724 : /* These will also be set later in cost_memoize_rescan() */
1725 291186 : pathnode->est_unique_keys = 0.0;
1726 291186 : pathnode->est_hit_ratio = 0.0;
1727 :
1728 : /* we should not generate this path type when enable_memoize=false */
1729 : Assert(enable_memoize);
1730 291186 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
1731 :
1732 : /*
1733 : * Add a small additional charge for caching the first entry. All the
1734 : * harder calculations for rescans are performed in cost_memoize_rescan().
1735 : */
1736 291186 : pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
1737 291186 : pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
1738 291186 : pathnode->path.rows = subpath->rows;
1739 :
1740 291186 : return pathnode;
1741 : }
1742 :
1743 : /*
1744 : * create_unique_path
1745 : * Creates a path representing elimination of distinct rows from the
1746 : * input data. Distinct-ness is defined according to the needs of the
1747 : * semijoin represented by sjinfo. If it is not possible to identify
1748 : * how to make the data unique, NULL is returned.
1749 : *
1750 : * If used at all, this is likely to be called repeatedly on the same rel;
1751 : * and the input subpath should always be the same (the cheapest_total path
1752 : * for the rel). So we cache the result.
1753 : */
1754 : UniquePath *
1755 47986 : create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1756 : SpecialJoinInfo *sjinfo)
1757 : {
1758 : UniquePath *pathnode;
1759 : Path sort_path; /* dummy for result of cost_sort */
1760 : Path agg_path; /* dummy for result of cost_agg */
1761 : MemoryContext oldcontext;
1762 : int numCols;
1763 :
1764 : /* Caller made a mistake if subpath isn't cheapest_total ... */
1765 : Assert(subpath == rel->cheapest_total_path);
1766 : Assert(subpath->parent == rel);
1767 : /* ... or if SpecialJoinInfo is the wrong one */
1768 : Assert(sjinfo->jointype == JOIN_SEMI);
1769 : Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
1770 :
1771 : /* If result already cached, return it */
1772 47986 : if (rel->cheapest_unique_path)
1773 41462 : return (UniquePath *) rel->cheapest_unique_path;
1774 :
1775 : /* If it's not possible to unique-ify, return NULL */
1776 6524 : if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
1777 132 : return NULL;
1778 :
1779 : /*
1780 : * When called during GEQO join planning, we are in a short-lived memory
1781 : * context. We must make sure that the path and any subsidiary data
1782 : * structures created for a baserel survive the GEQO cycle, else the
1783 : * baserel is trashed for future GEQO cycles. On the other hand, when we
1784 : * are creating those for a joinrel during GEQO, we don't want them to
1785 : * clutter the main planning context. Upshot is that the best solution is
1786 : * to explicitly allocate memory in the same context the given RelOptInfo
1787 : * is in.
1788 : */
1789 6392 : oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1790 :
1791 6392 : pathnode = makeNode(UniquePath);
1792 :
1793 6392 : pathnode->path.pathtype = T_Unique;
1794 6392 : pathnode->path.parent = rel;
1795 6392 : pathnode->path.pathtarget = rel->reltarget;
1796 6392 : pathnode->path.param_info = subpath->param_info;
1797 6392 : pathnode->path.parallel_aware = false;
1798 12078 : pathnode->path.parallel_safe = rel->consider_parallel &&
1799 5686 : subpath->parallel_safe;
1800 6392 : pathnode->path.parallel_workers = subpath->parallel_workers;
1801 :
1802 : /*
1803 : * Assume the output is unsorted, since we don't necessarily have pathkeys
1804 : * to represent it. (This might get overridden below.)
1805 : */
1806 6392 : pathnode->path.pathkeys = NIL;
1807 :
1808 6392 : pathnode->subpath = subpath;
1809 :
1810 : /*
1811 : * Under GEQO and when planning child joins, the sjinfo might be
1812 : * short-lived, so we'd better make copies of data structures we extract
1813 : * from it.
1814 : */
1815 6392 : pathnode->in_operators = copyObject(sjinfo->semi_operators);
1816 6392 : pathnode->uniq_exprs = copyObject(sjinfo->semi_rhs_exprs);
1817 :
1818 : /*
1819 : * If the input is a relation and it has a unique index that proves the
1820 : * semi_rhs_exprs are unique, then we don't need to do anything. Note
1821 : * that relation_has_unique_index_for automatically considers restriction
1822 : * clauses for the rel, as well.
1823 : */
1824 7328 : if (rel->rtekind == RTE_RELATION && sjinfo->semi_can_btree &&
1825 936 : relation_has_unique_index_for(root, rel, NIL,
1826 : sjinfo->semi_rhs_exprs,
1827 : sjinfo->semi_operators))
1828 : {
1829 0 : pathnode->umethod = UNIQUE_PATH_NOOP;
1830 0 : pathnode->path.rows = rel->rows;
1831 0 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
1832 0 : pathnode->path.startup_cost = subpath->startup_cost;
1833 0 : pathnode->path.total_cost = subpath->total_cost;
1834 0 : pathnode->path.pathkeys = subpath->pathkeys;
1835 :
1836 0 : rel->cheapest_unique_path = (Path *) pathnode;
1837 :
1838 0 : MemoryContextSwitchTo(oldcontext);
1839 :
1840 0 : return pathnode;
1841 : }
1842 :
1843 : /*
1844 : * If the input is a subquery whose output must be unique already, then we
1845 : * don't need to do anything. The test for uniqueness has to consider
1846 : * exactly which columns we are extracting; for example "SELECT DISTINCT
1847 : * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
1848 : * this optimization unless semi_rhs_exprs consists only of simple Vars
1849 : * referencing subquery outputs. (Possibly we could do something with
1850 : * expressions in the subquery outputs, too, but for now keep it simple.)
1851 : */
1852 6392 : if (rel->rtekind == RTE_SUBQUERY)
1853 : {
1854 3328 : RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
1855 :
1856 3328 : if (query_supports_distinctness(rte->subquery))
1857 : {
1858 : List *sub_tlist_colnos;
1859 :
1860 3268 : sub_tlist_colnos = translate_sub_tlist(sjinfo->semi_rhs_exprs,
1861 3268 : rel->relid);
1862 :
1863 3490 : if (sub_tlist_colnos &&
1864 222 : query_is_distinct_for(rte->subquery,
1865 : sub_tlist_colnos,
1866 : sjinfo->semi_operators))
1867 : {
1868 0 : pathnode->umethod = UNIQUE_PATH_NOOP;
1869 0 : pathnode->path.rows = rel->rows;
1870 0 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
1871 0 : pathnode->path.startup_cost = subpath->startup_cost;
1872 0 : pathnode->path.total_cost = subpath->total_cost;
1873 0 : pathnode->path.pathkeys = subpath->pathkeys;
1874 :
1875 0 : rel->cheapest_unique_path = (Path *) pathnode;
1876 :
1877 0 : MemoryContextSwitchTo(oldcontext);
1878 :
1879 0 : return pathnode;
1880 : }
1881 : }
1882 : }
1883 :
1884 : /* Estimate number of output rows */
1885 6392 : pathnode->path.rows = estimate_num_groups(root,
1886 : sjinfo->semi_rhs_exprs,
1887 : rel->rows,
1888 : NULL,
1889 : NULL);
1890 6392 : numCols = list_length(sjinfo->semi_rhs_exprs);
1891 :
1892 6392 : if (sjinfo->semi_can_btree)
1893 : {
1894 : /*
1895 : * Estimate cost for sort+unique implementation
1896 : */
1897 6392 : cost_sort(&sort_path, root, NIL,
1898 : subpath->disabled_nodes,
1899 : subpath->total_cost,
1900 : rel->rows,
1901 6392 : subpath->pathtarget->width,
1902 : 0.0,
1903 : work_mem,
1904 : -1.0);
1905 :
1906 : /*
1907 : * Charge one cpu_operator_cost per comparison per input tuple. We
1908 : * assume all columns get compared for most of the tuples. (XXX
1909 : * probably this is an overestimate.) This should agree with
1910 : * create_upper_unique_path.
1911 : */
1912 6392 : sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
1913 : }
1914 :
1915 6392 : if (sjinfo->semi_can_hash)
1916 : {
1917 : /*
1918 : * Estimate the overhead per hashtable entry at 64 bytes (same as in
1919 : * planner.c).
1920 : */
1921 6392 : int hashentrysize = subpath->pathtarget->width + 64;
1922 :
1923 6392 : if (hashentrysize * pathnode->path.rows > get_hash_memory_limit())
1924 : {
1925 : /*
1926 : * We should not try to hash. Hack the SpecialJoinInfo to
1927 : * remember this, in case we come through here again.
1928 : */
1929 0 : sjinfo->semi_can_hash = false;
1930 : }
1931 : else
1932 6392 : cost_agg(&agg_path, root,
1933 : AGG_HASHED, NULL,
1934 : numCols, pathnode->path.rows,
1935 : NIL,
1936 : subpath->disabled_nodes,
1937 : subpath->startup_cost,
1938 : subpath->total_cost,
1939 : rel->rows,
1940 6392 : subpath->pathtarget->width);
1941 : }
1942 :
1943 6392 : if (sjinfo->semi_can_btree && sjinfo->semi_can_hash)
1944 : {
1945 6392 : if (agg_path.disabled_nodes < sort_path.disabled_nodes ||
1946 6386 : (agg_path.disabled_nodes == sort_path.disabled_nodes &&
1947 6386 : agg_path.total_cost < sort_path.total_cost))
1948 6108 : pathnode->umethod = UNIQUE_PATH_HASH;
1949 : else
1950 284 : pathnode->umethod = UNIQUE_PATH_SORT;
1951 : }
1952 0 : else if (sjinfo->semi_can_btree)
1953 0 : pathnode->umethod = UNIQUE_PATH_SORT;
1954 0 : else if (sjinfo->semi_can_hash)
1955 0 : pathnode->umethod = UNIQUE_PATH_HASH;
1956 : else
1957 : {
1958 : /* we can get here only if we abandoned hashing above */
1959 0 : MemoryContextSwitchTo(oldcontext);
1960 0 : return NULL;
1961 : }
1962 :
1963 6392 : if (pathnode->umethod == UNIQUE_PATH_HASH)
1964 : {
1965 6108 : pathnode->path.disabled_nodes = agg_path.disabled_nodes;
1966 6108 : pathnode->path.startup_cost = agg_path.startup_cost;
1967 6108 : pathnode->path.total_cost = agg_path.total_cost;
1968 : }
1969 : else
1970 : {
1971 284 : pathnode->path.disabled_nodes = sort_path.disabled_nodes;
1972 284 : pathnode->path.startup_cost = sort_path.startup_cost;
1973 284 : pathnode->path.total_cost = sort_path.total_cost;
1974 : }
1975 :
1976 6392 : rel->cheapest_unique_path = (Path *) pathnode;
1977 :
1978 6392 : MemoryContextSwitchTo(oldcontext);
1979 :
1980 6392 : return pathnode;
1981 : }
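
The choice between UNIQUE_PATH_HASH and UNIQUE_PATH_SORT above boils down to a memory check (estimated hash table size versus the hash memory limit) followed by a disabled-nodes-then-total-cost comparison. A minimal standalone sketch with made-up numbers (not PostgreSQL code; the real figures come from cost_agg(), cost_sort() and get_hash_memory_limit()):

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	/* Assumed numbers, for illustration only. */
	double		output_rows = 50000.0;	/* estimated distinct rows */
	int			width = 36;				/* stand-in for subpath->pathtarget->width */
	double		hash_mem_limit = 64.0 * 1024 * 1024;	/* stand-in for get_hash_memory_limit() */

	/* Same 64-byte-per-entry overhead assumption as the code above. */
	double		hash_bytes = (double) (width + 64) * output_rows;
	bool		can_hash = (hash_bytes <= hash_mem_limit);

	/* Pretend costs produced by cost_agg() and cost_sort() + unique charge. */
	int			agg_disabled = 0, sort_disabled = 0;
	double		agg_total = 900.0, sort_total = 1400.0;

	const char *method;

	if (can_hash &&
		(agg_disabled < sort_disabled ||
		 (agg_disabled == sort_disabled && agg_total < sort_total)))
		method = "UNIQUE_PATH_HASH";
	else
		method = "UNIQUE_PATH_SORT";

	printf("hash table ~%.0f bytes, method=%s\n", hash_bytes, method);
	return 0;
}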
1982 :
1983 : /*
1984 : * create_gather_merge_path
1985 : *
1986 : * Creates a path corresponding to a gather merge scan, returning
1987 : * the pathnode.
1988 : */
1989 : GatherMergePath *
1990 10154 : create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1991 : PathTarget *target, List *pathkeys,
1992 : Relids required_outer, double *rows)
1993 : {
1994 10154 : GatherMergePath *pathnode = makeNode(GatherMergePath);
1995 10154 : int input_disabled_nodes = 0;
1996 10154 : Cost input_startup_cost = 0;
1997 10154 : Cost input_total_cost = 0;
1998 :
1999 : Assert(subpath->parallel_safe);
2000 : Assert(pathkeys);
2001 :
2002 : /*
2003 : * The subpath should guarantee that it is adequately ordered either by
2004 : * adding an explicit sort node or by using presorted input. We cannot
2005 : * add an explicit Sort node for the subpath in createplan.c on additional
2006 : * pathkeys, because we can't guarantee the sort would be safe. For
2007 : * example, expressions may be volatile or otherwise parallel unsafe.
2008 : */
2009 10154 : if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
2010 0 : elog(ERROR, "gather merge input not sufficiently sorted");
2011 :
2012 10154 : pathnode->path.pathtype = T_GatherMerge;
2013 10154 : pathnode->path.parent = rel;
2014 10154 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2015 : required_outer);
2016 10154 : pathnode->path.parallel_aware = false;
2017 :
2018 10154 : pathnode->subpath = subpath;
2019 10154 : pathnode->num_workers = subpath->parallel_workers;
2020 10154 : pathnode->path.pathkeys = pathkeys;
2021 10154 : pathnode->path.pathtarget = target ? target : rel->reltarget;
2022 :
2023 10154 : input_disabled_nodes += subpath->disabled_nodes;
2024 10154 : input_startup_cost += subpath->startup_cost;
2025 10154 : input_total_cost += subpath->total_cost;
2026 :
2027 10154 : cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
2028 : input_disabled_nodes, input_startup_cost,
2029 : input_total_cost, rows);
2030 :
2031 10154 : return pathnode;
2032 : }
2033 :
2034 : /*
2035 : * translate_sub_tlist - get subquery column numbers represented by tlist
2036 : *
2037 : * The given targetlist usually contains only Vars referencing the given relid.
2038 : * Extract their varattnos (ie, the column numbers of the subquery) and return
2039 : * as an integer List.
2040 : *
2041 : * If any of the tlist items is not a simple Var, we cannot determine whether
2042 : * the subquery's uniqueness condition (if any) matches ours, so punt and
2043 : * return NIL.
2044 : */
2045 : static List *
2046 3268 : translate_sub_tlist(List *tlist, int relid)
2047 : {
2048 3268 : List *result = NIL;
2049 : ListCell *l;
2050 :
2051 3490 : foreach(l, tlist)
2052 : {
2053 3268 : Var *var = (Var *) lfirst(l);
2054 :
2055 3268 : if (!var || !IsA(var, Var) ||
2056 222 : var->varno != relid)
2057 3046 : return NIL; /* punt */
2058 :
2059 222 : result = lappend_int(result, var->varattno);
2060 : }
2061 222 : return result;
2062 : }
2063 :
2064 : /*
2065 : * create_gather_path
2066 : * Creates a path corresponding to a gather scan, returning the
2067 : * pathnode.
2068 : *
2069 : * 'rows' may optionally be set to override row estimates from other sources.
2070 : */
2071 : GatherPath *
2072 19084 : create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
2073 : PathTarget *target, Relids required_outer, double *rows)
2074 : {
2075 19084 : GatherPath *pathnode = makeNode(GatherPath);
2076 :
2077 : Assert(subpath->parallel_safe);
2078 :
2079 19084 : pathnode->path.pathtype = T_Gather;
2080 19084 : pathnode->path.parent = rel;
2081 19084 : pathnode->path.pathtarget = target;
2082 19084 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2083 : required_outer);
2084 19084 : pathnode->path.parallel_aware = false;
2085 19084 : pathnode->path.parallel_safe = false;
2086 19084 : pathnode->path.parallel_workers = 0;
2087 19084 : pathnode->path.pathkeys = NIL; /* Gather has unordered result */
2088 :
2089 19084 : pathnode->subpath = subpath;
2090 19084 : pathnode->num_workers = subpath->parallel_workers;
2091 19084 : pathnode->single_copy = false;
2092 :
2093 19084 : if (pathnode->num_workers == 0)
2094 : {
2095 0 : pathnode->path.pathkeys = subpath->pathkeys;
2096 0 : pathnode->num_workers = 1;
2097 0 : pathnode->single_copy = true;
2098 : }
2099 :
2100 19084 : cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
2101 :
2102 19084 : return pathnode;
2103 : }
2104 :
2105 : /*
2106 : * create_subqueryscan_path
2107 : * Creates a path corresponding to a scan of a subquery,
2108 : * returning the pathnode.
2109 : *
2110 : * Caller must pass trivial_pathtarget = true if it believes rel->reltarget to
2111 : * be trivial, ie just a fetch of all the subquery output columns in order.
2112 : * While we could determine that here, the caller can usually do it more
2113 : * efficiently (or at least amortize it over multiple calls).
2114 : */
2115 : SubqueryScanPath *
2116 48412 : create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
2117 : bool trivial_pathtarget,
2118 : List *pathkeys, Relids required_outer)
2119 : {
2120 48412 : SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
2121 :
2122 48412 : pathnode->path.pathtype = T_SubqueryScan;
2123 48412 : pathnode->path.parent = rel;
2124 48412 : pathnode->path.pathtarget = rel->reltarget;
2125 48412 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2126 : required_outer);
2127 48412 : pathnode->path.parallel_aware = false;
2128 81518 : pathnode->path.parallel_safe = rel->consider_parallel &&
2129 33106 : subpath->parallel_safe;
2130 48412 : pathnode->path.parallel_workers = subpath->parallel_workers;
2131 48412 : pathnode->path.pathkeys = pathkeys;
2132 48412 : pathnode->subpath = subpath;
2133 :
2134 48412 : cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info,
2135 : trivial_pathtarget);
2136 :
2137 48412 : return pathnode;
2138 : }
2139 :
2140 : /*
2141 : * create_functionscan_path
2142 : * Creates a path corresponding to a sequential scan of a function,
2143 : * returning the pathnode.
2144 : */
2145 : Path *
2146 50576 : create_functionscan_path(PlannerInfo *root, RelOptInfo *rel,
2147 : List *pathkeys, Relids required_outer)
2148 : {
2149 50576 : Path *pathnode = makeNode(Path);
2150 :
2151 50576 : pathnode->pathtype = T_FunctionScan;
2152 50576 : pathnode->parent = rel;
2153 50576 : pathnode->pathtarget = rel->reltarget;
2154 50576 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2155 : required_outer);
2156 50576 : pathnode->parallel_aware = false;
2157 50576 : pathnode->parallel_safe = rel->consider_parallel;
2158 50576 : pathnode->parallel_workers = 0;
2159 50576 : pathnode->pathkeys = pathkeys;
2160 :
2161 50576 : cost_functionscan(pathnode, root, rel, pathnode->param_info);
2162 :
2163 50576 : return pathnode;
2164 : }
2165 :
2166 : /*
2167 : * create_tablefuncscan_path
2168 : * Creates a path corresponding to a sequential scan of a table function,
2169 : * returning the pathnode.
2170 : */
2171 : Path *
2172 626 : create_tablefuncscan_path(PlannerInfo *root, RelOptInfo *rel,
2173 : Relids required_outer)
2174 : {
2175 626 : Path *pathnode = makeNode(Path);
2176 :
2177 626 : pathnode->pathtype = T_TableFuncScan;
2178 626 : pathnode->parent = rel;
2179 626 : pathnode->pathtarget = rel->reltarget;
2180 626 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2181 : required_outer);
2182 626 : pathnode->parallel_aware = false;
2183 626 : pathnode->parallel_safe = rel->consider_parallel;
2184 626 : pathnode->parallel_workers = 0;
2185 626 : pathnode->pathkeys = NIL; /* result is always unordered */
2186 :
2187 626 : cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
2188 :
2189 626 : return pathnode;
2190 : }
2191 :
2192 : /*
2193 : * create_valuesscan_path
2194 : * Creates a path corresponding to a scan of a VALUES list,
2195 : * returning the pathnode.
2196 : */
2197 : Path *
2198 8216 : create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel,
2199 : Relids required_outer)
2200 : {
2201 8216 : Path *pathnode = makeNode(Path);
2202 :
2203 8216 : pathnode->pathtype = T_ValuesScan;
2204 8216 : pathnode->parent = rel;
2205 8216 : pathnode->pathtarget = rel->reltarget;
2206 8216 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2207 : required_outer);
2208 8216 : pathnode->parallel_aware = false;
2209 8216 : pathnode->parallel_safe = rel->consider_parallel;
2210 8216 : pathnode->parallel_workers = 0;
2211 8216 : pathnode->pathkeys = NIL; /* result is always unordered */
2212 :
2213 8216 : cost_valuesscan(pathnode, root, rel, pathnode->param_info);
2214 :
2215 8216 : return pathnode;
2216 : }
2217 :
2218 : /*
2219 : * create_ctescan_path
2220 : * Creates a path corresponding to a scan of a non-self-reference CTE,
2221 : * returning the pathnode.
2222 : */
2223 : Path *
2224 4250 : create_ctescan_path(PlannerInfo *root, RelOptInfo *rel,
2225 : List *pathkeys, Relids required_outer)
2226 : {
2227 4250 : Path *pathnode = makeNode(Path);
2228 :
2229 4250 : pathnode->pathtype = T_CteScan;
2230 4250 : pathnode->parent = rel;
2231 4250 : pathnode->pathtarget = rel->reltarget;
2232 4250 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2233 : required_outer);
2234 4250 : pathnode->parallel_aware = false;
2235 4250 : pathnode->parallel_safe = rel->consider_parallel;
2236 4250 : pathnode->parallel_workers = 0;
2237 4250 : pathnode->pathkeys = pathkeys;
2238 :
2239 4250 : cost_ctescan(pathnode, root, rel, pathnode->param_info);
2240 :
2241 4250 : return pathnode;
2242 : }
2243 :
2244 : /*
2245 : * create_namedtuplestorescan_path
2246 : * Creates a path corresponding to a scan of a named tuplestore, returning
2247 : * the pathnode.
2248 : */
2249 : Path *
2250 482 : create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel,
2251 : Relids required_outer)
2252 : {
2253 482 : Path *pathnode = makeNode(Path);
2254 :
2255 482 : pathnode->pathtype = T_NamedTuplestoreScan;
2256 482 : pathnode->parent = rel;
2257 482 : pathnode->pathtarget = rel->reltarget;
2258 482 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2259 : required_outer);
2260 482 : pathnode->parallel_aware = false;
2261 482 : pathnode->parallel_safe = rel->consider_parallel;
2262 482 : pathnode->parallel_workers = 0;
2263 482 : pathnode->pathkeys = NIL; /* result is always unordered */
2264 :
2265 482 : cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
2266 :
2267 482 : return pathnode;
2268 : }
2269 :
2270 : /*
2271 : * create_resultscan_path
2272 : * Creates a path corresponding to a scan of an RTE_RESULT relation,
2273 : * returning the pathnode.
2274 : */
2275 : Path *
2276 4268 : create_resultscan_path(PlannerInfo *root, RelOptInfo *rel,
2277 : Relids required_outer)
2278 : {
2279 4268 : Path *pathnode = makeNode(Path);
2280 :
2281 4268 : pathnode->pathtype = T_Result;
2282 4268 : pathnode->parent = rel;
2283 4268 : pathnode->pathtarget = rel->reltarget;
2284 4268 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2285 : required_outer);
2286 4268 : pathnode->parallel_aware = false;
2287 4268 : pathnode->parallel_safe = rel->consider_parallel;
2288 4268 : pathnode->parallel_workers = 0;
2289 4268 : pathnode->pathkeys = NIL; /* result is always unordered */
2290 :
2291 4268 : cost_resultscan(pathnode, root, rel, pathnode->param_info);
2292 :
2293 4268 : return pathnode;
2294 : }
2295 :
2296 : /*
2297 : * create_worktablescan_path
2298 : * Creates a path corresponding to a scan of a self-reference CTE,
2299 : * returning the pathnode.
2300 : */
2301 : Path *
2302 926 : create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel,
2303 : Relids required_outer)
2304 : {
2305 926 : Path *pathnode = makeNode(Path);
2306 :
2307 926 : pathnode->pathtype = T_WorkTableScan;
2308 926 : pathnode->parent = rel;
2309 926 : pathnode->pathtarget = rel->reltarget;
2310 926 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2311 : required_outer);
2312 926 : pathnode->parallel_aware = false;
2313 926 : pathnode->parallel_safe = rel->consider_parallel;
2314 926 : pathnode->parallel_workers = 0;
2315 926 : pathnode->pathkeys = NIL; /* result is always unordered */
2316 :
2317 : /* Cost is the same as for a regular CTE scan */
2318 926 : cost_ctescan(pathnode, root, rel, pathnode->param_info);
2319 :
2320 926 : return pathnode;
2321 : }
2322 :
2323 : /*
2324 : * create_foreignscan_path
2325 : * Creates a path corresponding to a scan of a foreign base table,
2326 : * returning the pathnode.
2327 : *
2328 : * This function is never called from core Postgres; rather, it's expected
2329 : * to be called by the GetForeignPaths function of a foreign data wrapper.
2330 : * We make the FDW supply all fields of the path, since we do not have any way
2331 : * to calculate them in core. However, there is a usually-sane default for
2332 : * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2333 : */
2334 : ForeignPath *
2335 3666 : create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
2336 : PathTarget *target,
2337 : double rows, int disabled_nodes,
2338 : Cost startup_cost, Cost total_cost,
2339 : List *pathkeys,
2340 : Relids required_outer,
2341 : Path *fdw_outerpath,
2342 : List *fdw_restrictinfo,
2343 : List *fdw_private)
2344 : {
2345 3666 : ForeignPath *pathnode = makeNode(ForeignPath);
2346 :
2347 : /* Historically some FDWs were confused about when to use this */
2348 : Assert(IS_SIMPLE_REL(rel));
2349 :
2350 3666 : pathnode->path.pathtype = T_ForeignScan;
2351 3666 : pathnode->path.parent = rel;
2352 3666 : pathnode->path.pathtarget = target ? target : rel->reltarget;
2353 3666 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2354 : required_outer);
2355 3666 : pathnode->path.parallel_aware = false;
2356 3666 : pathnode->path.parallel_safe = rel->consider_parallel;
2357 3666 : pathnode->path.parallel_workers = 0;
2358 3666 : pathnode->path.rows = rows;
2359 3666 : pathnode->path.disabled_nodes = disabled_nodes;
2360 3666 : pathnode->path.startup_cost = startup_cost;
2361 3666 : pathnode->path.total_cost = total_cost;
2362 3666 : pathnode->path.pathkeys = pathkeys;
2363 :
2364 3666 : pathnode->fdw_outerpath = fdw_outerpath;
2365 3666 : pathnode->fdw_restrictinfo = fdw_restrictinfo;
2366 3666 : pathnode->fdw_private = fdw_private;
2367 :
2368 3666 : return pathnode;
2369 : }
2370 :
2371 : /*
2372 : * create_foreign_join_path
2373 : * Creates a path corresponding to a scan of a foreign join,
2374 : * returning the pathnode.
2375 : *
2376 : * This function is never called from core Postgres; rather, it's expected
2377 : * to be called by the GetForeignJoinPaths function of a foreign data wrapper.
2378 : * We make the FDW supply all fields of the path, since we do not have any way
2379 : * to calculate them in core. However, there is a usually-sane default for
2380 : * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2381 : */
2382 : ForeignPath *
2383 1200 : create_foreign_join_path(PlannerInfo *root, RelOptInfo *rel,
2384 : PathTarget *target,
2385 : double rows, int disabled_nodes,
2386 : Cost startup_cost, Cost total_cost,
2387 : List *pathkeys,
2388 : Relids required_outer,
2389 : Path *fdw_outerpath,
2390 : List *fdw_restrictinfo,
2391 : List *fdw_private)
2392 : {
2393 1200 : ForeignPath *pathnode = makeNode(ForeignPath);
2394 :
2395 : /*
2396 : * We should use get_joinrel_parampathinfo to handle parameterized paths,
2397 : * but the API of this function doesn't support it, and existing
2398 : * extensions aren't yet trying to build such paths anyway. For the
2399 : * moment just throw an error if someone tries it; eventually we should
2400 : * revisit this.
2401 : */
2402 1200 : if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
2403 0 : elog(ERROR, "parameterized foreign joins are not supported yet");
2404 :
2405 1200 : pathnode->path.pathtype = T_ForeignScan;
2406 1200 : pathnode->path.parent = rel;
2407 1200 : pathnode->path.pathtarget = target ? target : rel->reltarget;
2408 1200 : pathnode->path.param_info = NULL; /* XXX see above */
2409 1200 : pathnode->path.parallel_aware = false;
2410 1200 : pathnode->path.parallel_safe = rel->consider_parallel;
2411 1200 : pathnode->path.parallel_workers = 0;
2412 1200 : pathnode->path.rows = rows;
2413 1200 : pathnode->path.disabled_nodes = disabled_nodes;
2414 1200 : pathnode->path.startup_cost = startup_cost;
2415 1200 : pathnode->path.total_cost = total_cost;
2416 1200 : pathnode->path.pathkeys = pathkeys;
2417 :
2418 1200 : pathnode->fdw_outerpath = fdw_outerpath;
2419 1200 : pathnode->fdw_restrictinfo = fdw_restrictinfo;
2420 1200 : pathnode->fdw_private = fdw_private;
2421 :
2422 1200 : return pathnode;
2423 : }
2424 :
2425 : /*
2426 : * create_foreign_upper_path
2427 : * Creates a path corresponding to an upper relation that's computed
2428 : * directly by an FDW, returning the pathnode.
2429 : *
2430 : * This function is never called from core Postgres; rather, it's expected to
2431 : * be called by the GetForeignUpperPaths function of a foreign data wrapper.
2432 : * We make the FDW supply all fields of the path, since we do not have any way
2433 : * to calculate them in core. However, there is a usually-sane default for
2434 : * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2435 : */
2436 : ForeignPath *
2437 588 : create_foreign_upper_path(PlannerInfo *root, RelOptInfo *rel,
2438 : PathTarget *target,
2439 : double rows, int disabled_nodes,
2440 : Cost startup_cost, Cost total_cost,
2441 : List *pathkeys,
2442 : Path *fdw_outerpath,
2443 : List *fdw_restrictinfo,
2444 : List *fdw_private)
2445 : {
2446 588 : ForeignPath *pathnode = makeNode(ForeignPath);
2447 :
2448 : /*
2449 : * Upper relations should never have any lateral references, since joining
2450 : * is complete.
2451 : */
2452 : Assert(bms_is_empty(rel->lateral_relids));
2453 :
2454 588 : pathnode->path.pathtype = T_ForeignScan;
2455 588 : pathnode->path.parent = rel;
2456 588 : pathnode->path.pathtarget = target ? target : rel->reltarget;
2457 588 : pathnode->path.param_info = NULL;
2458 588 : pathnode->path.parallel_aware = false;
2459 588 : pathnode->path.parallel_safe = rel->consider_parallel;
2460 588 : pathnode->path.parallel_workers = 0;
2461 588 : pathnode->path.rows = rows;
2462 588 : pathnode->path.disabled_nodes = disabled_nodes;
2463 588 : pathnode->path.startup_cost = startup_cost;
2464 588 : pathnode->path.total_cost = total_cost;
2465 588 : pathnode->path.pathkeys = pathkeys;
2466 :
2467 588 : pathnode->fdw_outerpath = fdw_outerpath;
2468 588 : pathnode->fdw_restrictinfo = fdw_restrictinfo;
2469 588 : pathnode->fdw_private = fdw_private;
2470 :
2471 588 : return pathnode;
2472 : }
2473 :
2474 : /*
2475 : * calc_nestloop_required_outer
2476 : * Compute the required_outer set for a nestloop join path
2477 : *
2478 : * Note: when considering a child join, the inputs nonetheless use top-level
2479 : * parent relids
2480 : *
2481 : * Note: result must not share storage with either input
2482 : */
2483 : Relids
2484 2910086 : calc_nestloop_required_outer(Relids outerrelids,
2485 : Relids outer_paramrels,
2486 : Relids innerrelids,
2487 : Relids inner_paramrels)
2488 : {
2489 : Relids required_outer;
2490 :
2491 : /* inner_path can require rels from outer path, but not vice versa */
2492 : Assert(!bms_overlap(outer_paramrels, innerrelids));
2493 : /* easy case if inner path is not parameterized */
2494 2910086 : if (!inner_paramrels)
2495 1967370 : return bms_copy(outer_paramrels);
2496 : /* else, form the union ... */
2497 942716 : required_outer = bms_union(outer_paramrels, inner_paramrels);
2498 : /* ... and remove any mention of now-satisfied outer rels */
2499 942716 : required_outer = bms_del_members(required_outer,
2500 : outerrelids);
2501 942716 : return required_outer;
2502 : }
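
The set arithmetic above is: take the union of the two parameterizations, then drop any rels that the outer side of this join now supplies. A minimal sketch using plain bitmasks as a stand-in for Relids/Bitmapset (illustrative only; here bit N represents relation N+1):

#include <stdio.h>

int
main(void)
{
	unsigned	outerrelids = 0x03;		/* join's outer side: rels 1,2 */
	unsigned	outer_paramrels = 0x08; /* outer path needs rel 4 */
	unsigned	inner_paramrels = 0x05; /* inner path needs rels 1,3 */

	/* union of both parameterizations ... */
	unsigned	required_outer = outer_paramrels | inner_paramrels;

	/* ... minus whatever the outer side of this join now satisfies */
	required_outer &= ~outerrelids;

	printf("required_outer = 0x%x\n", required_outer);	/* rels 3,4 -> 0x0c */
	return 0;
}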
2503 :
2504 : /*
2505 : * calc_non_nestloop_required_outer
2506 : * Compute the required_outer set for a merge or hash join path
2507 : *
2508 : * Note: result must not share storage with either input
2509 : */
2510 : Relids
2511 1906726 : calc_non_nestloop_required_outer(Path *outer_path, Path *inner_path)
2512 : {
2513 1906726 : Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
2514 1906726 : Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
2515 : Relids innerrelids PG_USED_FOR_ASSERTS_ONLY;
2516 : Relids outerrelids PG_USED_FOR_ASSERTS_ONLY;
2517 : Relids required_outer;
2518 :
2519 : /*
2520 : * Any parameterization of the input paths refers to topmost parents of
2521 : * the relevant relations, because reparameterize_path_by_child() hasn't
2522 : * been called yet. So we must consider topmost parents of the relations
2523 : * being joined, too, while checking for disallowed parameterization
2524 : * cases.
2525 : */
2526 1906726 : if (inner_path->parent->top_parent_relids)
2527 37598 : innerrelids = inner_path->parent->top_parent_relids;
2528 : else
2529 1869128 : innerrelids = inner_path->parent->relids;
2530 :
2531 1906726 : if (outer_path->parent->top_parent_relids)
2532 37598 : outerrelids = outer_path->parent->top_parent_relids;
2533 : else
2534 1869128 : outerrelids = outer_path->parent->relids;
2535 :
2536 : /* neither path can require rels from the other */
2537 : Assert(!bms_overlap(outer_paramrels, innerrelids));
2538 : Assert(!bms_overlap(inner_paramrels, outerrelids));
2539 : /* form the union ... */
2540 1906726 : required_outer = bms_union(outer_paramrels, inner_paramrels);
2541 : /* we do not need an explicit test for empty; bms_union gets it right */
2542 1906726 : return required_outer;
2543 : }
2544 :
2545 : /*
2546 : * create_nestloop_path
2547 : * Creates a pathnode corresponding to a nestloop join between two
2548 : * relations.
2549 : *
2550 : * 'joinrel' is the join relation.
2551 : * 'jointype' is the type of join required
2552 : * 'workspace' is the result from initial_cost_nestloop
2553 : * 'extra' contains various information about the join
2554 : * 'outer_path' is the outer path
2555 : * 'inner_path' is the inner path
2556 : * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2557 : * 'pathkeys' are the path keys of the new join path
2558 : * 'required_outer' is the set of required outer rels
2559 : *
2560 : * Returns the resulting path node.
2561 : */
2562 : NestPath *
2563 1311624 : create_nestloop_path(PlannerInfo *root,
2564 : RelOptInfo *joinrel,
2565 : JoinType jointype,
2566 : JoinCostWorkspace *workspace,
2567 : JoinPathExtraData *extra,
2568 : Path *outer_path,
2569 : Path *inner_path,
2570 : List *restrict_clauses,
2571 : List *pathkeys,
2572 : Relids required_outer)
2573 : {
2574 1311624 : NestPath *pathnode = makeNode(NestPath);
2575 1311624 : Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
2576 : Relids outerrelids;
2577 :
2578 : /*
2579 : * Paths are parameterized by top-level parents, so run parameterization
2580 : * tests on the parent relids.
2581 : */
2582 1311624 : if (outer_path->parent->top_parent_relids)
2583 18820 : outerrelids = outer_path->parent->top_parent_relids;
2584 : else
2585 1292804 : outerrelids = outer_path->parent->relids;
2586 :
2587 : /*
2588 : * If the inner path is parameterized by the outer, we must drop any
2589 : * restrict_clauses that are due to be moved into the inner path. We have
2590 : * to do this now, rather than postpone the work till createplan time,
2591 : * because the restrict_clauses list can affect the size and cost
2592 : * estimates for this path. We detect such clauses by checking for serial
2593 : * number match to clauses already enforced in the inner path.
2594 : */
2595 1311624 : if (bms_overlap(inner_req_outer, outerrelids))
2596 : {
2597 368224 : Bitmapset *enforced_serials = get_param_path_clause_serials(inner_path);
2598 368224 : List *jclauses = NIL;
2599 : ListCell *lc;
2600 :
2601 816260 : foreach(lc, restrict_clauses)
2602 : {
2603 448036 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2604 :
2605 448036 : if (!bms_is_member(rinfo->rinfo_serial, enforced_serials))
2606 57406 : jclauses = lappend(jclauses, rinfo);
2607 : }
2608 368224 : restrict_clauses = jclauses;
2609 : }
2610 :
2611 1311624 : pathnode->jpath.path.pathtype = T_NestLoop;
2612 1311624 : pathnode->jpath.path.parent = joinrel;
2613 1311624 : pathnode->jpath.path.pathtarget = joinrel->reltarget;
2614 1311624 : pathnode->jpath.path.param_info =
2615 1311624 : get_joinrel_parampathinfo(root,
2616 : joinrel,
2617 : outer_path,
2618 : inner_path,
2619 : extra->sjinfo,
2620 : required_outer,
2621 : &restrict_clauses);
2622 1311624 : pathnode->jpath.path.parallel_aware = false;
2623 3812614 : pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2624 1311624 : outer_path->parallel_safe && inner_path->parallel_safe;
2625 : /* This is a foolish way to estimate parallel_workers, but for now... */
2626 1311624 : pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2627 1311624 : pathnode->jpath.path.pathkeys = pathkeys;
2628 1311624 : pathnode->jpath.jointype = jointype;
2629 1311624 : pathnode->jpath.inner_unique = extra->inner_unique;
2630 1311624 : pathnode->jpath.outerjoinpath = outer_path;
2631 1311624 : pathnode->jpath.innerjoinpath = inner_path;
2632 1311624 : pathnode->jpath.joinrestrictinfo = restrict_clauses;
2633 :
2634 1311624 : final_cost_nestloop(root, pathnode, workspace, extra);
2635 :
2636 1311624 : return pathnode;
2637 : }
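
The clause-dropping loop above keeps only the join clauses that the parameterized inner path does not already enforce, matching them by serial number. A minimal standalone sketch of that filter with hypothetical serial numbers (not PostgreSQL code; the real check uses get_param_path_clause_serials() and bms_is_member()):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical clause ids, standing in for RestrictInfo->rinfo_serial. */
static bool
already_enforced(int serial, const int *enforced, int n)
{
	for (int i = 0; i < n; i++)
		if (enforced[i] == serial)
			return true;
	return false;
}

int
main(void)
{
	int			join_clause_serials[] = {7, 11, 13};
	int			enforced_by_inner[] = {7, 13};	/* already applied inside the inner path */
	int			kept[3];
	int			nkept = 0;

	for (int i = 0; i < 3; i++)
		if (!already_enforced(join_clause_serials[i], enforced_by_inner, 2))
			kept[nkept++] = join_clause_serials[i];

	/* only serial 11 still needs to be checked at the nestloop itself */
	for (int i = 0; i < nkept; i++)
		printf("keep clause %d at the join\n", kept[i]);
	return 0;
}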
2638 :
2639 : /*
2640 : * create_mergejoin_path
2641 : * Creates a pathnode corresponding to a mergejoin join between
2642 : * two relations
2643 : *
2644 : * 'joinrel' is the join relation
2645 : * 'jointype' is the type of join required
2646 : * 'workspace' is the result from initial_cost_mergejoin
2647 : * 'extra' contains various information about the join
2648 : * 'outer_path' is the outer path
2649 : * 'inner_path' is the inner path
2650 : * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2651 : * 'pathkeys' are the path keys of the new join path
2652 : * 'required_outer' is the set of required outer rels
2653 : * 'mergeclauses' are the RestrictInfo nodes to use as merge clauses
2654 : * (this should be a subset of the restrict_clauses list)
2655 : * 'outersortkeys' are the sort varkeys for the outer relation
2656 : * 'innersortkeys' are the sort varkeys for the inner relation
2657 : * 'outer_presorted_keys' is the number of presorted keys of the outer path
2658 : */
2659 : MergePath *
2660 305728 : create_mergejoin_path(PlannerInfo *root,
2661 : RelOptInfo *joinrel,
2662 : JoinType jointype,
2663 : JoinCostWorkspace *workspace,
2664 : JoinPathExtraData *extra,
2665 : Path *outer_path,
2666 : Path *inner_path,
2667 : List *restrict_clauses,
2668 : List *pathkeys,
2669 : Relids required_outer,
2670 : List *mergeclauses,
2671 : List *outersortkeys,
2672 : List *innersortkeys,
2673 : int outer_presorted_keys)
2674 : {
2675 305728 : MergePath *pathnode = makeNode(MergePath);
2676 :
2677 305728 : pathnode->jpath.path.pathtype = T_MergeJoin;
2678 305728 : pathnode->jpath.path.parent = joinrel;
2679 305728 : pathnode->jpath.path.pathtarget = joinrel->reltarget;
2680 305728 : pathnode->jpath.path.param_info =
2681 305728 : get_joinrel_parampathinfo(root,
2682 : joinrel,
2683 : outer_path,
2684 : inner_path,
2685 : extra->sjinfo,
2686 : required_outer,
2687 : &restrict_clauses);
2688 305728 : pathnode->jpath.path.parallel_aware = false;
2689 881726 : pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2690 305728 : outer_path->parallel_safe && inner_path->parallel_safe;
2691 : /* This is a foolish way to estimate parallel_workers, but for now... */
2692 305728 : pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2693 305728 : pathnode->jpath.path.pathkeys = pathkeys;
2694 305728 : pathnode->jpath.jointype = jointype;
2695 305728 : pathnode->jpath.inner_unique = extra->inner_unique;
2696 305728 : pathnode->jpath.outerjoinpath = outer_path;
2697 305728 : pathnode->jpath.innerjoinpath = inner_path;
2698 305728 : pathnode->jpath.joinrestrictinfo = restrict_clauses;
2699 305728 : pathnode->path_mergeclauses = mergeclauses;
2700 305728 : pathnode->outersortkeys = outersortkeys;
2701 305728 : pathnode->innersortkeys = innersortkeys;
2702 305728 : pathnode->outer_presorted_keys = outer_presorted_keys;
2703 : /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
2704 : /* pathnode->materialize_inner will be set by final_cost_mergejoin */
2705 :
2706 305728 : final_cost_mergejoin(root, pathnode, workspace, extra);
2707 :
2708 305728 : return pathnode;
2709 : }
2710 :
2711 : /*
2712 : * create_hashjoin_path
2713 : * Creates a pathnode corresponding to a hash join between two relations.
2714 : *
2715 : * 'joinrel' is the join relation
2716 : * 'jointype' is the type of join required
2717 : * 'workspace' is the result from initial_cost_hashjoin
2718 : * 'extra' contains various information about the join
2719 : * 'outer_path' is the cheapest outer path
2720 : * 'inner_path' is the cheapest inner path
2721 : * 'parallel_hash' to select Parallel Hash of inner path (shared hash table)
2722 : * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2723 : * 'required_outer' is the set of required outer rels
2724 : * 'hashclauses' are the RestrictInfo nodes to use as hash clauses
2725 : * (this should be a subset of the restrict_clauses list)
2726 : */
2727 : HashPath *
2728 276426 : create_hashjoin_path(PlannerInfo *root,
2729 : RelOptInfo *joinrel,
2730 : JoinType jointype,
2731 : JoinCostWorkspace *workspace,
2732 : JoinPathExtraData *extra,
2733 : Path *outer_path,
2734 : Path *inner_path,
2735 : bool parallel_hash,
2736 : List *restrict_clauses,
2737 : Relids required_outer,
2738 : List *hashclauses)
2739 : {
2740 276426 : HashPath *pathnode = makeNode(HashPath);
2741 :
2742 276426 : pathnode->jpath.path.pathtype = T_HashJoin;
2743 276426 : pathnode->jpath.path.parent = joinrel;
2744 276426 : pathnode->jpath.path.pathtarget = joinrel->reltarget;
2745 276426 : pathnode->jpath.path.param_info =
2746 276426 : get_joinrel_parampathinfo(root,
2747 : joinrel,
2748 : outer_path,
2749 : inner_path,
2750 : extra->sjinfo,
2751 : required_outer,
2752 : &restrict_clauses);
2753 276426 : pathnode->jpath.path.parallel_aware =
2754 276426 : joinrel->consider_parallel && parallel_hash;
2755 794220 : pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2756 276426 : outer_path->parallel_safe && inner_path->parallel_safe;
2757 : /* This is a foolish way to estimate parallel_workers, but for now... */
2758 276426 : pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2759 :
2760 : /*
2761 : * A hashjoin never has pathkeys, since its output ordering is
2762 : * unpredictable due to possible batching. XXX If the inner relation is
2763 : * small enough, we could instruct the executor that it must not batch,
2764 : * and then we could assume that the output inherits the outer relation's
2765 : * ordering, which might save a sort step. However there is considerable
2766 : * downside if our estimate of the inner relation size is badly off. For
2767 : * the moment we don't risk it. (Note also that if we wanted to take this
2768 : * seriously, joinpath.c would have to consider many more paths for the
2769 : * outer rel than it does now.)
2770 : */
2771 276426 : pathnode->jpath.path.pathkeys = NIL;
2772 276426 : pathnode->jpath.jointype = jointype;
2773 276426 : pathnode->jpath.inner_unique = extra->inner_unique;
2774 276426 : pathnode->jpath.outerjoinpath = outer_path;
2775 276426 : pathnode->jpath.innerjoinpath = inner_path;
2776 276426 : pathnode->jpath.joinrestrictinfo = restrict_clauses;
2777 276426 : pathnode->path_hashclauses = hashclauses;
2778 : /* final_cost_hashjoin will fill in pathnode->num_batches */
2779 :
2780 276426 : final_cost_hashjoin(root, pathnode, workspace, extra);
2781 :
2782 276426 : return pathnode;
2783 : }
2784 :
2785 : /*
2786 : * create_projection_path
2787 : * Creates a pathnode that represents performing a projection.
2788 : *
2789 : * 'rel' is the parent relation associated with the result
2790 : * 'subpath' is the path representing the source of data
2791 : * 'target' is the PathTarget to be computed
2792 : */
2793 : ProjectionPath *
2794 372260 : create_projection_path(PlannerInfo *root,
2795 : RelOptInfo *rel,
2796 : Path *subpath,
2797 : PathTarget *target)
2798 : {
2799 372260 : ProjectionPath *pathnode = makeNode(ProjectionPath);
2800 : PathTarget *oldtarget;
2801 :
2802 : /*
2803 : * We mustn't put a ProjectionPath directly above another; it's useless
2804 : * and will confuse create_projection_plan. Rather than making sure all
2805 : * callers handle that, let's implement it here, by stripping off any
2806 : * ProjectionPath in what we're given. Given this rule, there won't be
2807 : * more than one.
2808 : */
2809 372260 : if (IsA(subpath, ProjectionPath))
2810 : {
2811 12 : ProjectionPath *subpp = (ProjectionPath *) subpath;
2812 :
2813 : Assert(subpp->path.parent == rel);
2814 12 : subpath = subpp->subpath;
2815 : Assert(!IsA(subpath, ProjectionPath));
2816 : }
2817 :
2818 372260 : pathnode->path.pathtype = T_Result;
2819 372260 : pathnode->path.parent = rel;
2820 372260 : pathnode->path.pathtarget = target;
2821 : /* For now, assume we are above any joins, so no parameterization */
2822 372260 : pathnode->path.param_info = NULL;
2823 372260 : pathnode->path.parallel_aware = false;
2824 847564 : pathnode->path.parallel_safe = rel->consider_parallel &&
2825 474910 : subpath->parallel_safe &&
2826 102650 : is_parallel_safe(root, (Node *) target->exprs);
2827 372260 : pathnode->path.parallel_workers = subpath->parallel_workers;
2828 : /* Projection does not change the sort order */
2829 372260 : pathnode->path.pathkeys = subpath->pathkeys;
2830 :
2831 372260 : pathnode->subpath = subpath;
2832 :
2833 : /*
2834 : * We might not need a separate Result node. If the input plan node type
2835 : * can project, we can just tell it to project something else. Or, if it
2836 : * can't project but the desired target has the same expression list as
2837 : * what the input will produce anyway, we can still give it the desired
2838 : * tlist (possibly changing its ressortgroupref labels, but nothing else).
2839 : * Note: in the latter case, create_projection_plan has to recheck our
2840 : * conclusion; see comments therein.
2841 : */
2842 372260 : oldtarget = subpath->pathtarget;
2843 374588 : if (is_projection_capable_path(subpath) ||
2844 2328 : equal(oldtarget->exprs, target->exprs))
2845 : {
2846 : /* No separate Result node needed */
2847 370058 : pathnode->dummypp = true;
2848 :
2849 : /*
2850 : * Set cost of plan as subpath's cost, adjusted for tlist replacement.
2851 : */
2852 370058 : pathnode->path.rows = subpath->rows;
2853 370058 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
2854 370058 : pathnode->path.startup_cost = subpath->startup_cost +
2855 370058 : (target->cost.startup - oldtarget->cost.startup);
2856 370058 : pathnode->path.total_cost = subpath->total_cost +
2857 370058 : (target->cost.startup - oldtarget->cost.startup) +
2858 370058 : (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
2859 : }
2860 : else
2861 : {
2862 : /* We really do need the Result node */
2863 2202 : pathnode->dummypp = false;
2864 :
2865 : /*
2866 : * The Result node's cost is cpu_tuple_cost per row, plus the cost of
2867 : * evaluating the tlist. There is no qual to worry about.
2868 : */
2869 2202 : pathnode->path.rows = subpath->rows;
2870 2202 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
2871 2202 : pathnode->path.startup_cost = subpath->startup_cost +
2872 2202 : target->cost.startup;
2873 2202 : pathnode->path.total_cost = subpath->total_cost +
2874 2202 : target->cost.startup +
2875 2202 : (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
2876 : }
2877 :
2878 372260 : return pathnode;
2879 : }
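
A short worked comparison of the two costing branches above, with assumed numbers (startup-cost deltas left at zero for brevity; cpu_tuple_cost shown at its usual default of 0.01):

#include <stdio.h>

int
main(void)
{
	/* Assumed inputs, for illustration only. */
	double		rows = 1000.0;
	double		sub_total = 45.0;		/* subpath's total cost */
	double		old_per_tuple = 0.0025; /* old pathtarget's per-tuple cost */
	double		new_per_tuple = 0.0100; /* new pathtarget's per-tuple cost */
	double		cpu_tuple_cost = 0.01;	/* usual default GUC setting */

	/*
	 * Case 1: the subpath can project for itself (dummypp), so only the
	 * change in tlist evaluation cost is charged.
	 */
	double		dummy_total = sub_total + (new_per_tuple - old_per_tuple) * rows;

	/*
	 * Case 2: a separate Result node is needed, charging cpu_tuple_cost per
	 * row plus the full cost of the new tlist.
	 */
	double		result_total = sub_total + (cpu_tuple_cost + new_per_tuple) * rows;

	printf("dummy projection: %.2f   separate Result: %.2f\n",
		   dummy_total, result_total);
	return 0;
}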
2880 :
2881 : /*
2882 : * apply_projection_to_path
2883 : * Add a projection step, or just apply the target directly to given path.
2884 : *
2885 : * This has the same net effect as create_projection_path(), except that if
2886 : * a separate Result plan node isn't needed, we just replace the given path's
2887 : * pathtarget with the desired one. This must be used only when the caller
2888 : * knows that the given path isn't referenced elsewhere and so can be modified
2889 : * in-place.
2890 : *
2891 : * If the input path is a GatherPath or GatherMergePath, we try to push the
2892 : * new target down to its input as well; this is a yet more invasive
2893 : * modification of the input path, which create_projection_path() can't do.
2894 : *
2895 : * Note that we mustn't change the source path's parent link; so when it is
2896 : * add_path'd to "rel" things will be a bit inconsistent. So far that has
2897 : * not caused any trouble.
2898 : *
2899 : * 'rel' is the parent relation associated with the result
2900 : * 'path' is the path representing the source of data
2901 : * 'target' is the PathTarget to be computed
2902 : */
2903 : Path *
2904 13582 : apply_projection_to_path(PlannerInfo *root,
2905 : RelOptInfo *rel,
2906 : Path *path,
2907 : PathTarget *target)
2908 : {
2909 : QualCost oldcost;
2910 :
2911 : /*
2912 : * If given path can't project, we might need a Result node, so make a
2913 : * separate ProjectionPath.
2914 : */
2915 13582 : if (!is_projection_capable_path(path))
2916 1472 : return (Path *) create_projection_path(root, rel, path, target);
2917 :
2918 : /*
2919 : * We can just jam the desired tlist into the existing path, being sure to
2920 : * update its cost estimates appropriately.
2921 : */
2922 12110 : oldcost = path->pathtarget->cost;
2923 12110 : path->pathtarget = target;
2924 :
2925 12110 : path->startup_cost += target->cost.startup - oldcost.startup;
2926 12110 : path->total_cost += target->cost.startup - oldcost.startup +
2927 12110 : (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
2928 :
2929 : /*
2930 : * If the path happens to be a Gather or GatherMerge path, we'd like to
2931 : * arrange for the subpath to return the required target list so that
2932 : * workers can help project. But if there is something that is not
2933 : * parallel-safe in the target expressions, then we can't.
2934 : */
2935 12134 : if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) &&
2936 24 : is_parallel_safe(root, (Node *) target->exprs))
2937 : {
2938 : /*
2939 : * We always use create_projection_path here, even if the subpath is
2940 : * projection-capable, so as to avoid modifying the subpath in place.
2941 : * It seems unlikely at present that there could be any other
2942 : * references to the subpath, but better safe than sorry.
2943 : *
2944 : * Note that we don't change the parallel path's cost estimates; it
2945 : * might be appropriate to do so, to reflect the fact that the bulk of
2946 : * the target evaluation will happen in workers.
2947 : */
2948 24 : if (IsA(path, GatherPath))
2949 : {
2950 0 : GatherPath *gpath = (GatherPath *) path;
2951 :
2952 0 : gpath->subpath = (Path *)
2953 0 : create_projection_path(root,
2954 0 : gpath->subpath->parent,
2955 : gpath->subpath,
2956 : target);
2957 : }
2958 : else
2959 : {
2960 24 : GatherMergePath *gmpath = (GatherMergePath *) path;
2961 :
2962 24 : gmpath->subpath = (Path *)
2963 24 : create_projection_path(root,
2964 24 : gmpath->subpath->parent,
2965 : gmpath->subpath,
2966 : target);
2967 : }
2968 : }
2969 12086 : else if (path->parallel_safe &&
2970 4584 : !is_parallel_safe(root, (Node *) target->exprs))
2971 : {
2972 : /*
2973 : * We're inserting a parallel-restricted target list into a path
2974 : * currently marked parallel-safe, so we have to mark it as no longer
2975 : * safe.
2976 : */
2977 12 : path->parallel_safe = false;
2978 : }
2979 :
2980 12110 : return path;
2981 : }
2982 :
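/*
 * A standalone, illustrative sketch (not part of pathnode.c) of the decision
 * structure in apply_projection_to_path() above.  The enum and booleans are
 * stand-ins for the real Path node tests; only the control flow is meant to
 * mirror the function.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum {PLAIN_SCAN, GATHER, GATHER_MERGE, SORT} SketchPathType;

static const char *
project_action(bool can_project, SketchPathType type, bool target_parallel_safe)
{
    if (!can_project)
        return "wrap in a ProjectionPath (may need a Result node)";
    if ((type == GATHER || type == GATHER_MERGE) && target_parallel_safe)
        return "replace tlist in place and also push it below the Gather";
    return "replace tlist in place";
}

int
main(void)
{
    printf("%s\n", project_action(false, SORT, true));
    printf("%s\n", project_action(true, GATHER, true));
    printf("%s\n", project_action(true, PLAIN_SCAN, false));
    return 0;
}
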
2983 : /*
2984 : * create_set_projection_path
2985 : * Creates a pathnode that represents performing a projection that
2986 : * includes set-returning functions.
2987 : *
2988 : * 'rel' is the parent relation associated with the result
2989 : * 'subpath' is the path representing the source of data
2990 : * 'target' is the PathTarget to be computed
2991 : */
2992 : ProjectSetPath *
2993 11766 : create_set_projection_path(PlannerInfo *root,
2994 : RelOptInfo *rel,
2995 : Path *subpath,
2996 : PathTarget *target)
2997 : {
2998 11766 : ProjectSetPath *pathnode = makeNode(ProjectSetPath);
2999 : double tlist_rows;
3000 : ListCell *lc;
3001 :
3002 11766 : pathnode->path.pathtype = T_ProjectSet;
3003 11766 : pathnode->path.parent = rel;
3004 11766 : pathnode->path.pathtarget = target;
3005 : /* For now, assume we are above any joins, so no parameterization */
3006 11766 : pathnode->path.param_info = NULL;
3007 11766 : pathnode->path.parallel_aware = false;
3008 27948 : pathnode->path.parallel_safe = rel->consider_parallel &&
3009 16146 : subpath->parallel_safe &&
3010 4380 : is_parallel_safe(root, (Node *) target->exprs);
3011 11766 : pathnode->path.parallel_workers = subpath->parallel_workers;
3012 : /* Projection does not change the sort order XXX? */
3013 11766 : pathnode->path.pathkeys = subpath->pathkeys;
3014 :
3015 11766 : pathnode->subpath = subpath;
3016 :
3017 : /*
3018 : * Estimate number of rows produced by SRFs for each row of input; if
3019 : * there's more than one in this node, use the maximum.
3020 : */
3021 11766 : tlist_rows = 1;
3022 25512 : foreach(lc, target->exprs)
3023 : {
3024 13746 : Node *node = (Node *) lfirst(lc);
3025 : double itemrows;
3026 :
3027 13746 : itemrows = expression_returns_set_rows(root, node);
3028 13746 : if (tlist_rows < itemrows)
3029 11446 : tlist_rows = itemrows;
3030 : }
3031 :
3032 : /*
3033 : * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
3034 : * per input row, and half of cpu_tuple_cost for each added output row.
3035 : * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
3036 : * this estimate later.
3037 : */
3038 11766 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
3039 11766 : pathnode->path.rows = subpath->rows * tlist_rows;
3040 11766 : pathnode->path.startup_cost = subpath->startup_cost +
3041 11766 : target->cost.startup;
3042 11766 : pathnode->path.total_cost = subpath->total_cost +
3043 11766 : target->cost.startup +
3044 11766 : (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
3045 11766 : (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
3046 :
3047 11766 : return pathnode;
3048 : }
3049 :
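/*
 * A standalone, illustrative sketch (not part of pathnode.c) of the ProjectSet
 * row and cost estimates computed above.  srf_rows[] stands in for the values
 * expression_returns_set_rows() would return; the cost constants are made-up
 * examples rather than the server's real GUC settings.
 */
#include <stdio.h>

int
main(void)
{
    double  srf_rows[] = {1.0, 10.0, 3.0};  /* hypothetical per-SRF estimates */
    int     nsrfs = 3;
    double  sub_rows = 100.0;
    double  sub_startup = 0.0, sub_total = 20.0;
    double  tlist_startup = 0.0, tlist_per_tuple = 0.01;
    double  cpu_tuple_cost = 0.01;
    double  tlist_rows = 1.0;
    double  rows, startup, total;

    /* use the maximum SRF expansion factor, as in the loop above */
    for (int i = 0; i < nsrfs; i++)
        if (tlist_rows < srf_rows[i])
            tlist_rows = srf_rows[i];

    rows = sub_rows * tlist_rows;
    startup = sub_startup + tlist_startup;
    total = sub_total + tlist_startup +
        (cpu_tuple_cost + tlist_per_tuple) * sub_rows +
        (rows - sub_rows) * cpu_tuple_cost / 2;

    printf("project set: rows=%.0f startup=%.2f total=%.2f\n",
           rows, startup, total);
    return 0;
}
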
3050 : /*
3051 : * create_incremental_sort_path
3052 : * Creates a pathnode that represents performing an incremental sort.
3053 : *
3054 : * 'rel' is the parent relation associated with the result
3055 : * 'subpath' is the path representing the source of data
3056 : * 'pathkeys' represents the desired sort order
3057 : * 'presorted_keys' is the number of keys by which the input path is
3058 : * already sorted
3059 : * 'limit_tuples' is the estimated bound on the number of output tuples,
3060 : * or -1 if no LIMIT or couldn't estimate
3061 : */
3062 : IncrementalSortPath *
3063 9280 : create_incremental_sort_path(PlannerInfo *root,
3064 : RelOptInfo *rel,
3065 : Path *subpath,
3066 : List *pathkeys,
3067 : int presorted_keys,
3068 : double limit_tuples)
3069 : {
3070 9280 : IncrementalSortPath *sort = makeNode(IncrementalSortPath);
3071 9280 : SortPath *pathnode = &sort->spath;
3072 :
3073 9280 : pathnode->path.pathtype = T_IncrementalSort;
3074 9280 : pathnode->path.parent = rel;
3075 : /* Sort doesn't project, so use source path's pathtarget */
3076 9280 : pathnode->path.pathtarget = subpath->pathtarget;
3077 : /* For now, assume we are above any joins, so no parameterization */
3078 9280 : pathnode->path.param_info = NULL;
3079 9280 : pathnode->path.parallel_aware = false;
3080 13836 : pathnode->path.parallel_safe = rel->consider_parallel &&
3081 4556 : subpath->parallel_safe;
3082 9280 : pathnode->path.parallel_workers = subpath->parallel_workers;
3083 9280 : pathnode->path.pathkeys = pathkeys;
3084 :
3085 9280 : pathnode->subpath = subpath;
3086 :
3087 9280 : cost_incremental_sort(&pathnode->path,
3088 : root, pathkeys, presorted_keys,
3089 : subpath->disabled_nodes,
3090 : subpath->startup_cost,
3091 : subpath->total_cost,
3092 : subpath->rows,
3093 9280 : subpath->pathtarget->width,
3094 : 0.0, /* XXX comparison_cost shouldn't be 0? */
3095 : work_mem, limit_tuples);
3096 :
3097 9280 : sort->nPresortedCols = presorted_keys;
3098 :
3099 9280 : return sort;
3100 : }
3101 :
3102 : /*
3103 : * create_sort_path
3104 : * Creates a pathnode that represents performing an explicit sort.
3105 : *
3106 : * 'rel' is the parent relation associated with the result
3107 : * 'subpath' is the path representing the source of data
3108 : * 'pathkeys' represents the desired sort order
3109 : * 'limit_tuples' is the estimated bound on the number of output tuples,
3110 : * or -1 if no LIMIT or couldn't estimate
3111 : */
3112 : SortPath *
3113 100188 : create_sort_path(PlannerInfo *root,
3114 : RelOptInfo *rel,
3115 : Path *subpath,
3116 : List *pathkeys,
3117 : double limit_tuples)
3118 : {
3119 100188 : SortPath *pathnode = makeNode(SortPath);
3120 :
3121 100188 : pathnode->path.pathtype = T_Sort;
3122 100188 : pathnode->path.parent = rel;
3123 : /* Sort doesn't project, so use source path's pathtarget */
3124 100188 : pathnode->path.pathtarget = subpath->pathtarget;
3125 : /* For now, assume we are above any joins, so no parameterization */
3126 100188 : pathnode->path.param_info = NULL;
3127 100188 : pathnode->path.parallel_aware = false;
3128 171272 : pathnode->path.parallel_safe = rel->consider_parallel &&
3129 71084 : subpath->parallel_safe;
3130 100188 : pathnode->path.parallel_workers = subpath->parallel_workers;
3131 100188 : pathnode->path.pathkeys = pathkeys;
3132 :
3133 100188 : pathnode->subpath = subpath;
3134 :
3135 100188 : cost_sort(&pathnode->path, root, pathkeys,
3136 : subpath->disabled_nodes,
3137 : subpath->total_cost,
3138 : subpath->rows,
3139 100188 : subpath->pathtarget->width,
3140 : 0.0, /* XXX comparison_cost shouldn't be 0? */
3141 : work_mem, limit_tuples);
3142 :
3143 100188 : return pathnode;
3144 : }
3145 :
3146 : /*
3147 : * create_group_path
3148 : * Creates a pathnode that represents performing grouping of presorted input
3149 : *
3150 : * 'rel' is the parent relation associated with the result
3151 : * 'subpath' is the path representing the source of data
3152 : * 'target' is the PathTarget to be computed
3153 : * 'groupClause' is a list of SortGroupClause's representing the grouping
3154 : * 'qual' is the HAVING quals if any
3155 : * 'numGroups' is the estimated number of groups
3156 : */
3157 : GroupPath *
3158 1214 : create_group_path(PlannerInfo *root,
3159 : RelOptInfo *rel,
3160 : Path *subpath,
3161 : List *groupClause,
3162 : List *qual,
3163 : double numGroups)
3164 : {
3165 1214 : GroupPath *pathnode = makeNode(GroupPath);
3166 1214 : PathTarget *target = rel->reltarget;
3167 :
3168 1214 : pathnode->path.pathtype = T_Group;
3169 1214 : pathnode->path.parent = rel;
3170 1214 : pathnode->path.pathtarget = target;
3171 : /* For now, assume we are above any joins, so no parameterization */
3172 1214 : pathnode->path.param_info = NULL;
3173 1214 : pathnode->path.parallel_aware = false;
3174 1958 : pathnode->path.parallel_safe = rel->consider_parallel &&
3175 744 : subpath->parallel_safe;
3176 1214 : pathnode->path.parallel_workers = subpath->parallel_workers;
3177 : /* Group doesn't change sort ordering */
3178 1214 : pathnode->path.pathkeys = subpath->pathkeys;
3179 :
3180 1214 : pathnode->subpath = subpath;
3181 :
3182 1214 : pathnode->groupClause = groupClause;
3183 1214 : pathnode->qual = qual;
3184 :
3185 1214 : cost_group(&pathnode->path, root,
3186 : list_length(groupClause),
3187 : numGroups,
3188 : qual,
3189 : subpath->disabled_nodes,
3190 : subpath->startup_cost, subpath->total_cost,
3191 : subpath->rows);
3192 :
3193 : /* add tlist eval cost for each output row */
3194 1214 : pathnode->path.startup_cost += target->cost.startup;
3195 1214 : pathnode->path.total_cost += target->cost.startup +
3196 1214 : target->cost.per_tuple * pathnode->path.rows;
3197 :
3198 1214 : return pathnode;
3199 : }
3200 :
3201 : /*
3202 : * create_upper_unique_path
3203 : * Creates a pathnode that represents performing an explicit Unique step
3204 : * on presorted input.
3205 : *
3206 : * This produces a Unique plan node, but the use-case is so different from
3207 : * create_unique_path that it doesn't seem worth trying to merge the two.
3208 : *
3209 : * 'rel' is the parent relation associated with the result
3210 : * 'subpath' is the path representing the source of data
3211 : * 'numCols' is the number of grouping columns
3212 : * 'numGroups' is the estimated number of groups
3213 : *
3214 : * The input path must be sorted on the grouping columns, plus possibly
3215 : * additional columns; so the first numCols pathkeys are the grouping columns
3216 : */
3217 : UpperUniquePath *
3218 8124 : create_upper_unique_path(PlannerInfo *root,
3219 : RelOptInfo *rel,
3220 : Path *subpath,
3221 : int numCols,
3222 : double numGroups)
3223 : {
3224 8124 : UpperUniquePath *pathnode = makeNode(UpperUniquePath);
3225 :
3226 8124 : pathnode->path.pathtype = T_Unique;
3227 8124 : pathnode->path.parent = rel;
3228 : /* Unique doesn't project, so use source path's pathtarget */
3229 8124 : pathnode->path.pathtarget = subpath->pathtarget;
3230 : /* For now, assume we are above any joins, so no parameterization */
3231 8124 : pathnode->path.param_info = NULL;
3232 8124 : pathnode->path.parallel_aware = false;
3233 12764 : pathnode->path.parallel_safe = rel->consider_parallel &&
3234 4640 : subpath->parallel_safe;
3235 8124 : pathnode->path.parallel_workers = subpath->parallel_workers;
3236 : /* Unique doesn't change the input ordering */
3237 8124 : pathnode->path.pathkeys = subpath->pathkeys;
3238 :
3239 8124 : pathnode->subpath = subpath;
3240 8124 : pathnode->numkeys = numCols;
3241 :
3242 : /*
3243 : * Charge one cpu_operator_cost per comparison per input tuple. We assume
3244 : * all columns get compared at most of the tuples. (XXX probably this is
3245 : * an overestimate.)
3246 : */
3247 8124 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
3248 8124 : pathnode->path.startup_cost = subpath->startup_cost;
3249 8124 : pathnode->path.total_cost = subpath->total_cost +
3250 8124 : cpu_operator_cost * subpath->rows * numCols;
3251 8124 : pathnode->path.rows = numGroups;
3252 :
3253 8124 : return pathnode;
3254 : }
3255 :
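/*
 * A standalone, illustrative sketch (not part of pathnode.c) of the Unique
 * costing above: one cpu_operator_cost per grouping column per input tuple is
 * added to the subpath's total cost, and the row estimate becomes numGroups.
 * The numbers are made-up examples.
 */
#include <stdio.h>

int
main(void)
{
    double  cpu_operator_cost = 0.0025;
    double  sub_total = 120.0, sub_rows = 1000.0;
    int     numCols = 2;
    double  numGroups = 50.0;

    double  total = sub_total + cpu_operator_cost * sub_rows * numCols;

    printf("unique: total=%.2f rows=%.0f\n", total, numGroups);
    return 0;
}
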
3256 : /*
3257 : * create_agg_path
3258 : * Creates a pathnode that represents performing aggregation/grouping
3259 : *
3260 : * 'rel' is the parent relation associated with the result
3261 : * 'subpath' is the path representing the source of data
3262 : * 'target' is the PathTarget to be computed
3263 : * 'aggstrategy' is the Agg node's basic implementation strategy
3264 : * 'aggsplit' is the Agg node's aggregate-splitting mode
3265 : * 'groupClause' is a list of SortGroupClause's representing the grouping
3266 : * 'qual' is the HAVING quals if any
3267 : * 'aggcosts' contains cost info about the aggregate functions to be computed
3268 : * 'numGroups' is the estimated number of groups (1 if not grouping)
3269 : */
3270 : AggPath *
3271 57470 : create_agg_path(PlannerInfo *root,
3272 : RelOptInfo *rel,
3273 : Path *subpath,
3274 : PathTarget *target,
3275 : AggStrategy aggstrategy,
3276 : AggSplit aggsplit,
3277 : List *groupClause,
3278 : List *qual,
3279 : const AggClauseCosts *aggcosts,
3280 : double numGroups)
3281 : {
3282 57470 : AggPath *pathnode = makeNode(AggPath);
3283 :
3284 57470 : pathnode->path.pathtype = T_Agg;
3285 57470 : pathnode->path.parent = rel;
3286 57470 : pathnode->path.pathtarget = target;
3287 : /* For now, assume we are above any joins, so no parameterization */
3288 57470 : pathnode->path.param_info = NULL;
3289 57470 : pathnode->path.parallel_aware = false;
3290 96374 : pathnode->path.parallel_safe = rel->consider_parallel &&
3291 38904 : subpath->parallel_safe;
3292 57470 : pathnode->path.parallel_workers = subpath->parallel_workers;
3293 :
3294 57470 : if (aggstrategy == AGG_SORTED)
3295 : {
3296 : /*
3297 : * Attempt to preserve the order of the subpath. Additional pathkeys
3298 : * may have been added in adjust_group_pathkeys_for_groupagg() to
3299 : * support ORDER BY / DISTINCT aggregates. Pathkeys added there
3300 : * belong to columns within the aggregate function, so we must strip
3301 : * these additional pathkeys off as those columns are unavailable
3302 : * above the aggregate node.
3303 : */
3304 7818 : if (list_length(subpath->pathkeys) > root->num_groupby_pathkeys)
3305 328 : pathnode->path.pathkeys = list_copy_head(subpath->pathkeys,
3306 : root->num_groupby_pathkeys);
3307 : else
3308 7490 : pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
3309 : }
3310 : else
3311 49652 : pathnode->path.pathkeys = NIL; /* output is unordered */
3312 :
3313 57470 : pathnode->subpath = subpath;
3314 :
3315 57470 : pathnode->aggstrategy = aggstrategy;
3316 57470 : pathnode->aggsplit = aggsplit;
3317 57470 : pathnode->numGroups = numGroups;
3318 57470 : pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0;
3319 57470 : pathnode->groupClause = groupClause;
3320 57470 : pathnode->qual = qual;
3321 :
3322 57470 : cost_agg(&pathnode->path, root,
3323 : aggstrategy, aggcosts,
3324 : list_length(groupClause), numGroups,
3325 : qual,
3326 : subpath->disabled_nodes,
3327 : subpath->startup_cost, subpath->total_cost,
3328 57470 : subpath->rows, subpath->pathtarget->width);
3329 :
3330 : /* add tlist eval cost for each output row */
3331 57470 : pathnode->path.startup_cost += target->cost.startup;
3332 57470 : pathnode->path.total_cost += target->cost.startup +
3333 57470 : target->cost.per_tuple * pathnode->path.rows;
3334 :
3335 57470 : return pathnode;
3336 : }
3337 :
3338 : /*
3339 : * create_groupingsets_path
3340 : * Creates a pathnode that represents performing GROUPING SETS aggregation
3341 : *
3342 : * GroupingSetsPath represents sorted grouping with one or more grouping sets.
3343 : * The input path's result must be sorted to match the grouping of the
3344 : * last rollup in 'rollups'.
3345 : *
3346 : * 'rel' is the parent relation associated with the result
3347 : * 'subpath' is the path representing the source of data
3348 : * 'target' is the PathTarget to be computed
3349 : * 'having_qual' is the HAVING quals if any
3350 : * 'rollups' is a list of RollupData nodes
3351 : * 'agg_costs' contains cost info about the aggregate functions to be computed
3352 : */
3353 : GroupingSetsPath *
3354 2128 : create_groupingsets_path(PlannerInfo *root,
3355 : RelOptInfo *rel,
3356 : Path *subpath,
3357 : List *having_qual,
3358 : AggStrategy aggstrategy,
3359 : List *rollups,
3360 : const AggClauseCosts *agg_costs)
3361 : {
3362 2128 : GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
3363 2128 : PathTarget *target = rel->reltarget;
3364 : ListCell *lc;
3365 2128 : bool is_first = true;
3366 2128 : bool is_first_sort = true;
3367 :
3368 : /* The topmost generated Plan node will be an Agg */
3369 2128 : pathnode->path.pathtype = T_Agg;
3370 2128 : pathnode->path.parent = rel;
3371 2128 : pathnode->path.pathtarget = target;
3372 2128 : pathnode->path.param_info = subpath->param_info;
3373 2128 : pathnode->path.parallel_aware = false;
3374 3118 : pathnode->path.parallel_safe = rel->consider_parallel &&
3375 990 : subpath->parallel_safe;
3376 2128 : pathnode->path.parallel_workers = subpath->parallel_workers;
3377 2128 : pathnode->subpath = subpath;
3378 :
3379 : /*
3380 : * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
3381 : * to AGG_HASHED, here if possible.
3382 : */
3383 3036 : if (aggstrategy == AGG_SORTED &&
3384 908 : list_length(rollups) == 1 &&
3385 458 : ((RollupData *) linitial(rollups))->groupClause == NIL)
3386 42 : aggstrategy = AGG_PLAIN;
3387 :
3388 3044 : if (aggstrategy == AGG_MIXED &&
3389 916 : list_length(rollups) == 1)
3390 0 : aggstrategy = AGG_HASHED;
3391 :
3392 : /*
3393 : * Output will be in sorted order by group_pathkeys if, and only if, there
3394 : * is a single rollup operation on a non-empty list of grouping
3395 : * expressions.
3396 : */
3397 2128 : if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
3398 416 : pathnode->path.pathkeys = root->group_pathkeys;
3399 : else
3400 1712 : pathnode->path.pathkeys = NIL;
3401 :
3402 2128 : pathnode->aggstrategy = aggstrategy;
3403 2128 : pathnode->rollups = rollups;
3404 2128 : pathnode->qual = having_qual;
3405 2128 : pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0;
3406 :
3407 : Assert(rollups != NIL);
3408 : Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
3409 : Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
3410 :
3411 7416 : foreach(lc, rollups)
3412 : {
3413 5288 : RollupData *rollup = lfirst(lc);
3414 5288 : List *gsets = rollup->gsets;
3415 5288 : int numGroupCols = list_length(linitial(gsets));
3416 :
3417 : /*
3418 : * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
3419 : * (already-sorted) input, and following ones do their own sort.
3420 : *
3421 : * In AGG_HASHED mode, there is one rollup for each grouping set.
3422 : *
3423 : * In AGG_MIXED mode, the first rollups are hashed, the first
3424 : * non-hashed one takes the (already-sorted) input, and following ones
3425 : * do their own sort.
3426 : */
3427 5288 : if (is_first)
3428 : {
3429 2128 : cost_agg(&pathnode->path, root,
3430 : aggstrategy,
3431 : agg_costs,
3432 : numGroupCols,
3433 : rollup->numGroups,
3434 : having_qual,
3435 : subpath->disabled_nodes,
3436 : subpath->startup_cost,
3437 : subpath->total_cost,
3438 : subpath->rows,
3439 2128 : subpath->pathtarget->width);
3440 2128 : is_first = false;
3441 2128 : if (!rollup->is_hashed)
3442 908 : is_first_sort = false;
3443 : }
3444 : else
3445 : {
3446 : Path sort_path; /* dummy for result of cost_sort */
3447 : Path agg_path; /* dummy for result of cost_agg */
3448 :
3449 3160 : if (rollup->is_hashed || is_first_sort)
3450 : {
3451 : /*
3452 : * Account for cost of aggregation, but don't charge input
3453 : * cost again
3454 : */
3455 2422 : cost_agg(&agg_path, root,
3456 2422 : rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
3457 : agg_costs,
3458 : numGroupCols,
3459 : rollup->numGroups,
3460 : having_qual,
3461 : 0, 0.0, 0.0,
3462 : subpath->rows,
3463 2422 : subpath->pathtarget->width);
3464 2422 : if (!rollup->is_hashed)
3465 916 : is_first_sort = false;
3466 : }
3467 : else
3468 : {
3469 : /* Account for cost of sort, but don't charge input cost again */
3470 738 : cost_sort(&sort_path, root, NIL, 0,
3471 : 0.0,
3472 : subpath->rows,
3473 738 : subpath->pathtarget->width,
3474 : 0.0,
3475 : work_mem,
3476 : -1.0);
3477 :
3478 : /* Account for cost of aggregation */
3479 :
3480 738 : cost_agg(&agg_path, root,
3481 : AGG_SORTED,
3482 : agg_costs,
3483 : numGroupCols,
3484 : rollup->numGroups,
3485 : having_qual,
3486 : sort_path.disabled_nodes,
3487 : sort_path.startup_cost,
3488 : sort_path.total_cost,
3489 : sort_path.rows,
3490 738 : subpath->pathtarget->width);
3491 : }
3492 :
3493 3160 : pathnode->path.disabled_nodes += agg_path.disabled_nodes;
3494 3160 : pathnode->path.total_cost += agg_path.total_cost;
3495 3160 : pathnode->path.rows += agg_path.rows;
3496 : }
3497 : }
3498 :
3499 : /* add tlist eval cost for each output row */
3500 2128 : pathnode->path.startup_cost += target->cost.startup;
3501 2128 : pathnode->path.total_cost += target->cost.startup +
3502 2128 : target->cost.per_tuple * pathnode->path.rows;
3503 :
3504 2128 : return pathnode;
3505 : }
3506 :
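/*
 * A standalone, illustrative sketch (not part of pathnode.c) of how the loop
 * above accumulates cost over multiple rollups: the first rollup is charged
 * together with the input cost, later hashed rollups (and the first sorted
 * one) add only their aggregation cost, and any remaining sorted rollups add
 * a re-sort plus aggregation.  agg_cost() and sort_cost() are crude made-up
 * stand-ins for cost_agg()/cost_sort(), not the real formulas.
 */
#include <stdbool.h>
#include <stdio.h>

static double
agg_cost(double input_cost, double input_rows)
{
    return input_cost + 0.01 * input_rows;      /* hypothetical */
}

static double
sort_cost(double input_rows)
{
    return 2.0 * 0.0025 * input_rows;           /* hypothetical */
}

int
main(void)
{
    bool    is_hashed[] = {true, false, false}; /* one entry per rollup */
    double  num_groups[] = {10, 100, 5};
    int     nrollups = 3;
    double  sub_total = 50.0, sub_rows = 1000.0;
    double  total = 0.0, rows = 0.0;
    bool    is_first = true, is_first_sort = true;

    for (int i = 0; i < nrollups; i++)
    {
        if (is_first)
        {
            total = agg_cost(sub_total, sub_rows);  /* input cost charged once */
            is_first = false;
            if (!is_hashed[i])
                is_first_sort = false;
        }
        else if (is_hashed[i] || is_first_sort)
        {
            total += agg_cost(0.0, sub_rows);       /* no input cost again */
            if (!is_hashed[i])
                is_first_sort = false;
        }
        else
            total += agg_cost(sort_cost(sub_rows), sub_rows);   /* own sort */
        rows += num_groups[i];
    }

    printf("grouping sets: total=%.2f rows=%.0f\n", total, rows);
    return 0;
}
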
3507 : /*
3508 : * create_minmaxagg_path
3509 : * Creates a pathnode that represents computation of MIN/MAX aggregates
3510 : *
3511 : * 'rel' is the parent relation associated with the result
3512 : * 'target' is the PathTarget to be computed
3513 : * 'mmaggregates' is a list of MinMaxAggInfo structs
3514 : * 'quals' is the HAVING quals if any
3515 : */
3516 : MinMaxAggPath *
3517 410 : create_minmaxagg_path(PlannerInfo *root,
3518 : RelOptInfo *rel,
3519 : PathTarget *target,
3520 : List *mmaggregates,
3521 : List *quals)
3522 : {
3523 410 : MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
3524 : Cost initplan_cost;
3525 410 : int initplan_disabled_nodes = 0;
3526 : ListCell *lc;
3527 :
3528 : /* The topmost generated Plan node will be a Result */
3529 410 : pathnode->path.pathtype = T_Result;
3530 410 : pathnode->path.parent = rel;
3531 410 : pathnode->path.pathtarget = target;
3532 : /* For now, assume we are above any joins, so no parameterization */
3533 410 : pathnode->path.param_info = NULL;
3534 410 : pathnode->path.parallel_aware = false;
3535 410 : pathnode->path.parallel_safe = true; /* might change below */
3536 410 : pathnode->path.parallel_workers = 0;
3537 : /* Result is one unordered row */
3538 410 : pathnode->path.rows = 1;
3539 410 : pathnode->path.pathkeys = NIL;
3540 :
3541 410 : pathnode->mmaggregates = mmaggregates;
3542 410 : pathnode->quals = quals;
3543 :
3544 : /* Calculate cost of all the initplans, and check parallel safety */
3545 410 : initplan_cost = 0;
3546 856 : foreach(lc, mmaggregates)
3547 : {
3548 446 : MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
3549 :
3550 446 : initplan_disabled_nodes += mminfo->path->disabled_nodes;
3551 446 : initplan_cost += mminfo->pathcost;
3552 446 : if (!mminfo->path->parallel_safe)
3553 110 : pathnode->path.parallel_safe = false;
3554 : }
3555 :
3556 : /* add tlist eval cost for each output row, plus cpu_tuple_cost */
3557 410 : pathnode->path.disabled_nodes = initplan_disabled_nodes;
3558 410 : pathnode->path.startup_cost = initplan_cost + target->cost.startup;
3559 410 : pathnode->path.total_cost = initplan_cost + target->cost.startup +
3560 410 : target->cost.per_tuple + cpu_tuple_cost;
3561 :
3562 : /*
3563 : * Add cost of qual, if any --- but we ignore its selectivity, since our
3564 : * rowcount estimate should be 1 no matter what the qual is.
3565 : */
3566 410 : if (quals)
3567 : {
3568 : QualCost qual_cost;
3569 :
3570 0 : cost_qual_eval(&qual_cost, quals, root);
3571 0 : pathnode->path.startup_cost += qual_cost.startup;
3572 0 : pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
3573 : }
3574 :
3575 : /*
3576 : * If the initplans were all parallel-safe, also check safety of the
3577 : * target and quals. (The Result node itself isn't parallelizable, but if
3578 : * we are in a subquery then it can be useful for the outer query to know
3579 : * that this one is parallel-safe.)
3580 : */
3581 410 : if (pathnode->path.parallel_safe)
3582 300 : pathnode->path.parallel_safe =
3583 600 : is_parallel_safe(root, (Node *) target->exprs) &&
3584 300 : is_parallel_safe(root, (Node *) quals);
3585 :
3586 410 : return pathnode;
3587 : }
3588 :
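/*
 * A standalone, illustrative sketch (not part of pathnode.c) of the cost and
 * parallel-safety bookkeeping above: the Result node's cost is the sum of the
 * initplan costs plus one row's worth of tlist evaluation and cpu_tuple_cost,
 * and the path is parallel-safe only if every initplan is.  The input arrays
 * are made-up examples.
 */
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    double  initplan_costs[] = {4.3, 8.6};      /* hypothetical initplan costs */
    bool    initplan_safe[] = {true, false};
    int     n = 2;
    double  tlist_startup = 0.0, tlist_per_tuple = 0.005;
    double  cpu_tuple_cost = 0.01;
    double  initplan_cost = 0.0;
    bool    parallel_safe = true;

    for (int i = 0; i < n; i++)
    {
        initplan_cost += initplan_costs[i];
        if (!initplan_safe[i])
            parallel_safe = false;
    }

    printf("minmaxagg: rows=1 startup=%.3f total=%.3f parallel_safe=%d\n",
           initplan_cost + tlist_startup,
           initplan_cost + tlist_startup + tlist_per_tuple + cpu_tuple_cost,
           (int) parallel_safe);
    return 0;
}
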
3589 : /*
3590 : * create_windowagg_path
3591 : * Creates a pathnode that represents computation of window functions
3592 : *
3593 : * 'rel' is the parent relation associated with the result
3594 : * 'subpath' is the path representing the source of data
3595 : * 'target' is the PathTarget to be computed
3596 : * 'windowFuncs' is a list of WindowFunc structs
3597 : * 'runCondition' is a list of OpExprs to short-circuit WindowAgg execution
3598 : * 'winclause' is a WindowClause that is common to all the WindowFuncs
3599 : * 'qual' is the list of WindowClause.runconditions collected from
3600 : * lower-level WindowAggPaths; must be NIL when topwindow == false
3601 : * 'topwindow' pass as true only for the top-level WindowAgg. False for all
3602 : * intermediate WindowAggs.
3603 : *
3604 : * The input must be sorted according to the WindowClause's PARTITION keys
3605 : * plus ORDER BY keys.
3606 : */
3607 : WindowAggPath *
3608 2754 : create_windowagg_path(PlannerInfo *root,
3609 : RelOptInfo *rel,
3610 : Path *subpath,
3611 : PathTarget *target,
3612 : List *windowFuncs,
3613 : List *runCondition,
3614 : WindowClause *winclause,
3615 : List *qual,
3616 : bool topwindow)
3617 : {
3618 2754 : WindowAggPath *pathnode = makeNode(WindowAggPath);
3619 :
3620 : /* qual can only be set for the topwindow */
3621 : Assert(qual == NIL || topwindow);
3622 :
3623 2754 : pathnode->path.pathtype = T_WindowAgg;
3624 2754 : pathnode->path.parent = rel;
3625 2754 : pathnode->path.pathtarget = target;
3626 : /* For now, assume we are above any joins, so no parameterization */
3627 2754 : pathnode->path.param_info = NULL;
3628 2754 : pathnode->path.parallel_aware = false;
3629 2754 : pathnode->path.parallel_safe = rel->consider_parallel &&
3630 0 : subpath->parallel_safe;
3631 2754 : pathnode->path.parallel_workers = subpath->parallel_workers;
3632 : /* WindowAgg preserves the input sort order */
3633 2754 : pathnode->path.pathkeys = subpath->pathkeys;
3634 :
3635 2754 : pathnode->subpath = subpath;
3636 2754 : pathnode->winclause = winclause;
3637 2754 : pathnode->qual = qual;
3638 2754 : pathnode->runCondition = runCondition;
3639 2754 : pathnode->topwindow = topwindow;
3640 :
3641 : /*
3642 : * For costing purposes, assume that there are no redundant partitioning
3643 : * or ordering columns; it's not worth the trouble to deal with that
3644 : * corner case here. So we just pass the unmodified list lengths to
3645 : * cost_windowagg.
3646 : */
3647 2754 : cost_windowagg(&pathnode->path, root,
3648 : windowFuncs,
3649 : winclause,
3650 : subpath->disabled_nodes,
3651 : subpath->startup_cost,
3652 : subpath->total_cost,
3653 : subpath->rows);
3654 :
3655 : /* add tlist eval cost for each output row */
3656 2754 : pathnode->path.startup_cost += target->cost.startup;
3657 2754 : pathnode->path.total_cost += target->cost.startup +
3658 2754 : target->cost.per_tuple * pathnode->path.rows;
3659 :
3660 2754 : return pathnode;
3661 : }
3662 :
3663 : /*
3664 : * create_setop_path
3665 : * Creates a pathnode that represents computation of INTERSECT or EXCEPT
3666 : *
3667 : * 'rel' is the parent relation associated with the result
3668 : * 'leftpath' is the path representing the left-hand source of data
3669 : * 'rightpath' is the path representing the right-hand source of data
3670 : * 'cmd' is the specific semantics (INTERSECT or EXCEPT, with/without ALL)
3671 : * 'strategy' is the implementation strategy (sorted or hashed)
3672 : * 'groupList' is a list of SortGroupClause's representing the grouping
3673 : * 'numGroups' is the estimated number of distinct groups in left-hand input
3674 : * 'outputRows' is the estimated number of output rows
3675 : *
3676 : * leftpath and rightpath must produce the same columns. Moreover, if
3677 : * strategy is SETOP_SORTED, leftpath and rightpath must both be sorted
3678 : * by all the grouping columns.
3679 : */
3680 : SetOpPath *
3681 1264 : create_setop_path(PlannerInfo *root,
3682 : RelOptInfo *rel,
3683 : Path *leftpath,
3684 : Path *rightpath,
3685 : SetOpCmd cmd,
3686 : SetOpStrategy strategy,
3687 : List *groupList,
3688 : double numGroups,
3689 : double outputRows)
3690 : {
3691 1264 : SetOpPath *pathnode = makeNode(SetOpPath);
3692 :
3693 1264 : pathnode->path.pathtype = T_SetOp;
3694 1264 : pathnode->path.parent = rel;
3695 1264 : pathnode->path.pathtarget = rel->reltarget;
3696 : /* For now, assume we are above any joins, so no parameterization */
3697 1264 : pathnode->path.param_info = NULL;
3698 1264 : pathnode->path.parallel_aware = false;
3699 2528 : pathnode->path.parallel_safe = rel->consider_parallel &&
3700 1264 : leftpath->parallel_safe && rightpath->parallel_safe;
3701 1264 : pathnode->path.parallel_workers =
3702 1264 : leftpath->parallel_workers + rightpath->parallel_workers;
3703 : /* SetOp preserves the input sort order if in sort mode */
3704 1264 : pathnode->path.pathkeys =
3705 1264 : (strategy == SETOP_SORTED) ? leftpath->pathkeys : NIL;
3706 :
3707 1264 : pathnode->leftpath = leftpath;
3708 1264 : pathnode->rightpath = rightpath;
3709 1264 : pathnode->cmd = cmd;
3710 1264 : pathnode->strategy = strategy;
3711 1264 : pathnode->groupList = groupList;
3712 1264 : pathnode->numGroups = numGroups;
3713 :
3714 : /*
3715 : * Compute cost estimates. As things stand, we end up with the same total
3716 : * cost in this node for sort and hash methods, but different startup
3717 : * costs. This could be refined perhaps, but it'll do for now.
3718 : */
3719 1264 : pathnode->path.disabled_nodes =
3720 1264 : leftpath->disabled_nodes + rightpath->disabled_nodes;
3721 1264 : if (strategy == SETOP_SORTED)
3722 : {
3723 : /*
3724 : * In sorted mode, we can emit output incrementally. Charge one
3725 : * cpu_operator_cost per comparison per input tuple. Like cost_group,
3726 : * we assume all columns get compared at most of the tuples.
3727 : */
3728 662 : pathnode->path.startup_cost =
3729 662 : leftpath->startup_cost + rightpath->startup_cost;
3730 662 : pathnode->path.total_cost =
3731 1324 : leftpath->total_cost + rightpath->total_cost +
3732 662 : cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList);
3733 :
3734 : /*
3735 : * Also charge a small amount per extracted tuple. Like cost_sort,
3736 : * charge only operator cost not cpu_tuple_cost, since SetOp does no
3737 : * qual-checking or projection.
3738 : */
3739 662 : pathnode->path.total_cost += cpu_operator_cost * outputRows;
3740 : }
3741 : else
3742 : {
3743 : Size hashentrysize;
3744 :
3745 : /*
3746 : * In hashed mode, we must read all the input before we can emit
3747 : * anything. Also charge comparison costs to represent the cost of
3748 : * hash table lookups.
3749 : */
3750 602 : pathnode->path.startup_cost =
3751 1204 : leftpath->total_cost + rightpath->total_cost +
3752 602 : cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList);
3753 602 : pathnode->path.total_cost = pathnode->path.startup_cost;
3754 :
3755 : /*
3756 : * Also charge a small amount per extracted tuple. Like cost_sort,
3757 : * charge only operator cost not cpu_tuple_cost, since SetOp does no
3758 : * qual-checking or projection.
3759 : */
3760 602 : pathnode->path.total_cost += cpu_operator_cost * outputRows;
3761 :
3762 : /*
3763 : * Mark the path as disabled if enable_hashagg is off. While this
3764 : * isn't exactly a HashAgg node, it seems close enough to justify
3765 : * letting that switch control it.
3766 : */
3767 602 : if (!enable_hashagg)
3768 114 : pathnode->path.disabled_nodes++;
3769 :
3770 : /*
3771 : * Also disable if it doesn't look like the hashtable will fit into
3772 : * hash_mem.
3773 : */
3774 602 : hashentrysize = MAXALIGN(leftpath->pathtarget->width) +
3775 : MAXALIGN(SizeofMinimalTupleHeader);
3776 602 : if (hashentrysize * numGroups > get_hash_memory_limit())
3777 0 : pathnode->path.disabled_nodes++;
3778 : }
3779 1264 : pathnode->path.rows = outputRows;
3780 :
3781 1264 : return pathnode;
3782 : }
3783 :
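/*
 * A standalone, illustrative sketch (not part of pathnode.c) of the SetOp
 * costing above.  Sorted mode starts emitting once both inputs start, while
 * hashed mode must read both inputs fully before emitting; both charge one
 * cpu_operator_cost per grouping column per input row plus a small per-output
 * charge.  ALIGN8 is a stand-in for MAXALIGN, and the entry width, header
 * size, and memory limit are made-up numbers, not the server's real values.
 */
#include <stdio.h>

#define ALIGN8(x) (((x) + 7) & ~((unsigned long) 7))

int
main(void)
{
    double  cpu_operator_cost = 0.0025;
    double  l_startup = 0.0, l_total = 100.0, l_rows = 1000.0;
    double  r_startup = 0.0, r_total = 80.0, r_rows = 800.0;
    int     ncols = 2;
    double  numGroups = 500.0, outputRows = 200.0;
    double  compare = cpu_operator_cost * (l_rows + r_rows) * ncols;

    /* sorted mode: output is produced incrementally */
    double  sorted_startup = l_startup + r_startup;
    double  sorted_total = l_total + r_total + compare +
        cpu_operator_cost * outputRows;

    /* hashed mode: all input consumed before the first output row */
    double  hashed_startup = l_total + r_total + compare;
    double  hashed_total = hashed_startup + cpu_operator_cost * outputRows;

    /* hash-table size check (all sizes hypothetical) */
    unsigned long hashentrysize = ALIGN8(64) + ALIGN8(24);
    double  hash_mem_limit = 4.0 * 1024 * 1024;
    int     over_limit = hashentrysize * numGroups > hash_mem_limit;

    printf("sorted: startup=%.2f total=%.2f\n", sorted_startup, sorted_total);
    printf("hashed: startup=%.2f total=%.2f over_mem_limit=%d\n",
           hashed_startup, hashed_total, over_limit);
    return 0;
}
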
3784 : /*
3785 : * create_recursiveunion_path
3786 : * Creates a pathnode that represents a recursive UNION node
3787 : *
3788 : * 'rel' is the parent relation associated with the result
3789 : * 'leftpath' is the source of data for the non-recursive term
3790 : * 'rightpath' is the source of data for the recursive term
3791 : * 'target' is the PathTarget to be computed
3792 : * 'distinctList' is a list of SortGroupClause's representing the grouping
3793 : * 'wtParam' is the ID of Param representing work table
3794 : * 'numGroups' is the estimated number of groups
3795 : *
3796 : * For recursive UNION ALL, distinctList is empty and numGroups is zero
3797 : */
3798 : RecursiveUnionPath *
3799 920 : create_recursiveunion_path(PlannerInfo *root,
3800 : RelOptInfo *rel,
3801 : Path *leftpath,
3802 : Path *rightpath,
3803 : PathTarget *target,
3804 : List *distinctList,
3805 : int wtParam,
3806 : double numGroups)
3807 : {
3808 920 : RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
3809 :
3810 920 : pathnode->path.pathtype = T_RecursiveUnion;
3811 920 : pathnode->path.parent = rel;
3812 920 : pathnode->path.pathtarget = target;
3813 : /* For now, assume we are above any joins, so no parameterization */
3814 920 : pathnode->path.param_info = NULL;
3815 920 : pathnode->path.parallel_aware = false;
3816 1840 : pathnode->path.parallel_safe = rel->consider_parallel &&
3817 920 : leftpath->parallel_safe && rightpath->parallel_safe;
3818 : /* Foolish, but we'll do it like joins for now: */
3819 920 : pathnode->path.parallel_workers = leftpath->parallel_workers;
3820 : /* RecursiveUnion result is always unsorted */
3821 920 : pathnode->path.pathkeys = NIL;
3822 :
3823 920 : pathnode->leftpath = leftpath;
3824 920 : pathnode->rightpath = rightpath;
3825 920 : pathnode->distinctList = distinctList;
3826 920 : pathnode->wtParam = wtParam;
3827 920 : pathnode->numGroups = numGroups;
3828 :
3829 920 : cost_recursive_union(&pathnode->path, leftpath, rightpath);
3830 :
3831 920 : return pathnode;
3832 : }
3833 :
3834 : /*
3835 : * create_lockrows_path
3836 : * Creates a pathnode that represents acquiring row locks
3837 : *
3838 : * 'rel' is the parent relation associated with the result
3839 : * 'subpath' is the path representing the source of data
3840 : * 'rowMarks' is a list of PlanRowMark's
3841 : * 'epqParam' is the ID of Param for EvalPlanQual re-eval
3842 : */
3843 : LockRowsPath *
3844 8222 : create_lockrows_path(PlannerInfo *root, RelOptInfo *rel,
3845 : Path *subpath, List *rowMarks, int epqParam)
3846 : {
3847 8222 : LockRowsPath *pathnode = makeNode(LockRowsPath);
3848 :
3849 8222 : pathnode->path.pathtype = T_LockRows;
3850 8222 : pathnode->path.parent = rel;
3851 : /* LockRows doesn't project, so use source path's pathtarget */
3852 8222 : pathnode->path.pathtarget = subpath->pathtarget;
3853 : /* For now, assume we are above any joins, so no parameterization */
3854 8222 : pathnode->path.param_info = NULL;
3855 8222 : pathnode->path.parallel_aware = false;
3856 8222 : pathnode->path.parallel_safe = false;
3857 8222 : pathnode->path.parallel_workers = 0;
3858 8222 : pathnode->path.rows = subpath->rows;
3859 :
3860 : /*
3861 : * The result cannot be assumed sorted, since locking might cause the sort
3862 : * key columns to be replaced with new values.
3863 : */
3864 8222 : pathnode->path.pathkeys = NIL;
3865 :
3866 8222 : pathnode->subpath = subpath;
3867 8222 : pathnode->rowMarks = rowMarks;
3868 8222 : pathnode->epqParam = epqParam;
3869 :
3870 : /*
3871 : * We should charge something extra for the costs of row locking and
3872 : * possible refetches, but it's hard to say how much. For now, use
3873 : * cpu_tuple_cost per row.
3874 : */
3875 8222 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
3876 8222 : pathnode->path.startup_cost = subpath->startup_cost;
3877 8222 : pathnode->path.total_cost = subpath->total_cost +
3878 8222 : cpu_tuple_cost * subpath->rows;
3879 :
3880 8222 : return pathnode;
3881 : }
3882 :
3883 : /*
3884 : * create_modifytable_path
3885 : * Creates a pathnode that represents performing INSERT/UPDATE/DELETE/MERGE
3886 : * mods
3887 : *
3888 : * 'rel' is the parent relation associated with the result
3889 : * 'subpath' is a Path producing source data
3890 : * 'operation' is the operation type
3891 : * 'canSetTag' is true if we set the command tag/es_processed
3892 : * 'nominalRelation' is the parent RT index for use of EXPLAIN
3893 : * 'rootRelation' is the partitioned/inherited table root RTI, or 0 if none
3894 : * 'partColsUpdated' is true if any partitioning columns are being updated,
3895 : * either from the target relation or a descendant partitioned table.
3896 : * 'resultRelations' is an integer list of actual RT indexes of target rel(s)
3897 : * 'updateColnosLists' is a list of UPDATE target column number lists
3898 : * (one sublist per rel); or NIL if not an UPDATE
3899 : * 'withCheckOptionLists' is a list of WCO lists (one per rel)
3900 : * 'returningLists' is a list of RETURNING tlists (one per rel)
3901 : * 'rowMarks' is a list of PlanRowMarks (non-locking only)
3902 : * 'onconflict' is the ON CONFLICT clause, or NULL
3903 : * 'epqParam' is the ID of Param for EvalPlanQual re-eval
3904 : * 'mergeActionLists' is a list of lists of MERGE actions (one per rel)
3905 : * 'mergeJoinConditions' is a list of join conditions for MERGE (one per rel)
3906 : */
3907 : ModifyTablePath *
3908 86638 : create_modifytable_path(PlannerInfo *root, RelOptInfo *rel,
3909 : Path *subpath,
3910 : CmdType operation, bool canSetTag,
3911 : Index nominalRelation, Index rootRelation,
3912 : bool partColsUpdated,
3913 : List *resultRelations,
3914 : List *updateColnosLists,
3915 : List *withCheckOptionLists, List *returningLists,
3916 : List *rowMarks, OnConflictExpr *onconflict,
3917 : List *mergeActionLists, List *mergeJoinConditions,
3918 : int epqParam)
3919 : {
3920 86638 : ModifyTablePath *pathnode = makeNode(ModifyTablePath);
3921 :
3922 : Assert(operation == CMD_MERGE ||
3923 : (operation == CMD_UPDATE ?
3924 : list_length(resultRelations) == list_length(updateColnosLists) :
3925 : updateColnosLists == NIL));
3926 : Assert(withCheckOptionLists == NIL ||
3927 : list_length(resultRelations) == list_length(withCheckOptionLists));
3928 : Assert(returningLists == NIL ||
3929 : list_length(resultRelations) == list_length(returningLists));
3930 :
3931 86638 : pathnode->path.pathtype = T_ModifyTable;
3932 86638 : pathnode->path.parent = rel;
3933 : /* pathtarget is not interesting, just make it minimally valid */
3934 86638 : pathnode->path.pathtarget = rel->reltarget;
3935 : /* For now, assume we are above any joins, so no parameterization */
3936 86638 : pathnode->path.param_info = NULL;
3937 86638 : pathnode->path.parallel_aware = false;
3938 86638 : pathnode->path.parallel_safe = false;
3939 86638 : pathnode->path.parallel_workers = 0;
3940 86638 : pathnode->path.pathkeys = NIL;
3941 :
3942 : /*
3943 : * Compute cost & rowcount as subpath cost & rowcount (if RETURNING)
3944 : *
3945 : * Currently, we don't charge anything extra for the actual table
3946 : * modification work, nor for the WITH CHECK OPTIONS or RETURNING
3947 : * expressions if any. It would only be window dressing, since
3948 : * ModifyTable is always a top-level node and there is no way for the
3949 : * costs to change any higher-level planning choices. But we might want
3950 : * to make it look better sometime.
3951 : */
3952 86638 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
3953 86638 : pathnode->path.startup_cost = subpath->startup_cost;
3954 86638 : pathnode->path.total_cost = subpath->total_cost;
3955 86638 : if (returningLists != NIL)
3956 : {
3957 2888 : pathnode->path.rows = subpath->rows;
3958 :
3959 : /*
3960 : * Set width to match the subpath output. XXX this is totally wrong:
3961 : * we should return an average of the RETURNING tlist widths. But
3962 : * it's what happened historically, and improving it is a task for
3963 : * another day. (Again, it's mostly window dressing.)
3964 : */
3965 2888 : pathnode->path.pathtarget->width = subpath->pathtarget->width;
3966 : }
3967 : else
3968 : {
3969 83750 : pathnode->path.rows = 0;
3970 83750 : pathnode->path.pathtarget->width = 0;
3971 : }
3972 :
3973 86638 : pathnode->subpath = subpath;
3974 86638 : pathnode->operation = operation;
3975 86638 : pathnode->canSetTag = canSetTag;
3976 86638 : pathnode->nominalRelation = nominalRelation;
3977 86638 : pathnode->rootRelation = rootRelation;
3978 86638 : pathnode->partColsUpdated = partColsUpdated;
3979 86638 : pathnode->resultRelations = resultRelations;
3980 86638 : pathnode->updateColnosLists = updateColnosLists;
3981 86638 : pathnode->withCheckOptionLists = withCheckOptionLists;
3982 86638 : pathnode->returningLists = returningLists;
3983 86638 : pathnode->rowMarks = rowMarks;
3984 86638 : pathnode->onconflict = onconflict;
3985 86638 : pathnode->epqParam = epqParam;
3986 86638 : pathnode->mergeActionLists = mergeActionLists;
3987 86638 : pathnode->mergeJoinConditions = mergeJoinConditions;
3988 :
3989 86638 : return pathnode;
3990 : }
3991 :
3992 : /*
3993 : * create_limit_path
3994 : * Creates a pathnode that represents performing LIMIT/OFFSET
3995 : *
3996 : * In addition to providing the actual OFFSET and LIMIT expressions,
3997 : * the caller must provide estimates of their values for costing purposes.
3998 : * The estimates are as computed by preprocess_limit(), ie, 0 represents
3999 : * the clause not being present, and -1 means it's present but we could
4000 : * not estimate its value.
4001 : *
4002 : * 'rel' is the parent relation associated with the result
4003 : * 'subpath' is the path representing the source of data
4004 : * 'limitOffset' is the actual OFFSET expression, or NULL
4005 : * 'limitCount' is the actual LIMIT expression, or NULL
4006 : * 'offset_est' is the estimated value of the OFFSET expression
4007 : * 'count_est' is the estimated value of the LIMIT expression
4008 : */
4009 : LimitPath *
4010 6054 : create_limit_path(PlannerInfo *root, RelOptInfo *rel,
4011 : Path *subpath,
4012 : Node *limitOffset, Node *limitCount,
4013 : LimitOption limitOption,
4014 : int64 offset_est, int64 count_est)
4015 : {
4016 6054 : LimitPath *pathnode = makeNode(LimitPath);
4017 :
4018 6054 : pathnode->path.pathtype = T_Limit;
4019 6054 : pathnode->path.parent = rel;
4020 : /* Limit doesn't project, so use source path's pathtarget */
4021 6054 : pathnode->path.pathtarget = subpath->pathtarget;
4022 : /* For now, assume we are above any joins, so no parameterization */
4023 6054 : pathnode->path.param_info = NULL;
4024 6054 : pathnode->path.parallel_aware = false;
4025 8466 : pathnode->path.parallel_safe = rel->consider_parallel &&
4026 2412 : subpath->parallel_safe;
4027 6054 : pathnode->path.parallel_workers = subpath->parallel_workers;
4028 6054 : pathnode->path.rows = subpath->rows;
4029 6054 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
4030 6054 : pathnode->path.startup_cost = subpath->startup_cost;
4031 6054 : pathnode->path.total_cost = subpath->total_cost;
4032 6054 : pathnode->path.pathkeys = subpath->pathkeys;
4033 6054 : pathnode->subpath = subpath;
4034 6054 : pathnode->limitOffset = limitOffset;
4035 6054 : pathnode->limitCount = limitCount;
4036 6054 : pathnode->limitOption = limitOption;
4037 :
4038 : /*
4039 : * Adjust the output rows count and costs according to the offset/limit.
4040 : */
4041 6054 : adjust_limit_rows_costs(&pathnode->path.rows,
4042 : &pathnode->path.startup_cost,
4043 : &pathnode->path.total_cost,
4044 : offset_est, count_est);
4045 :
4046 6054 : return pathnode;
4047 : }
4048 :
4049 : /*
4050 : * adjust_limit_rows_costs
4051 : * Adjust the size and cost estimates for a LimitPath node according to the
4052 : * offset/limit.
4053 : *
4054 : * This is only a cosmetic issue if we are at top level, but if we are
4055 : * building a subquery then it's important to report correct info to the outer
4056 : * planner.
4057 : *
4058 : * When the offset or count couldn't be estimated, use 10% of the estimated
4059 : * number of rows emitted from the subpath.
4060 : *
4061 : * XXX we don't bother to add eval costs of the offset/limit expressions
4062 : * themselves to the path costs. In theory we should, but in most cases those
4063 : * expressions are trivial and it's just not worth the trouble.
4064 : */
4065 : void
4066 6238 : adjust_limit_rows_costs(double *rows, /* in/out parameter */
4067 : Cost *startup_cost, /* in/out parameter */
4068 : Cost *total_cost, /* in/out parameter */
4069 : int64 offset_est,
4070 : int64 count_est)
4071 : {
4072 6238 : double input_rows = *rows;
4073 6238 : Cost input_startup_cost = *startup_cost;
4074 6238 : Cost input_total_cost = *total_cost;
4075 :
4076 6238 : if (offset_est != 0)
4077 : {
4078 : double offset_rows;
4079 :
4080 694 : if (offset_est > 0)
4081 670 : offset_rows = (double) offset_est;
4082 : else
4083 24 : offset_rows = clamp_row_est(input_rows * 0.10);
4084 694 : if (offset_rows > *rows)
4085 34 : offset_rows = *rows;
4086 694 : if (input_rows > 0)
4087 694 : *startup_cost +=
4088 694 : (input_total_cost - input_startup_cost)
4089 694 : * offset_rows / input_rows;
4090 694 : *rows -= offset_rows;
4091 694 : if (*rows < 1)
4092 42 : *rows = 1;
4093 : }
4094 :
4095 6238 : if (count_est != 0)
4096 : {
4097 : double count_rows;
4098 :
4099 6180 : if (count_est > 0)
4100 6174 : count_rows = (double) count_est;
4101 : else
4102 6 : count_rows = clamp_row_est(input_rows * 0.10);
4103 6180 : if (count_rows > *rows)
4104 236 : count_rows = *rows;
4105 6180 : if (input_rows > 0)
4106 6180 : *total_cost = *startup_cost +
4107 6180 : (input_total_cost - input_startup_cost)
4108 6180 : * count_rows / input_rows;
4109 6180 : *rows = count_rows;
4110 6180 : if (*rows < 1)
4111 0 : *rows = 1;
4112 : }
4113 6238 : }
4114 :
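/*
 * A standalone, illustrative sketch (not part of pathnode.c) of the arithmetic
 * in adjust_limit_rows_costs() above, using made-up inputs.  An OFFSET shifts
 * part of the run cost into startup cost in proportion to the rows skipped,
 * and a LIMIT scales the total cost down in proportion to the rows kept; an
 * inestimable value (-1 in the real code) falls back to 10% of the input rows.
 */
#include <stdio.h>

int
main(void)
{
    double  rows = 1000.0;
    double  startup = 0.0, total = 100.0;
    double  in_rows = rows, in_startup = startup, in_total = total;
    double  offset_rows = 100.0;    /* hypothetical OFFSET estimate */
    double  count_rows = 10.0;      /* hypothetical LIMIT estimate */

    /* OFFSET: skip offset_rows, paying their share of run cost up front */
    if (offset_rows > rows)
        offset_rows = rows;
    startup += (in_total - in_startup) * offset_rows / in_rows;
    rows -= offset_rows;
    if (rows < 1)
        rows = 1;

    /* LIMIT: keep count_rows, paying only their share of run cost */
    if (count_rows > rows)
        count_rows = rows;
    total = startup + (in_total - in_startup) * count_rows / in_rows;
    rows = count_rows;

    printf("limit/offset: rows=%.0f startup=%.2f total=%.2f\n",
           rows, startup, total);
    return 0;
}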
4115 :
4116 : /*
4117 : * reparameterize_path
4118 : * Attempt to modify a Path to have greater parameterization
4119 : *
4120 : * We use this to attempt to bring all child paths of an appendrel to the
4121 : * same parameterization level, ensuring that they all enforce the same set
4122 : * of join quals (and thus that that parameterization can be attributed to
4123 : * an append path built from such paths). Currently, only a few path types
4124 : * are supported here, though more could be added at need. We return NULL
4125 : * if we can't reparameterize the given path.
4126 : *
4127 : * Note: we intentionally do not pass created paths to add_path(); it would
4128 : * possibly try to delete them on the grounds of being cost-inferior to the
4129 : * paths they were made from, and we don't want that. Paths made here are
4130 : * not necessarily of general-purpose usefulness, but they can be useful
4131 : * as members of an append path.
4132 : */
4133 : Path *
4134 356 : reparameterize_path(PlannerInfo *root, Path *path,
4135 : Relids required_outer,
4136 : double loop_count)
4137 : {
4138 356 : RelOptInfo *rel = path->parent;
4139 :
4140 : /* Can only increase, not decrease, path's parameterization */
4141 356 : if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
4142 0 : return NULL;
4143 356 : switch (path->pathtype)
4144 : {
4145 264 : case T_SeqScan:
4146 264 : return create_seqscan_path(root, rel, required_outer, 0);
4147 0 : case T_SampleScan:
4148 0 : return (Path *) create_samplescan_path(root, rel, required_outer);
4149 0 : case T_IndexScan:
4150 : case T_IndexOnlyScan:
4151 : {
4152 0 : IndexPath *ipath = (IndexPath *) path;
4153 0 : IndexPath *newpath = makeNode(IndexPath);
4154 :
4155 : /*
4156 : * We can't use create_index_path directly, and would not want
4157 : * to because it would re-compute the indexqual conditions
4158 : * which is wasted effort. Instead we hack things a bit:
4159 : * flat-copy the path node, revise its param_info, and redo
4160 : * the cost estimate.
4161 : */
4162 0 : memcpy(newpath, ipath, sizeof(IndexPath));
4163 0 : newpath->path.param_info =
4164 0 : get_baserel_parampathinfo(root, rel, required_outer);
4165 0 : cost_index(newpath, root, loop_count, false);
4166 0 : return (Path *) newpath;
4167 : }
4168 0 : case T_BitmapHeapScan:
4169 : {
4170 0 : BitmapHeapPath *bpath = (BitmapHeapPath *) path;
4171 :
4172 0 : return (Path *) create_bitmap_heap_path(root,
4173 : rel,
4174 : bpath->bitmapqual,
4175 : required_outer,
4176 : loop_count, 0);
4177 : }
4178 0 : case T_SubqueryScan:
4179 : {
4180 0 : SubqueryScanPath *spath = (SubqueryScanPath *) path;
4181 0 : Path *subpath = spath->subpath;
4182 : bool trivial_pathtarget;
4183 :
4184 : /*
4185 : * If existing node has zero extra cost, we must have decided
4186 : * its target is trivial. (The converse is not true, because
4187 : * it might have a trivial target but quals to enforce; but in
4188 : * that case the new node will too, so it doesn't matter
4189 : * whether we get the right answer here.)
4190 : */
4191 0 : trivial_pathtarget =
4192 0 : (subpath->total_cost == spath->path.total_cost);
4193 :
4194 0 : return (Path *) create_subqueryscan_path(root,
4195 : rel,
4196 : subpath,
4197 : trivial_pathtarget,
4198 : spath->path.pathkeys,
4199 : required_outer);
4200 : }
4201 60 : case T_Result:
4202 : /* Supported only for RTE_RESULT scan paths */
4203 60 : if (IsA(path, Path))
4204 60 : return create_resultscan_path(root, rel, required_outer);
4205 0 : break;
4206 0 : case T_Append:
4207 : {
4208 0 : AppendPath *apath = (AppendPath *) path;
4209 0 : List *childpaths = NIL;
4210 0 : List *partialpaths = NIL;
4211 : int i;
4212 : ListCell *lc;
4213 :
4214 : /* Reparameterize the children */
4215 0 : i = 0;
4216 0 : foreach(lc, apath->subpaths)
4217 : {
4218 0 : Path *spath = (Path *) lfirst(lc);
4219 :
4220 0 : spath = reparameterize_path(root, spath,
4221 : required_outer,
4222 : loop_count);
4223 0 : if (spath == NULL)
4224 0 : return NULL;
4225 : /* We have to re-split the regular and partial paths */
4226 0 : if (i < apath->first_partial_path)
4227 0 : childpaths = lappend(childpaths, spath);
4228 : else
4229 0 : partialpaths = lappend(partialpaths, spath);
4230 0 : i++;
4231 : }
4232 0 : return (Path *)
4233 0 : create_append_path(root, rel, childpaths, partialpaths,
4234 : apath->path.pathkeys, required_outer,
4235 : apath->path.parallel_workers,
4236 0 : apath->path.parallel_aware,
4237 : -1);
4238 : }
4239 0 : case T_Material:
4240 : {
4241 0 : MaterialPath *mpath = (MaterialPath *) path;
4242 0 : Path *spath = mpath->subpath;
4243 :
4244 0 : spath = reparameterize_path(root, spath,
4245 : required_outer,
4246 : loop_count);
4247 0 : if (spath == NULL)
4248 0 : return NULL;
4249 0 : return (Path *) create_material_path(rel, spath);
4250 : }
4251 0 : case T_Memoize:
4252 : {
4253 0 : MemoizePath *mpath = (MemoizePath *) path;
4254 0 : Path *spath = mpath->subpath;
4255 :
4256 0 : spath = reparameterize_path(root, spath,
4257 : required_outer,
4258 : loop_count);
4259 0 : if (spath == NULL)
4260 0 : return NULL;
4261 0 : return (Path *) create_memoize_path(root, rel,
4262 : spath,
4263 : mpath->param_exprs,
4264 : mpath->hash_operators,
4265 0 : mpath->singlerow,
4266 0 : mpath->binary_mode,
4267 : mpath->est_calls);
4268 : }
4269 32 : default:
4270 32 : break;
4271 : }
4272 32 : return NULL;
4273 : }
4274 :
4275 : /*
4276 : * reparameterize_path_by_child
4277 : * Given a path parameterized by the parent of the given child relation,
4278 : * translate the path to be parameterized by the given child relation.
4279 : *
4280 : * Most fields in the path are not changed, but any expressions must be
4281 : * adjusted to refer to the correct varnos, and any subpaths must be
4282 : * recursively reparameterized. Other fields that refer to specific relids
4283 : * also need adjustment.
4284 : *
4285 : * The cost, number of rows, width and parallel path properties depend upon
4286 : * path->parent, which does not change during the translation. So we need
4287 : * not change those.
4288 : *
4289 : * Currently, only a few path types are supported here, though more could be
4290 : * added at need. We return NULL if we can't reparameterize the given path.
4291 : *
4292 : * Note that this function can change referenced RangeTblEntries, RelOptInfos
4293 : * and IndexOptInfos as well as the Path structures. Therefore, it's only safe
4294 : * to call during create_plan(), when we have made a final choice of which Path
4295 : * to use for each RangeTblEntry/RelOptInfo/IndexOptInfo.
4296 : *
4297 : * Keep this code in sync with path_is_reparameterizable_by_child()!
4298 : */
4299 : Path *
4300 92518 : reparameterize_path_by_child(PlannerInfo *root, Path *path,
4301 : RelOptInfo *child_rel)
4302 : {
4303 : Path *new_path;
4304 : ParamPathInfo *new_ppi;
4305 : ParamPathInfo *old_ppi;
4306 : Relids required_outer;
4307 :
4308 : #define ADJUST_CHILD_ATTRS(node) \
4309 : ((node) = (void *) adjust_appendrel_attrs_multilevel(root, \
4310 : (Node *) (node), \
4311 : child_rel, \
4312 : child_rel->top_parent))
4313 :
4314 : #define REPARAMETERIZE_CHILD_PATH(path) \
4315 : do { \
4316 : (path) = reparameterize_path_by_child(root, (path), child_rel); \
4317 : if ((path) == NULL) \
4318 : return NULL; \
4319 : } while(0)
4320 :
4321 : #define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
4322 : do { \
4323 : if ((pathlist) != NIL) \
4324 : { \
4325 : (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
4326 : child_rel); \
4327 : if ((pathlist) == NIL) \
4328 : return NULL; \
4329 : } \
4330 : } while(0)
4331 :
4332 : /*
4333 : * If the path is not parameterized by the parent of the given relation,
4334 : * it doesn't need reparameterization.
4335 : */
4336 92518 : if (!path->param_info ||
4337 46610 : !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4338 91546 : return path;
4339 :
4340 : /*
4341 : * If possible, reparameterize the given path.
4342 : *
4343 : * This function is currently only applied to the inner side of a nestloop
4344 : * join that is being partitioned by the partitionwise-join code. Hence,
4345 : * we need only support path types that plausibly arise in that context.
4346 : * (In particular, supporting sorted path types would be a waste of code
4347 : * and cycles: even if we translated them here, they'd just lose in
4348 : * subsequent cost comparisons.) If we do see an unsupported path type,
4349 : * that just means we won't be able to generate a partitionwise-join plan
4350 : * using that path type.
4351 : */
4352 972 : switch (nodeTag(path))
4353 : {
4354 228 : case T_Path:
4355 228 : new_path = path;
4356 228 : ADJUST_CHILD_ATTRS(new_path->parent->baserestrictinfo);
4357 228 : if (path->pathtype == T_SampleScan)
4358 : {
4359 48 : Index scan_relid = path->parent->relid;
4360 : RangeTblEntry *rte;
4361 :
4362 : /* it should be a base rel with a tablesample clause... */
4363 : Assert(scan_relid > 0);
4364 48 : rte = planner_rt_fetch(scan_relid, root);
4365 : Assert(rte->rtekind == RTE_RELATION);
4366 : Assert(rte->tablesample != NULL);
4367 :
4368 48 : ADJUST_CHILD_ATTRS(rte->tablesample);
4369 : }
4370 228 : break;
4371 :
4372 492 : case T_IndexPath:
4373 : {
4374 492 : IndexPath *ipath = (IndexPath *) path;
4375 :
4376 492 : ADJUST_CHILD_ATTRS(ipath->indexinfo->indrestrictinfo);
4377 492 : ADJUST_CHILD_ATTRS(ipath->indexclauses);
4378 492 : new_path = (Path *) ipath;
4379 : }
4380 492 : break;
4381 :
4382 48 : case T_BitmapHeapPath:
4383 : {
4384 48 : BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4385 :
4386 48 : ADJUST_CHILD_ATTRS(bhpath->path.parent->baserestrictinfo);
4387 48 : REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual);
4388 48 : new_path = (Path *) bhpath;
4389 : }
4390 48 : break;
4391 :
4392 24 : case T_BitmapAndPath:
4393 : {
4394 24 : BitmapAndPath *bapath = (BitmapAndPath *) path;
4395 :
4396 24 : REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals);
4397 24 : new_path = (Path *) bapath;
4398 : }
4399 24 : break;
4400 :
4401 24 : case T_BitmapOrPath:
4402 : {
4403 24 : BitmapOrPath *bopath = (BitmapOrPath *) path;
4404 :
4405 24 : REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals);
4406 24 : new_path = (Path *) bopath;
4407 : }
4408 24 : break;
4409 :
4410 0 : case T_ForeignPath:
4411 : {
4412 0 : ForeignPath *fpath = (ForeignPath *) path;
4413 : ReparameterizeForeignPathByChild_function rfpc_func;
4414 :
4415 0 : ADJUST_CHILD_ATTRS(fpath->path.parent->baserestrictinfo);
4416 0 : if (fpath->fdw_outerpath)
4417 0 : REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath);
4418 0 : if (fpath->fdw_restrictinfo)
4419 0 : ADJUST_CHILD_ATTRS(fpath->fdw_restrictinfo);
4420 :
4421 : /* Hand over to FDW if needed. */
4422 0 : rfpc_func =
4423 0 : path->parent->fdwroutine->ReparameterizeForeignPathByChild;
4424 0 : if (rfpc_func)
4425 0 : fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
4426 : child_rel);
4427 0 : new_path = (Path *) fpath;
4428 : }
4429 0 : break;
4430 :
4431 0 : case T_CustomPath:
4432 : {
4433 0 : CustomPath *cpath = (CustomPath *) path;
4434 :
4435 0 : ADJUST_CHILD_ATTRS(cpath->path.parent->baserestrictinfo);
4436 0 : REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths);
4437 0 : if (cpath->custom_restrictinfo)
4438 0 : ADJUST_CHILD_ATTRS(cpath->custom_restrictinfo);
4439 0 : if (cpath->methods &&
4440 0 : cpath->methods->ReparameterizeCustomPathByChild)
4441 0 : cpath->custom_private =
4442 0 : cpath->methods->ReparameterizeCustomPathByChild(root,
4443 : cpath->custom_private,
4444 : child_rel);
4445 0 : new_path = (Path *) cpath;
4446 : }
4447 0 : break;
4448 :
4449 36 : case T_NestPath:
4450 : {
4451 36 : NestPath *npath = (NestPath *) path;
4452 36 : JoinPath *jpath = (JoinPath *) npath;
4453 :
4454 36 : REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4455 36 : REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4456 36 : ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4457 36 : new_path = (Path *) npath;
4458 : }
4459 36 : break;
4460 :
4461 0 : case T_MergePath:
4462 : {
4463 0 : MergePath *mpath = (MergePath *) path;
4464 0 : JoinPath *jpath = (JoinPath *) mpath;
4465 :
4466 0 : REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4467 0 : REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4468 0 : ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4469 0 : ADJUST_CHILD_ATTRS(mpath->path_mergeclauses);
4470 0 : new_path = (Path *) mpath;
4471 : }
4472 0 : break;
4473 :
4474 48 : case T_HashPath:
4475 : {
4476 48 : HashPath *hpath = (HashPath *) path;
4477 48 : JoinPath *jpath = (JoinPath *) hpath;
4478 :
4479 48 : REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4480 48 : REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4481 48 : ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4482 48 : ADJUST_CHILD_ATTRS(hpath->path_hashclauses);
4483 48 : new_path = (Path *) hpath;
4484 : }
4485 48 : break;
4486 :
4487 24 : case T_AppendPath:
4488 : {
4489 24 : AppendPath *apath = (AppendPath *) path;
4490 :
4491 24 : REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths);
4492 24 : new_path = (Path *) apath;
4493 : }
4494 24 : break;
4495 :
4496 0 : case T_MaterialPath:
4497 : {
4498 0 : MaterialPath *mpath = (MaterialPath *) path;
4499 :
4500 0 : REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4501 0 : new_path = (Path *) mpath;
4502 : }
4503 0 : break;
4504 :
4505 48 : case T_MemoizePath:
4506 : {
4507 48 : MemoizePath *mpath = (MemoizePath *) path;
4508 :
4509 48 : REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4510 48 : ADJUST_CHILD_ATTRS(mpath->param_exprs);
4511 48 : new_path = (Path *) mpath;
4512 : }
4513 48 : break;
4514 :
4515 0 : case T_GatherPath:
4516 : {
4517 0 : GatherPath *gpath = (GatherPath *) path;
4518 :
4519 0 : REPARAMETERIZE_CHILD_PATH(gpath->subpath);
4520 0 : new_path = (Path *) gpath;
4521 : }
4522 0 : break;
4523 :
4524 0 : default:
4525 : /* We don't know how to reparameterize this path. */
4526 0 : return NULL;
4527 : }
4528 :
4529 : /*
4530 : * Adjust the parameterization information, which refers to the topmost
4531 : * parent. The topmost parent can be multiple levels away from the given
4532 : * child, hence use multi-level expression adjustment routines.
4533 : */
4534 972 : old_ppi = new_path->param_info;
4535 : required_outer =
4536 972 : adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
4537 : child_rel,
4538 972 : child_rel->top_parent);
4539 :
4540 : /* If we already have a PPI for this parameterization, just return it */
4541 972 : new_ppi = find_param_path_info(new_path->parent, required_outer);
4542 :
4543 : /*
4544 : * If not, build a new one and link it to the list of PPIs. For the same
 4545 : * reason as explained in mark_dummy_rel(), allocate the new PPI in the same
4546 : * context the given RelOptInfo is in.
4547 : */
4548 972 : if (new_ppi == NULL)
4549 : {
4550 : MemoryContext oldcontext;
4551 828 : RelOptInfo *rel = path->parent;
4552 :
4553 828 : oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
4554 :
4555 828 : new_ppi = makeNode(ParamPathInfo);
4556 828 : new_ppi->ppi_req_outer = bms_copy(required_outer);
4557 828 : new_ppi->ppi_rows = old_ppi->ppi_rows;
4558 828 : new_ppi->ppi_clauses = old_ppi->ppi_clauses;
4559 828 : ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
4560 828 : new_ppi->ppi_serials = bms_copy(old_ppi->ppi_serials);
4561 828 : rel->ppilist = lappend(rel->ppilist, new_ppi);
4562 :
4563 828 : MemoryContextSwitchTo(oldcontext);
4564 : }
4565 972 : bms_free(required_outer);
4566 :
4567 972 : new_path->param_info = new_ppi;
4568 :
4569 : /*
4570 : * Adjust the path target if the parent of the outer relation is
4571 : * referenced in the targetlist. This can happen when only the parent of
 4572 : * the outer relation is laterally referenced in this relation.
4573 : */
4574 972 : if (bms_overlap(path->parent->lateral_relids,
4575 972 : child_rel->top_parent_relids))
4576 : {
4577 480 : new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
4578 480 : ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
4579 : }
4580 :
4581 972 : return new_path;
4582 : }
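
/*
 * A minimal sketch of what supporting one more path type above could look
 * like, using TidPath purely as a hypothetical example: it is not handled
 * today, and whether translating tidquals plus the parent's baserestrictinfo
 * would actually suffice is an assumption that would need the same scrutiny
 * the existing cases received.
 *
 *		case T_TidPath:
 *			{
 *				TidPath    *tpath = (TidPath *) path;
 *
 *				ADJUST_CHILD_ATTRS(tpath->path.parent->baserestrictinfo);
 *				ADJUST_CHILD_ATTRS(tpath->tidquals);
 *				new_path = (Path *) tpath;
 *			}
 *			break;
 *
 * Each expression field is translated in place and the shared ParamPathInfo
 * fix-up after the switch does the rest.  Any case added here must also be
 * recognized by path_is_reparameterizable_by_child(), per the "keep in sync"
 * note in the header comment above.
 */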
4583 :
4584 : /*
4585 : * path_is_reparameterizable_by_child
4586 : * Given a path parameterized by the parent of the given child relation,
4587 : * see if it can be translated to be parameterized by the child relation.
4588 : *
4589 : * This must return true if and only if reparameterize_path_by_child()
4590 : * would succeed on this path. Currently it's sufficient to verify that
4591 : * the path and all of its subpaths (if any) are of the types handled by
4592 : * that function. However, subpaths that are not parameterized can be
 4593 : * disregarded since they won't require translation.  (A caller-side usage sketch appears after this function.)
4594 : */
4595 : bool
4596 34752 : path_is_reparameterizable_by_child(Path *path, RelOptInfo *child_rel)
4597 : {
4598 : #define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path) \
4599 : do { \
4600 : if (!path_is_reparameterizable_by_child(path, child_rel)) \
4601 : return false; \
4602 : } while(0)
4603 :
4604 : #define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist) \
4605 : do { \
4606 : if (!pathlist_is_reparameterizable_by_child(pathlist, child_rel)) \
4607 : return false; \
4608 : } while(0)
4609 :
4610 : /*
4611 : * If the path is not parameterized by the parent of the given relation,
4612 : * it doesn't need reparameterization.
4613 : */
4614 34752 : if (!path->param_info ||
4615 34344 : !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4616 984 : return true;
4617 :
4618 : /*
4619 : * Check that the path type is one that reparameterize_path_by_child() can
4620 : * handle, and recursively check subpaths.
4621 : */
4622 33768 : switch (nodeTag(path))
4623 : {
4624 22632 : case T_Path:
4625 : case T_IndexPath:
4626 22632 : break;
4627 :
4628 48 : case T_BitmapHeapPath:
4629 : {
4630 48 : BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4631 :
4632 48 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(bhpath->bitmapqual);
4633 : }
4634 48 : break;
4635 :
4636 24 : case T_BitmapAndPath:
4637 : {
4638 24 : BitmapAndPath *bapath = (BitmapAndPath *) path;
4639 :
4640 24 : REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bapath->bitmapquals);
4641 : }
4642 24 : break;
4643 :
4644 24 : case T_BitmapOrPath:
4645 : {
4646 24 : BitmapOrPath *bopath = (BitmapOrPath *) path;
4647 :
4648 24 : REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bopath->bitmapquals);
4649 : }
4650 24 : break;
4651 :
4652 148 : case T_ForeignPath:
4653 : {
4654 148 : ForeignPath *fpath = (ForeignPath *) path;
4655 :
4656 148 : if (fpath->fdw_outerpath)
4657 0 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(fpath->fdw_outerpath);
4658 : }
4659 148 : break;
4660 :
4661 0 : case T_CustomPath:
4662 : {
4663 0 : CustomPath *cpath = (CustomPath *) path;
4664 :
4665 0 : REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(cpath->custom_paths);
4666 : }
4667 0 : break;
4668 :
4669 1248 : case T_NestPath:
4670 : case T_MergePath:
4671 : case T_HashPath:
4672 : {
4673 1248 : JoinPath *jpath = (JoinPath *) path;
4674 :
4675 1248 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->outerjoinpath);
4676 1248 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->innerjoinpath);
4677 : }
4678 1248 : break;
4679 :
4680 192 : case T_AppendPath:
4681 : {
4682 192 : AppendPath *apath = (AppendPath *) path;
4683 :
4684 192 : REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(apath->subpaths);
4685 : }
4686 192 : break;
4687 :
4688 0 : case T_MaterialPath:
4689 : {
4690 0 : MaterialPath *mpath = (MaterialPath *) path;
4691 :
4692 0 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4693 : }
4694 0 : break;
4695 :
4696 9452 : case T_MemoizePath:
4697 : {
4698 9452 : MemoizePath *mpath = (MemoizePath *) path;
4699 :
4700 9452 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4701 : }
4702 9452 : break;
4703 :
4704 0 : case T_GatherPath:
4705 : {
4706 0 : GatherPath *gpath = (GatherPath *) path;
4707 :
4708 0 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(gpath->subpath);
4709 : }
4710 0 : break;
4711 :
4712 0 : default:
4713 : /* We don't know how to reparameterize this path. */
4714 0 : return false;
4715 : }
4716 :
4717 33768 : return true;
4718 : }
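
/*
 * A minimal caller-side sketch; the identifiers inner_path and
 * outer_child_rel are placeholders, not names used in this file.  Join
 * planning code that wants a child join with a parent-parameterized inner
 * path can run this cheap check while searching the path space:
 *
 *		if (!path_is_reparameterizable_by_child(inner_path, outer_child_rel))
 *			return;
 *
 * and defer the actual translation to create_plan() time, when mutating the
 * shared structures becomes safe:
 *
 *		inner_path = reparameterize_path_by_child(root, inner_path,
 *												   outer_child_rel);
 *		Assert(inner_path != NULL);
 *
 * The "if and only if" contract documented above is what makes the final
 * Assert safe.
 */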
4719 :
4720 : /*
4721 : * reparameterize_pathlist_by_child
 4722 : * Helper function to reparameterize a list of paths by the given child rel.
4723 : *
4724 : * Returns NIL to indicate failure, so pathlist had better not be NIL.
4725 : */
4726 : static List *
4727 72 : reparameterize_pathlist_by_child(PlannerInfo *root,
4728 : List *pathlist,
4729 : RelOptInfo *child_rel)
4730 : {
4731 : ListCell *lc;
4732 72 : List *result = NIL;
4733 :
4734 216 : foreach(lc, pathlist)
4735 : {
4736 144 : Path *path = reparameterize_path_by_child(root, lfirst(lc),
4737 : child_rel);
4738 :
4739 144 : if (path == NULL)
4740 : {
4741 0 : list_free(result);
4742 0 : return NIL;
4743 : }
4744 :
4745 144 : result = lappend(result, path);
4746 : }
4747 :
4748 72 : return result;
4749 : }
4750 :
4751 : /*
4752 : * pathlist_is_reparameterizable_by_child
4753 : * Helper function to check if a list of paths can be reparameterized.
4754 : */
4755 : static bool
4756 240 : pathlist_is_reparameterizable_by_child(List *pathlist, RelOptInfo *child_rel)
4757 : {
4758 : ListCell *lc;
4759 :
4760 720 : foreach(lc, pathlist)
4761 : {
4762 480 : Path *path = (Path *) lfirst(lc);
4763 :
4764 480 : if (!path_is_reparameterizable_by_child(path, child_rel))
4765 0 : return false;
4766 : }
4767 :
4768 240 : return true;
4769 : }
|